repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
EdLogan18/logan-repository | plugin.video.playlistLoader/resources/lib/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table, from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio is about 25% of the ideal one, still much higher than RDR
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
GB2312_TABLE_SIZE = 3760
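# Illustrative sketch (not part of chardet; the function name below is
# hypothetical): the two constants above are consumed by a char-distribution
# analyser that counts how many sampled characters fall inside the frequent
# set (FreqOrder < 512) and turns that into a confidence score, roughly:
#
#   def gb2312_confidence(freq_chars, total_chars):
#       if total_chars <= 0 or freq_chars == total_chars:
#           return 0.99
#       r = freq_chars / float(
#           (total_chars - freq_chars) * GB2312_TYPICAL_DISTRIBUTION_RATIO)
#       return min(r, 0.99)
#
# e.g. 900 frequent characters out of 1000 sampled gives
# 900 / (100 * 0.9) = 10.0, capped at 0.99 -- a confident GB2312 match.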
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
# Everything below is of no interest for detection purposes
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| gpl-2.0 |
dset0x/invenio | invenio/modules/deposit/testsuite/test_deposit_models.py | 2 | 2455 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the Deposit models."""
from flask_registry import RegistryError
from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite
class DepositionTest(InvenioTestCase):
"""Test."""
def setUp(self):
"""Test."""
from invenio.modules.deposit.models import DepositionType
from invenio.modules.deposit.registry import deposit_types, \
deposit_default_type
# Unregister any default types
try:
deposit_default_type.unregister()
except RegistryError:
pass
# Create some test types.
class DefaultType(DepositionType):
pass
class AnotherType(DepositionType):
pass
# Register types
self.DefaultType = DefaultType
self.AnotherType = AnotherType
deposit_types.register(DefaultType)
deposit_types.register(AnotherType)
deposit_default_type.register(DefaultType)
def test_create(self):
"""Test."""
from invenio.ext.login.legacy_user import UserInfo
from invenio.modules.deposit.models import Deposition
user = UserInfo(uid=1)
d = Deposition.create(user)
assert d.type == self.DefaultType
assert Deposition.get(d.id).type == self.DefaultType
d2 = Deposition.create(user, type=self.AnotherType)
assert d2.type == self.AnotherType
assert Deposition.get(d2.id).type == self.AnotherType
# remove the records
Deposition.delete(d)
Deposition.delete(d2)
TEST_SUITE = make_test_suite(DepositionTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
apark263/tensorflow | tensorflow/python/autograph/operators/py_builtins.py | 2 | 7074 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators corresponding to Python builtin functions.
List of built-in functions: https://docs.python.org/3/library/functions.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.autograph.utils import py_func
from tensorflow.python.autograph.utils import tensors
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
UNSPECIFIED = object()
def overload_of(f):
if f in SUPPORTED_BUILTINS:
return BUILTIN_FUNCTIONS_MAP[f.__name__]
return f
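# Usage sketch (added for illustration): overload_of swaps a supported Python
# builtin for its AutoGraph-aware counterpart and passes anything else through:
#   overload_of(len) is len_       # True
#   overload_of(sorted) is sorted  # True -- sorted is not in SUPPORTED_BUILTINS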
def abs_(x):
if tensor_util.is_tensor(x):
return _tf_abs(x)
return _py_abs(x)
def _tf_abs(x):
return math_ops.abs(x)
def _py_abs(x):
return abs(x)
def float_(x=0):
if tensor_util.is_tensor(x):
return _tf_float(x)
return _py_float(x)
def _tf_float(x):
# TODO(mdan): We shouldn't assume float32.
if x.dtype == dtypes.string:
return gen_parsing_ops.string_to_number(x, out_type=dtypes.float32)
return math_ops.cast(x, dtype=dtypes.float32)
def _py_float(x):
return float(x)
def int_(x=0, base=UNSPECIFIED):
if tensor_util.is_tensor(x):
return _tf_int(x, base)
return _py_int(x, base)
def _tf_int(x, base):
if base not in (10, UNSPECIFIED):
raise NotImplementedError('base {} not supported for int'.format(base))
# TODO(mdan): We shouldn't assume int32.
if x.dtype == dtypes.string:
return gen_parsing_ops.string_to_number(x, out_type=dtypes.int32)
return math_ops.cast(x, dtype=dtypes.int32)
def _py_int(x, base):
if base is UNSPECIFIED:
return int(x)
return int(x, base)
def len_(s):
if tensors.is_tensor_array(s):
return _tf_tensor_array_len(s)
elif tensors.is_tensor_list(s):
return _tf_tensor_list_len(s)
elif tensor_util.is_tensor(s):
return _tf_tensor_len(s)
return _py_len(s)
def _tf_tensor_array_len(s):
return s.size()
def _tf_tensor_list_len(s):
return list_ops.tensor_list_length(s)
def _tf_tensor_len(s):
"""Overload of len_ for Tensor arguments."""
# Statically shaped tensors: length is known ahead of time.
if s.shape.ndims and s.shape.dims[0].value is not None:
return s.shape.dims[0].value
# Static shape of unknown dimensions: use dynamic shape but statically
# check that it's not a scalar.
shape = array_ops.shape(s)
assert shape.shape, 'shape tensor of zero size? {}'.format(shape)
if shape.shape[0] == 0:
raise ValueError(
'len requires a non-scalar tensor, got one of shape {}'.format(shape))
if shape.shape.dims[0].value is not None:
return array_ops.shape(s)[0]
# Fully dynamic shape: use ops.
rank = array_ops.rank(s)
def raise_zero_rank_error():
msg = gen_string_ops.string_join(
['len requires non-zero rank, got ',
gen_string_ops.as_string(rank)])
with ops.control_dependencies([control_flow_ops.Assert(False, [msg])]):
return constant_op.constant(0, dtype=dtypes.int32)
return control_flow_ops.cond(rank > 0, lambda: array_ops.shape(s)[0],
raise_zero_rank_error)
def _py_len(s):
return len(s)
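# Examples (sketch, assuming the TF 1.x graph mode this module targets):
#   len_([1, 2, 3])                        # 3, plain Python len
#   len_(constant_op.constant([1, 2, 3]))  # 3, static shape -> Python int
#   len_(array_ops.placeholder(dtypes.int32))
#                                          # scalar int32 Tensor, fully dynamic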
def print_(*objects, **kwargs):
# Note: Python 2.6 doesn't support explicit keywords after starargs.
unknown_kwargs = tuple(
set(kwargs.keys()) - set(('sep', 'end', 'file', 'flush')))
if unknown_kwargs:
raise ValueError('invalid keyword arguments: {}'.format(unknown_kwargs))
# TODO(mdan): use logging_ops.Print when py_func is not supported.
return _tf_py_func_print(objects, kwargs)
def _tf_py_func_print(objects, kwargs):
"""Overload of print_ as a py_func implementation."""
override_kwargs = {k: v for k, v in kwargs.items() if v is not UNSPECIFIED}
if 'flush' not in override_kwargs:
# Defaulting to flushing the console in graph mode, which helps reduce
# garbled output in IPython.
override_kwargs['flush'] = True
def print_wrapper(*vals):
vals = tuple(v.numpy() if tensor_util.is_tensor(v) else v for v in vals)
if six.PY3:
# TensorFlow doesn't seem to generate Unicode when passing strings to
# py_func. This causes the print to add a "b'" wrapper to the output,
# which is probably never what you want.
vals = tuple(
v.decode('utf-8') if isinstance(v, bytes) else v for v in vals)
six.print_(*vals, **override_kwargs)
return py_func.wrap_py_func(
print_wrapper, None, objects, use_dummy_return=True)
def range_(start_or_stop, stop=UNSPECIFIED, step=UNSPECIFIED):
if any(tensor_util.is_tensor(s) for s in (start_or_stop, stop, step)):
return _tf_range(start_or_stop, stop, step)
return _py_range(start_or_stop, stop, step)
def _tf_range(start_or_stop, stop, step):
"""Overload of range_ that generates a TF range tensor."""
# Note: for static inputs (e.g. constants), tf.range errors out at graph
# construction time, instead of returning an empty tensor. Preventing the
# graph construction error aligns the semantics with Python.
# TODO(mdan): We should optimize this when a full tensor is not required.
if step is not UNSPECIFIED:
# TODO(mdan): Add argument coercion similar to other cases.
return math_ops.range(start_or_stop, stop, step)
if stop is not UNSPECIFIED:
stop = math_ops.maximum(start_or_stop, stop)
return math_ops.range(start_or_stop, stop)
start_or_stop = math_ops.maximum(start_or_stop, 0)
return math_ops.range(start_or_stop)
def _py_range(start_or_stop, stop, step):
if step is not UNSPECIFIED:
return range(start_or_stop, stop, step)
if stop is not UNSPECIFIED:
return range(start_or_stop, stop)
return range(start_or_stop)
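# Examples (sketch): range_ mirrors Python's range but emits a tf.range when
# any argument is a Tensor; the maximum() clamps above keep empty ranges from
# erroring out at graph-construction time:
#   range_(5)                            # range(0, 5)
#   range_(constant_op.constant(5))      # int32 Tensor [0, 1, 2, 3, 4]
#   range_(constant_op.constant(5), 2)   # empty Tensor, like Python's range(5, 2)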
SUPPORTED_BUILTINS = (abs, float, int, len, print, range)
if six.PY2:
SUPPORTED_BUILTINS += (xrange,)
BUILTIN_FUNCTIONS_MAP = {
'abs': abs_,
'float': float_,
'int': int_,
'len': len_,
'print': print_,
'range': range_,
# TODO(mdan): This might make more sense as tf.data.range.
'xrange': range_,
}
| apache-2.0 |
leedm777/ansible-modules-core | cloud/openstack/keystone_user.py | 14 | 13229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Based on Jimmy Tang's implementation
DOCUMENTATION = '''
---
module: keystone_user
version_added: "1.2"
short_description: Manage OpenStack Identity (keystone) users, tenants and roles
description:
- Manage users, tenants and roles from OpenStack.
options:
login_user:
description:
- login username to authenticate to keystone
required: false
default: admin
login_password:
description:
- Password of login user
required: false
default: 'yes'
login_tenant_name:
description:
- The tenant login_user belongs to
required: false
default: None
version_added: "1.3"
token:
description:
- The token to be used in case the password is not specified
required: false
default: None
endpoint:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
user:
description:
- The name of the user that has to be added to/removed from OpenStack
required: false
default: None
password:
description:
- The password to be assigned to the user
required: false
default: None
tenant:
description:
- The tenant name that has to be added/removed
required: false
default: None
tenant_description:
description:
- A description for the tenant
required: false
default: None
email:
description:
- An email address for the user
required: false
default: None
role:
description:
- The name of the role to be assigned or created
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
requirements:
- "python >= 2.6"
- python-keystoneclient
author: "Lorin Hochstein (@lorin)"
'''
EXAMPLES = '''
# Create a tenant
- keystone_user: tenant=demo tenant_description="Default Tenant"
# Create a user
- keystone_user: user=john tenant=demo password=secrete
# Apply the admin role to the john user in the demo tenant
- keystone_user: role=admin user=john tenant=demo
'''
try:
from keystoneclient.v2_0 import client
except ImportError:
keystoneclient_found = False
else:
keystoneclient_found = True
def authenticate(endpoint, token, login_user, login_password, login_tenant_name):
"""Return a keystone client object"""
if token:
return client.Client(endpoint=endpoint, token=token)
else:
return client.Client(auth_url=endpoint, username=login_user,
password=login_password, tenant_name=login_tenant_name)
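# Example (sketch; "ADMIN_TOKEN" is a placeholder): token auth bypasses the
# username/password/tenant triple entirely:
#   authenticate("http://127.0.0.1:35357/v2.0", "ADMIN_TOKEN", None, None, None)
# while passing token=None authenticates with the login_* credentials instead.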
def tenant_exists(keystone, tenant):
""" Return True if tenant already exists"""
return tenant in [x.name for x in keystone.tenants.list()]
def user_exists(keystone, user):
"""" Return True if user already exists"""
return user in [x.name for x in keystone.users.list()]
def get_tenant(keystone, name):
""" Retrieve a tenant by name"""
tenants = [x for x in keystone.tenants.list() if x.name == name]
count = len(tenants)
if count == 0:
raise KeyError("No keystone tenants with name %s" % name)
elif count > 1:
raise ValueError("%d tenants with name %s" % (count, name))
else:
return tenants[0]
def get_user(keystone, name):
""" Retrieve a user by name"""
users = [x for x in keystone.users.list() if x.name == name]
count = len(users)
if count == 0:
raise KeyError("No keystone users with name %s" % name)
elif count > 1:
raise ValueError("%d users with name %s" % (count, name))
else:
return users[0]
def get_role(keystone, name):
""" Retrieve a role by name"""
roles = [x for x in keystone.roles.list() if x.name == name]
count = len(roles)
if count == 0:
raise KeyError("No keystone roles with name %s" % name)
elif count > 1:
raise ValueError("%d roles with name %s" % (count, name))
else:
return roles[0]
def get_tenant_id(keystone, name):
return get_tenant(keystone, name).id
def get_user_id(keystone, name):
return get_user(keystone, name).id
def ensure_tenant_exists(keystone, tenant_name, tenant_description,
check_mode):
""" Ensure that a tenant exists.
Return (True, id) if a new tenant was created, (False, id) if it
already existed.
"""
# Check if tenant already exists
try:
tenant = get_tenant(keystone, tenant_name)
except KeyError:
# Tenant doesn't exist yet
pass
else:
if tenant.description == tenant_description:
return (False, tenant.id)
else:
# We need to update the tenant description
if check_mode:
return (True, tenant.id)
else:
tenant.update(description=tenant_description)
return (True, tenant.id)
# We now know we will have to create a new tenant
if check_mode:
return (True, None)
ks_tenant = keystone.tenants.create(tenant_name=tenant_name,
description=tenant_description,
enabled=True)
return (True, ks_tenant.id)
def ensure_tenant_absent(keystone, tenant, check_mode):
""" Ensure that a tenant does not exist
Return True if the tenant was removed, False if it didn't exist
in the first place
"""
if not tenant_exists(keystone, tenant):
return False
# We now know we will have to delete the tenant
if check_mode:
return True
keystone.tenants.delete(get_tenant_id(keystone, tenant))
return True
def ensure_user_exists(keystone, user_name, password, email, tenant_name,
check_mode):
""" Check if user exists
Return (True, id) if a new user was created, (False, id) if the user
already exists
"""
# Check if user already exists
try:
user = get_user(keystone, user_name)
except KeyError:
# User doesn't exist yet
pass
else:
# User does exist, we're done
return (False, user.id)
# We now know we will have to create a new user
if check_mode:
return (True, None)
tenant = get_tenant(keystone, tenant_name)
user = keystone.users.create(name=user_name, password=password,
email=email, tenant_id=tenant.id)
return (True, user.id)
def ensure_role_exists(keystone, user_name, tenant_name, role_name,
check_mode):
""" Check if role exists
Return (True, id) if a new role was created or if the role was newly
assigned to the user for the tenant. (False, id) if the role already
exists and was already assigned to the user for the tenant.
"""
# Check if the user has the role in the tenant
user = get_user(keystone, user_name)
tenant = get_tenant(keystone, tenant_name)
roles = [x for x in keystone.roles.roles_for_user(user, tenant)
if x.name == role_name]
count = len(roles)
if count == 1:
# If the role is in there, we are done
role = roles[0]
return (False, role.id)
elif count > 1:
# Too many roles with the same name, throw an error
raise ValueError("%d roles with name %s" % (count, role_name))
# At this point, we know we will need to make changes
if check_mode:
return (True, None)
# Get the role if it exists
try:
role = get_role(keystone, role_name)
except KeyError:
# Role doesn't exist yet
role = keystone.roles.create(role_name)
# Associate the role with the user in the tenant
keystone.roles.add_user_role(user, role, tenant)
return (True, role.id)
def ensure_user_absent(keystone, user, check_mode):
raise NotImplementedError("Not yet implemented")
def ensure_role_absent(keystone, user, tenant, role, check_mode):
raise NotImplementedError("Not yet implemented")
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
tenant_description=dict(required=False),
email=dict(required=False),
user=dict(required=False),
tenant=dict(required=False),
password=dict(required=False),
role=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
endpoint=dict(required=False,
default="http://127.0.0.1:35357/v2.0"),
token=dict(required=False),
login_user=dict(required=False),
login_password=dict(required=False),
login_tenant_name=dict(required=False)
))
# keystone operations themselves take an endpoint, not a keystone auth_url
del(argument_spec['auth_url'])
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['token', 'login_user'],
['token', 'login_password'],
['token', 'login_tenant_name']]
)
if not keystoneclient_found:
module.fail_json(msg="the python-keystoneclient module is required")
user = module.params['user']
password = module.params['password']
tenant = module.params['tenant']
tenant_description = module.params['tenant_description']
email = module.params['email']
role = module.params['role']
state = module.params['state']
endpoint = module.params['endpoint']
token = module.params['token']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_tenant_name = module.params['login_tenant_name']
keystone = authenticate(endpoint, token, login_user, login_password, login_tenant_name)
check_mode = module.check_mode
try:
d = dispatch(keystone, user, password, tenant, tenant_description,
email, role, state, endpoint, token, login_user,
login_password, check_mode)
except Exception, e:
if check_mode:
# If we have a failure in check mode
module.exit_json(changed=True,
msg="exception: %s" % e)
else:
module.fail_json(msg="exception: %s" % e)
else:
module.exit_json(**d)
def dispatch(keystone, user=None, password=None, tenant=None,
tenant_description=None, email=None, role=None,
state="present", endpoint=None, token=None, login_user=None,
login_password=None, check_mode=False):
""" Dispatch to the appropriate method.
Returns a dict that will be passed to exit_json
tenant  user  role   state
------  ----  ----   --------
X                    present   ensure_tenant_exists
X                    absent    ensure_tenant_absent
X       X            present   ensure_user_exists
X       X            absent    ensure_user_absent
X       X     X      present   ensure_role_exists
X       X     X      absent    ensure_role_absent
"""
changed = False
id = None
if tenant and not user and not role and state == "present":
changed, id = ensure_tenant_exists(keystone, tenant,
tenant_description, check_mode)
elif tenant and not user and not role and state == "absent":
changed = ensure_tenant_absent(keystone, tenant, check_mode)
elif tenant and user and not role and state == "present":
changed, id = ensure_user_exists(keystone, user, password,
email, tenant, check_mode)
elif tenant and user and not role and state == "absent":
changed = ensure_user_absent(keystone, user, check_mode)
elif tenant and user and role and state == "present":
changed, id = ensure_role_exists(keystone, user, tenant, role,
check_mode)
elif tenant and user and role and state == "absent":
changed = ensure_role_absent(keystone, user, tenant, role, check_mode)
else:
# Should never reach here
raise ValueError("Code should never reach here")
return dict(changed=changed, id=id)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
XCage15/privacyidea | privacyidea/lib/applications/luks.py | 3 | 3609 | # -*- coding: utf-8 -*-
#
# privacyIDEA
# Jul 18, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from privacyidea.lib.applications import MachineApplicationBase
import logging
log = logging.getLogger(__name__)
from privacyidea.lib.crypto import geturandom
import binascii
from privacyidea.lib.token import get_tokens
class MachineApplication(MachineApplicationBase):
"""
This is the application for LUKS.
required options:
slot
partition
"""
application_name = "luks"
@classmethod
def get_authentication_item(cls,
token_type,
serial,
challenge=None, options=None,
filter_param=None):
"""
        :param token_type: the type of the token. At the moment
                           we only support Yubikeys with token type "TOTP".
:param serial: the serial number of the token.
The challenge response token needs to start with
"UBOM".
:param challenge: A challenge, for which a response get calculated.
If none is presented, we create one.
:type challenge: hex string
:return auth_item: For Yubikey token type it
returns a dictionary with a "challenge" and
a "response".
"""
ret = {}
options = options or {}
if token_type.lower() == "totp" and serial.startswith("UBOM"):
            # create a challenge of 32 bytes
            # Although the yubikey is capable of doing 64 byte challenges,
            # the hmac module calculates different responses for 64 bytes.
if challenge is None:
challenge = geturandom(32)
challenge_hex = binascii.hexlify(challenge)
else:
challenge_hex = challenge
ret["challenge"] = challenge_hex
# create the response. We need to get
# the HMAC key and calculate a HMAC response for
# the challenge
toks = get_tokens(serial=serial, active=True)
if len(toks) == 1:
# tokenclass is a TimeHmacTokenClass
(_r, _p, otp, _c) = toks[0].get_otp(challenge=challenge_hex,
do_truncation=False)
ret["response"] = otp
else:
log.info("Token %r, type %r is not supported by"
"LUKS application module" % (serial, token_type))
return ret
@classmethod
def get_options(cls):
"""
returns a dictionary with a list of required and optional options
"""
return {'required': [],
'optional': ['slot', 'partition']}
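# Illustrative sketch (not part of the original module): fetching a
# challenge/response pair for a hypothetical Yubikey with serial "UBOM1234"
# that has an active TOTP token registered in privacyIDEA:
#
#   auth_item = MachineApplication.get_authentication_item(
#       "totp", "UBOM1234",
#       options={"slot": "1", "partition": "/dev/sda2"})
#   # auth_item holds a hex "challenge" and the OTP "response"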
| agpl-3.0 |
snar5/Responder | servers/www/footer.py | 1 | 1220 | page_footer = """
<footer>Happy Hunting </footer>
<div id="hashModal" class="modal">
<div class="modal-content">
<div class="modal-header">
<span class="close">×</span>
<h2>Captured Hashes</h2>
</div>
<div class="modal-body">
<p id='dumped_hashes'></p>
</div>
<div class="modal-footer">
<h3>Copy the hashes from above</h3>
</div>
</div>
</div>
<script>
// Modal window section: get the modal element and its controls
var modal = document.getElementById('hashModal');
var btnHash = document.getElementById("btnHash");
var span = document.getElementsByClassName("close")[0];
var btnClearCounters = document.getElementById("btnClearCounters");
btnHash.onclick = function() {
doDumpHashes();
modal.style.display = "block";
}
btnClearCounters.onclick = function() {
 var ans = window.confirm("This Action Cannot be Undone!");
if (ans) {
doClearPoisons();
}
}
span.onclick = function() {
modal.style.display = "none";
}
window.onclick = function(event) {
if (event.target == modal) {
modal.style.display = "none";
}
}
// End Modal Window
$( "#tabs").tabs();
$( "#accordion" ).accordion();
</script>
</div>
</body>
</html>
"""
| gpl-3.0 |
flwh/KK_mt6589_iq451 | prebuilts/python/linux-x86/2.7.5/lib/python2.7/distutils/command/install_headers.py | 251 | 1346 | """distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
__revision__ = "$Id$"
from distutils.core import Command
# XXX force is never used
class install_headers(Command):
description = "install C/C++ header files"
user_options = [('install-dir=', 'd',
"directory to install header files to"),
('force', 'f',
"force installation (overwrite existing files)"),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def run(self):
headers = self.distribution.headers
if not headers:
return
self.mkpath(self.install_dir)
for header in headers:
(out, _) = self.copy_file(header, self.install_dir)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
# class install_headers
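# Illustrative sketch (not part of this module): the command is driven by the
# `headers` argument to setup(); a hypothetical setup.py might declare:
#
#   from distutils.core import setup
#   setup(name='mypkg', version='1.0',
#         headers=['include/mypkg.h'])
#
# after which `python setup.py install_headers` copies the listed files
# into the configured include directory.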
| gpl-2.0 |
tensorflow/agents | tf_agents/policies/boltzmann_policy.py | 1 | 3143 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy implementation that applies temperature to a distribution."""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
from typing import Optional, Text
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.policies import tf_policy
from tf_agents.typing import types
@gin.configurable
class BoltzmannPolicy(tf_policy.TFPolicy):
"""Returns boltzmann samples of a given policy.
The wrapped policy must expose a distribution parameterized by logits.
"""
def __init__(self,
policy: tf_policy.TFPolicy,
temperature: types.FloatOrReturningFloat = 1.0,
name: Optional[Text] = None):
"""Builds a BoltzmannPolicy wrapping the given policy.
Args:
policy: A policy implementing the tf_policy.TFPolicy interface, using
a distribution parameterized by logits.
temperature: Tensor or function that returns the temperature for sampling
when `action` is called. This parameter applies when the action spec is
discrete. If the temperature is close to 0.0 this is equivalent to
calling `tf.argmax` on the output of the network.
name: The name of this policy. All variables in this module will fall
under that name. Defaults to the class name.
"""
super(BoltzmannPolicy, self).__init__(
policy.time_step_spec,
policy.action_spec,
policy.policy_state_spec,
policy.info_spec,
emit_log_probability=policy.emit_log_probability,
clip=False,
name=name)
self._temperature = temperature
self._wrapped_policy = policy
def _variables(self):
return self._wrapped_policy.variables()
def _get_temperature_value(self):
if callable(self._temperature):
return self._temperature()
return self._temperature
def _apply_temperature(self, dist):
"""Change the action distribution to incorporate the temperature."""
logits = dist.logits / self._get_temperature_value()
return dist.copy(logits=logits)
def _distribution(self, time_step, policy_state):
distribution_step = self._wrapped_policy.distribution(
time_step, policy_state)
if self._temperature is None:
return distribution_step
action_dist = tf.nest.map_structure(self._apply_temperature,
distribution_step.action)
return distribution_step._replace(action=action_dist)
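# Illustrative sketch (not part of this module): `_apply_temperature` divides
# the logits by the temperature, so for hypothetical logits [2.0, 0.0] a
# temperature of 2.0 yields [1.0, 0.0] and a flatter softmax, while a
# temperature near 0 approaches greedy argmax behavior:
#
#   q_policy = ...  # any TFPolicy whose distribution exposes logits
#   exploring = BoltzmannPolicy(q_policy, temperature=10.0)
#   action_step = exploring.action(time_step)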
| apache-2.0 |
proxysh/Safejumper-for-Desktop | buildlinux/env32/lib/python2.7/site-packages/twisted/trial/test/test_pyunitcompat.py | 13 | 7670 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
import sys
import traceback
from zope.interface import implementer
from twisted.python.failure import Failure
from twisted.trial.unittest import SynchronousTestCase, PyUnitResultAdapter
from twisted.trial.itrial import IReporter, ITestCase
import unittest as pyunit
class PyUnitTestTests(SynchronousTestCase):
class PyUnitTest(pyunit.TestCase):
def test_pass(self):
pass
def setUp(self):
self.original = self.PyUnitTest('test_pass')
self.test = ITestCase(self.original)
def test_callable(self):
"""
Tests must be callable in order to be used with Python's unittest.py.
"""
self.assertTrue(callable(self.test),
"%r is not callable." % (self.test,))
class PyUnitResultTests(SynchronousTestCase):
"""
Tests to show that PyUnitResultAdapter wraps TestResult objects from the
standard library 'unittest' module in such a way as to make them usable and
useful from Trial.
"""
# Once erroneous is ported to Python 3 this can be replaced with
# erroneous.ErrorTest:
class ErrorTest(SynchronousTestCase):
"""
A test case which has a L{test_foo} which will raise an error.
@ivar ran: boolean indicating whether L{test_foo} has been run.
"""
ran = False
def test_foo(self):
"""
Set C{self.ran} to True and raise a C{ZeroDivisionError}
"""
self.ran = True
1/0
def test_dontUseAdapterWhenReporterProvidesIReporter(self):
"""
The L{PyUnitResultAdapter} is only used when the result passed to
C{run} does *not* provide L{IReporter}.
"""
@implementer(IReporter)
class StubReporter(object):
"""
A reporter which records data about calls made to it.
@ivar errors: Errors passed to L{addError}.
@ivar failures: Failures passed to L{addFailure}.
"""
def __init__(self):
self.errors = []
self.failures = []
def startTest(self, test):
"""
Do nothing.
"""
def stopTest(self, test):
"""
Do nothing.
"""
def addError(self, test, error):
"""
Record the error.
"""
self.errors.append(error)
test = self.ErrorTest("test_foo")
result = StubReporter()
test.run(result)
self.assertIsInstance(result.errors[0], Failure)
def test_success(self):
class SuccessTest(SynchronousTestCase):
ran = False
def test_foo(s):
s.ran = True
test = SuccessTest('test_foo')
result = pyunit.TestResult()
test.run(result)
self.assertTrue(test.ran)
self.assertEqual(1, result.testsRun)
self.assertTrue(result.wasSuccessful())
def test_failure(self):
class FailureTest(SynchronousTestCase):
ran = False
def test_foo(s):
s.ran = True
s.fail('boom!')
test = FailureTest('test_foo')
result = pyunit.TestResult()
test.run(result)
self.assertTrue(test.ran)
self.assertEqual(1, result.testsRun)
self.assertEqual(1, len(result.failures))
self.assertFalse(result.wasSuccessful())
def test_error(self):
test = self.ErrorTest('test_foo')
result = pyunit.TestResult()
test.run(result)
self.assertTrue(test.ran)
self.assertEqual(1, result.testsRun)
self.assertEqual(1, len(result.errors))
self.assertFalse(result.wasSuccessful())
def test_setUpError(self):
class ErrorTest(SynchronousTestCase):
ran = False
def setUp(self):
1/0
def test_foo(s):
s.ran = True
test = ErrorTest('test_foo')
result = pyunit.TestResult()
test.run(result)
self.assertFalse(test.ran)
self.assertEqual(1, result.testsRun)
self.assertEqual(1, len(result.errors))
self.assertFalse(result.wasSuccessful())
def test_tracebackFromFailure(self):
"""
Errors added through the L{PyUnitResultAdapter} have the same traceback
information as if there were no adapter at all.
"""
try:
1/0
except ZeroDivisionError:
exc_info = sys.exc_info()
f = Failure()
pyresult = pyunit.TestResult()
result = PyUnitResultAdapter(pyresult)
result.addError(self, f)
self.assertEqual(pyresult.errors[0][1],
''.join(traceback.format_exception(*exc_info)))
def test_traceback(self):
"""
As test_tracebackFromFailure, but covering more code.
"""
class ErrorTest(SynchronousTestCase):
exc_info = None
def test_foo(self):
try:
1/0
except ZeroDivisionError:
self.exc_info = sys.exc_info()
raise
test = ErrorTest('test_foo')
result = pyunit.TestResult()
test.run(result)
# We can't test that the tracebacks are equal, because Trial's
# machinery inserts a few extra frames on the top and we don't really
# want to trim them off without an extremely good reason.
#
# So, we just test that the result's stack ends with the
# exception's stack.
expected_stack = ''.join(traceback.format_tb(test.exc_info[2]))
observed_stack = '\n'.join(result.errors[0][1].splitlines()[:-1])
self.assertEqual(expected_stack.strip(),
observed_stack[-len(expected_stack):].strip())
def test_tracebackFromCleanFailure(self):
"""
Errors added through the L{PyUnitResultAdapter} have the same
traceback information as if there were no adapter at all, even
if the Failure that held the information has been cleaned.
"""
try:
1/0
except ZeroDivisionError:
exc_info = sys.exc_info()
f = Failure()
f.cleanFailure()
pyresult = pyunit.TestResult()
result = PyUnitResultAdapter(pyresult)
result.addError(self, f)
self.assertEqual(pyresult.errors[0][1],
''.join(traceback.format_exception(*exc_info)))
def test_trialSkip(self):
"""
Skips using trial's skipping functionality are reported as skips in
the L{pyunit.TestResult}.
"""
class SkipTest(SynchronousTestCase):
def test_skip(self):
1/0
test_skip.skip = "Let's skip!"
test = SkipTest('test_skip')
result = pyunit.TestResult()
test.run(result)
self.assertEqual(result.skipped, [(test, "Let's skip!")])
def test_pyunitSkip(self):
"""
Skips using pyunit's skipping functionality are reported as skips in
the L{pyunit.TestResult}.
"""
class SkipTest(SynchronousTestCase):
@pyunit.skip("skippy")
def test_skip(self):
1/0
test = SkipTest('test_skip')
result = pyunit.TestResult()
test.run(result)
self.assertEqual(result.skipped, [(test, "skippy")])
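# Illustrative sketch (not part of these tests): the adapter under test is
# what lets any trial SynchronousTestCase report into a stdlib TestResult,
# the same pattern the tests above exercise:
#
#   result = pyunit.TestResult()
#   PyUnitTestTests('test_callable').run(result)
#   assert result.wasSuccessful()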
| gpl-2.0 |
simonsfoundation/CaImAn | caiman/utils/image_preprocessing_keras.py | 2 | 44852 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
From KERAS package
Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from functools import partial
import multiprocessing.pool
import numpy as np
import os
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import threading
import warnings
try:
    from keras import backend as K
except ImportError:
    pass
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
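# Illustrative note (not part of the original module): the offset conjugation
# above makes a transform act about the image center instead of the origin.
# For a hypothetical 4x4 image, o_x = o_y = 4/2 + 0.5 = 2.5, so a matrix M
# becomes T(2.5, 2.5) . M . T(-2.5, -2.5):
#
#   m = transform_matrix_offset_center(np.eye(3), 4, 4)
#   # m is the identity again, since T(c) . I . T(-c) == I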
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Apply the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=3,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
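# Illustrative sketch (not part of the original module): flip_axis reverses a
# tensor along one axis, e.g. a horizontal flip of a (channel, row, col)
# image is flip_axis(x, 2):
#
#   x = np.arange(6).reshape((1, 2, 3))
#   flip_axis(x, 2)[0, 0].tolist()  # [2, 1, 0]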
def random_multiplication(x, range_mult):
    """Multiplies the input tensor by a random factor drawn
    uniformly from [range_mult[0], range_mult[1]]."""
    x = x * np.random.uniform(range_mult[0], range_mult[1])
    return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
scale: Whether to rescale image values
to be within [0, 255].
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format.
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def load_img(path, grayscale=False, target_size=None):
"""Loads an image into PIL format.
# Arguments
path: Path to image file
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size:
hw_tuple = (target_size[1], target_size[0])
if img.size != hw_tuple:
img = img.resize(hw_tuple)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
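# Illustrative sketch (not part of the original module): list_pictures walks a
# directory tree and keeps files whose extension matches the `ext` pattern;
# for a hypothetical dataset folder:
#
#   pics = list_pictures('/data/cats', ext='jpg|png')
#   # ['/data/cats/img001.jpg', '/data/cats/img002.png', ...]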
class ImageDataGenerator(object):
"""Generate minibatches of image data with real-time data augmentation.
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
zca_whitening: apply ZCA whitening.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channels.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided. This is
applied after the `preprocessing_function` (if any provided)
but before any other transformation.
preprocessing_function: function that will be implied on each input.
The function will run before any other modification on it.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
(the depth) is at index 1, in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be "channels_last".
        random_mult_range: if nonzero, a tuple (low, high); with probability
            0.5 the image is multiplied by a random factor drawn uniformly
            from [low, high].
    """
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
random_mult_range=0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.random_mult_range = random_mult_range
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('`data_format` should be `"channels_last"` (channel after row and '
'column) or `"channels_first"` (channel before row and column). '
'Received arg: ', data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow(self, x, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='png'):
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False):
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links)
def standardize(self, x):
"""Apply the normalization configuration to a batch of inputs.
# Arguments
x: batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
# x is a single image, so it doesn't have image number at index 0
img_channel_axis = self.channel_axis - 1
if self.samplewise_center:
x -= np.mean(x, axis=img_channel_axis, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_center`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + 1e-7)
else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_std_normalization`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augment a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.pi / 180 * \
np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
if self.height_shift_range:
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range) * x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range) * x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx = np.random.uniform(
self.zoom_range[0], self.zoom_range[1], 1)[0]
zy = zx.copy()
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(
transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(
transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(
transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.random_mult_range != 0:
if np.random.random() < 0.5:
                x = random_multiplication(x, self.random_mult_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Fits internal statistics to some sample data.
Required for featurewise_center, featurewise_std_normalization
and zca_whitening.
# Arguments
x: Numpy array, the data to fit on. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Whether to fit on randomly augmented samples
rounds: If `augment`,
how many augmentation passes to do over the data
seed: random seed.
# Raises
ValueError: in case of invalid input `x`.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' + self.data_format + '" '
'(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' + str(x.shape) +
' (' + str(x.shape[self.channel_axis]) + ' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(tuple([rounds * x.shape[0]] +
list(x.shape)[1:]), dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
self.principal_components = np.dot(
np.dot(u, np.diag(1. / np.sqrt(s + self.zca_epsilon))), u.T)
class Iterator(object):
"""Abstract base class for image data iterators.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
def reset(self):
self.batch_index = 0
def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
# Ensure self.batch_index is 0.
self.reset()
while True:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
if self.batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (self.batch_index * batch_size) % n
if n > current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png'):
if y is not None and len(x) != len(y):
raise ValueError('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' +
str(channels_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(channels_axis) + '. '
'However, it was passed an array with shape ' + str(self.x.shape) +
' (' + str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(
x.shape[0], batch_size, shuffle, seed)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array, current_index, current_batch_size = next(
self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = np.zeros(
tuple([current_batch_size] + list(self.x.shape)[1:]), dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(
1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
def _count_valid_files_in_directory(directory, white_list_formats, follow_links):
"""Count files with extension in `white_list_formats` contained in a directory.
# Arguments
directory: absolute path to the directory containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
samples = 0
for _, _, files in _recursive_list(directory):
for fname in files:
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
samples += 1
return samples
def _list_valid_filenames_in_directory(directory, white_list_formats,
class_indices, follow_links):
"""List paths of files in `subdir` relative from `directory` whose extensions are in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
class_indices: dictionary mapping a class name to its index.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be ["class1/file1.jpg", "class1/file2.jpg", ...]).
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
classes = []
filenames = []
subdir = os.path.basename(directory)
basedir = os.path.dirname(directory)
for root, _, files in _recursive_list(directory):
for fname in files:
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
classes.append(class_indices[subdir])
# add filename relative to directory
absolute_path = os.path.join(root, fname)
filenames.append(os.path.relpath(absolute_path, basedir))
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of sudirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'ppm'}
# first, count the number of samples and classes
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_class = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_class))
# second, build an index of the images in the different class subfolders
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(
self.samples, batch_size, shuffle, seed)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array, current_index, current_batch_size = next(
self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = np.zeros((current_batch_size,) +
self.image_shape, dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(
1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_class), dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
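# Illustrative sketch (not part of the original module): DirectoryIterator is
# normally created through flow_from_directory; each subfolder of the given
# directory is treated as one class:
#
#   gen = ImageDataGenerator(rescale=1. / 255)
#   it = gen.flow_from_directory('/data/train', target_size=(128, 128),
#                                class_mode='categorical', batch_size=16)
#   batch_x, batch_y = next(it)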
| gpl-2.0 |
callmetaste/PHCpack | src/Python/PHCpy3/phcpy/sets.py | 1 | 59284 | """
This module exports routines of PHCpack to manipulate
positive dimensional solution sets of polynomial systems.
"""
def standard_embed(nvar, topdim, pols):
"""
Given in pols a list of strings representing polynomials in nvar
variables, with coefficients in standard double precision,
this function returns an embedding of pols of dimension topdim.
The topdim is the top dimension which equals the expected highest
dimension of a component of the solution set of the system of polynomials.
"""
from phcpy.phcpy2c3 import py2c_syscon_clear_standard_system
from phcpy.phcpy2c3 \
import py2c_syscon_initialize_number_of_standard_polynomials
from phcpy.phcpy2c3 import py2c_syscon_store_standard_polynomial
from phcpy.phcpy2c3 import py2c_syscon_load_standard_polynomial
from phcpy.phcpy2c3 import py2c_embed_standard_system
py2c_syscon_clear_standard_system()
nequ = len(pols)
if nequ > nvar:
py2c_syscon_initialize_number_of_standard_polynomials(nequ)
nbres = nequ
else:
py2c_syscon_initialize_number_of_standard_polynomials(nvar)
nbres = nvar
for i in range(0, nequ):
nchar = len(pols[i])
py2c_syscon_store_standard_polynomial(nchar, nvar, i+1, pols[i])
py2c_embed_standard_system(topdim)
result = []
for i in range(1, nbres+topdim+1):
result.append(py2c_syscon_load_standard_polynomial(i))
return result
def dobldobl_embed(nvar, topdim, pols):
"""
Given in pols a list of strings that represent polynomials in nvar
variables, with coefficients in double double precision,
this function returns an embedding of pols of dimension topdim.
The topdim is the top dimension which equals the expected highest
dimension of a component of the solution set of the system of polynomials.
"""
from phcpy.phcpy2c3 import py2c_syscon_clear_dobldobl_system
from phcpy.phcpy2c3 \
import py2c_syscon_initialize_number_of_dobldobl_polynomials
from phcpy.phcpy2c3 import py2c_syscon_store_dobldobl_polynomial
from phcpy.phcpy2c3 import py2c_syscon_load_dobldobl_polynomial
from phcpy.phcpy2c3 import py2c_embed_dobldobl_system
py2c_syscon_clear_dobldobl_system()
nequ = len(pols)
if nequ > nvar:
py2c_syscon_initialize_number_of_dobldobl_polynomials(nequ)
nbres = nequ
else:
py2c_syscon_initialize_number_of_dobldobl_polynomials(nvar)
nbres = nvar
for i in range(0, nequ):
nchar = len(pols[i])
py2c_syscon_store_dobldobl_polynomial(nchar, nvar, i+1, pols[i])
py2c_embed_dobldobl_system(topdim)
result = []
for i in range(1, nbres+topdim+1):
result.append(py2c_syscon_load_dobldobl_polynomial(i))
return result
def quaddobl_embed(nvar, topdim, pols):
"""
Given in pols a list of strings that represent polynomials in nvar
variables, with coefficients in quad double precision,
this function returns an embedding of pols of dimension topdim.
The topdim is the top dimension which equals the expected highest
dimension of a component of the solution set of the system of polynomials.
"""
from phcpy.phcpy2c3 import py2c_syscon_clear_quaddobl_system
from phcpy.phcpy2c3 \
import py2c_syscon_initialize_number_of_quaddobl_polynomials
from phcpy.phcpy2c3 import py2c_syscon_store_quaddobl_polynomial
from phcpy.phcpy2c3 import py2c_syscon_load_quaddobl_polynomial
from phcpy.phcpy2c3 import py2c_embed_quaddobl_system
py2c_syscon_clear_quaddobl_system()
nequ = len(pols)
if nequ > nvar:
py2c_syscon_initialize_number_of_quaddobl_polynomials(nequ)
nbres = nequ
else:
py2c_syscon_initialize_number_of_quaddobl_polynomials(nvar)
nbres = nvar
for i in range(0, nequ):
nchar = len(pols[i])
py2c_syscon_store_quaddobl_polynomial(nchar, nvar, i+1, pols[i])
py2c_embed_quaddobl_system(topdim)
result = []
for i in range(1, nbres+topdim+1):
result.append(py2c_syscon_load_quaddobl_polynomial(i))
return result
def embed(nvar, topdim, pols, precision='d'):
"""
Given in pols a list of strings that represent polynomials in nvar
variables, this function returns an embedding of pols of dimension topdim.
The topdim is the top dimension which equals the expected highest
dimension of a component of the solution set of the system of polynomials.
The default precision of the coefficients is 'd', for standard double
precision. For double double and quad double precision, set the value
of precision to 'dd' or 'qd' respectively.
"""
if(precision == 'd'):
return standard_embed(nvar, topdim, pols)
elif(precision == 'dd'):
return dobldobl_embed(nvar, topdim, pols)
elif(precision == 'qd'):
return quaddobl_embed(nvar, topdim, pols)
else:
print('wrong argument for precision')
return None
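# Illustrative sketch (not part of the original module): embedding the twisted
# cubic (a 1-dimensional set in 3-space) adds one slack variable zz1 and one
# random hyperplane to square up the system:
#
#   pols = ['x^2 - y;', 'x^3 - z;']
#   emb = embed(3, 1, pols)
#   # emb has 4 polynomials in the variables x, y, z, zz1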
def witness_set_of_hypersurface(nvar, hpol, precision='d'):
"""
Given in hpol the string representation of a polynomial
in nvar variables (ending with a semicolon),
on return is an embedded system and its solutions
which represents a witness set for hpol.
The number of solutions on return should equal
the degree of the polynomial in hpol.
Three different precisions are supported, by default double ('d'),
or otherwise double double ('dd') or quad double ('qd').
"""
if(precision == 'd'):
from phcpy.phcpy2c3 import py2c_standard_witset_of_hypersurface
from phcpy.interface import load_standard_system
from phcpy.interface import load_standard_solutions
py2c_standard_witset_of_hypersurface(nvar, len(hpol), hpol)
return (load_standard_system(), load_standard_solutions())
elif(precision == 'dd'):
from phcpy.phcpy2c3 import py2c_dobldobl_witset_of_hypersurface
from phcpy.interface import load_dobldobl_system
from phcpy.interface import load_dobldobl_solutions
py2c_dobldobl_witset_of_hypersurface(nvar, len(hpol), hpol)
return (load_dobldobl_system(), load_dobldobl_solutions())
elif(precision == 'qd'):
from phcpy.phcpy2c3 import py2c_quaddobl_witset_of_hypersurface
from phcpy.interface import load_quaddobl_system
from phcpy.interface import load_quaddobl_solutions
py2c_quaddobl_witset_of_hypersurface(nvar, len(hpol), hpol)
return (load_quaddobl_system(), load_quaddobl_solutions())
else:
print('wrong argument for precision')
return None
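# Illustrative sketch (not part of the original module): a witness set of a
# degree 3 hypersurface in 3-space consists of the embedded system and three
# witness points, one per degree:
#
#   (wsys, wsols) = witness_set_of_hypersurface(3, 'x*y*z - 1;')
#   # len(wsols) == 3, the degree of x*y*z - 1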
def drop_variable_from_polynomials(pols, svar):
"""
Removes the variable with symbol in the string svar
from the list pols of strings that represented
polynomials in several variables.
"""
from phcpy.phcpy2c3 import py2c_syscon_standard_drop_variable_by_name
from phcpy.phcpy2c3 import py2c_syscon_remove_symbol_name
from phcpy.interface import store_standard_system, load_standard_system
store_standard_system(pols)
py2c_syscon_standard_drop_variable_by_name(len(svar), svar)
py2c_syscon_remove_symbol_name(len(svar), svar)
return load_standard_system()
def drop_coordinate_from_solutions(sols, nbvar, svar):
"""
Removes the variable with symbol in the string svar
from the list sols of strings that represent solutions
in nbvar variables.
"""
from phcpy.phcpy2c3 import py2c_syscon_clear_symbol_table
from phcpy.phcpy2c3 import py2c_solcon_standard_drop_coordinate_by_name
from phcpy.phcpy2c3 import py2c_syscon_remove_symbol_name
from phcpy.interface import store_standard_solutions
from phcpy.interface import load_standard_solutions
py2c_syscon_clear_symbol_table()
store_standard_solutions(nbvar, sols)
py2c_solcon_standard_drop_coordinate_by_name(len(svar), svar)
py2c_syscon_remove_symbol_name(len(svar), svar)
return load_standard_solutions()
def standard_double_cascade_step(embsys, esols, tasks=0):
"""
Given in embsys an embedded polynomial system and
    solutions with nonzero slack variables in esols,
does one step in the homotopy cascade,
with standard double precision arithmetic.
The list on return contains witness points on
lower dimensional solution components.
"""
from phcpy.phcpy2c3 import py2c_copy_standard_container_to_start_system
from phcpy.phcpy2c3 import py2c_copy_standard_container_to_start_solutions
from phcpy.phcpy2c3 import py2c_standard_cascade_homotopy
from phcpy.phcpy2c3 import py2c_solve_by_standard_homotopy_continuation
from phcpy.phcpy2c3 import py2c_solcon_clear_standard_solutions
from phcpy.phcpy2c3 import py2c_copy_standard_target_solutions_to_container
from phcpy.interface import store_standard_system
from phcpy.interface import store_standard_solutions
from phcpy.interface import load_standard_solutions
store_standard_system(embsys)
py2c_copy_standard_container_to_start_system()
store_standard_solutions(len(embsys), esols)
py2c_copy_standard_container_to_start_solutions()
py2c_standard_cascade_homotopy()
py2c_solve_by_standard_homotopy_continuation(tasks)
py2c_solcon_clear_standard_solutions()
py2c_copy_standard_target_solutions_to_container()
return load_standard_solutions()
def double_double_cascade_step(embsys, esols, tasks=0):
"""
Given in embsys an embedded polynomial system and
    solutions with nonzero slack variables in esols,
does one step in the homotopy cascade,
with double double precision arithmetic.
The list on return contains witness points on
lower dimensional solution components.
"""
from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_start_system
from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_start_solutions
from phcpy.phcpy2c3 import py2c_dobldobl_cascade_homotopy
from phcpy.phcpy2c3 import py2c_solve_by_dobldobl_homotopy_continuation
from phcpy.phcpy2c3 import py2c_solcon_clear_dobldobl_solutions
from phcpy.phcpy2c3 import py2c_copy_dobldobl_target_solutions_to_container
from phcpy.interface import store_dobldobl_system
from phcpy.interface import store_dobldobl_solutions
from phcpy.interface import load_dobldobl_solutions
store_dobldobl_system(embsys)
py2c_copy_dobldobl_container_to_start_system()
store_dobldobl_solutions(len(embsys), esols)
py2c_copy_dobldobl_container_to_start_solutions()
py2c_dobldobl_cascade_homotopy()
py2c_solve_by_dobldobl_homotopy_continuation(tasks)
py2c_solcon_clear_dobldobl_solutions()
py2c_copy_dobldobl_target_solutions_to_container()
return load_dobldobl_solutions()
def quad_double_cascade_step(embsys, esols, tasks=0):
"""
Given in embsys an embedded polynomial system and
    solutions with nonzero slack variables in esols,
does one step in the homotopy cascade,
with quad double precision arithmetic.
The list on return contains witness points on
lower dimensional solution components.
"""
from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_start_system
from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_start_solutions
from phcpy.phcpy2c3 import py2c_quaddobl_cascade_homotopy
from phcpy.phcpy2c3 import py2c_solve_by_quaddobl_homotopy_continuation
from phcpy.phcpy2c3 import py2c_solcon_clear_quaddobl_solutions
from phcpy.phcpy2c3 import py2c_copy_quaddobl_target_solutions_to_container
from phcpy.interface import store_quaddobl_system
from phcpy.interface import store_quaddobl_solutions
from phcpy.interface import load_quaddobl_solutions
store_quaddobl_system(embsys)
py2c_copy_quaddobl_container_to_start_system()
store_quaddobl_solutions(len(embsys), esols)
py2c_copy_quaddobl_container_to_start_solutions()
py2c_quaddobl_cascade_homotopy()
py2c_solve_by_quaddobl_homotopy_continuation(tasks)
py2c_solcon_clear_quaddobl_solutions()
py2c_copy_quaddobl_target_solutions_to_container()
return load_quaddobl_solutions()
def cascade_step(embsys, esols, precision='d', tasks=0):
"""
Given in embsys an embedded polynomial system and
solutions with nonzero slack variables in esols,
does one step in the homotopy cascade, with precision
'd' : standard double precision (1.1e-15 or 2^(-53)),
'dd' : double double precision (4.9e-32 or 2^(-104)),
'qd' : quad double precision (1.2e-63 or 2^(-209)).
The list on return contains witness points on
lower dimensional solution components.
"""
if(precision == 'd'):
return standard_double_cascade_step(embsys, esols, tasks)
elif(precision == 'dd'):
return double_double_cascade_step(embsys, esols, tasks)
elif(precision == 'qd'):
return quad_double_cascade_step(embsys, esols, tasks)
else:
print('wrong argument for precision')
return None
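# A minimal usage sketch (hypothetical names): given an embedded system
# emb and the solutions nonsols with nonzero slack variables,
#
#     wpts = cascade_step(emb, nonsols, precision='d')
#
# yields candidate witness points one dimension down.
# See test_cascade() below for a complete worked example.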
def test_cascade():
"""
    Does one cascade step on a simple example.
In the top embedding we first find the 2-dimensional
solution set x = 1. In the cascade step we compute
the three witness points on the twisted cubic.
"""
from phcpy.solver import solve
pols = ['(x - 1)*(y-x^2);', \
'(x - 1)*(z-x^3);', \
'(x^2 - 1)*(y-x^2);' ]
print(pols)
embpols = embed(3, 2, pols)
print('the embedded system :')
print(embpols)
input('hit enter to continue...')
sols = solve(embpols, silent=True)
for sol in sols:
print(sol)
print('number of solutions :', len(sols))
input('hit enter to continue...')
from phcpy.solutions import filter_zero_coordinates, filter_regular
sols0 = filter_zero_coordinates(sols, 'zz1', 1.0e-8, 'select')
sols1 = filter_zero_coordinates(sols, 'zz1', 1.0e-8, 'remove')
print('solutions with zero slack variables :')
for sol in sols0:
print(sol)
print('solutions with nonzero slack variables :')
for sol in sols1:
print(sol)
print(len(sols), '=' , len(sols0), '+', len(sols1))
rs1 = filter_regular(sols1, 1.0e-8, 'select')
print('number of nonsolutions :', len(rs1))
input('hit enter to continue...')
print('... running cascade step ...')
s2c = cascade_step(embpols, rs1, precision='d')
print('... after running the cascade ...')
for sol in s2c:
print(sol)
wp1 = drop_variable_from_polynomials(embpols, 'zz2')
print('the 1-dimensional embedding :')
for pol in wp1:
print(pol)
ws1 = drop_coordinate_from_solutions(s2c, len(embpols), 'zz2')
ws1f1 = filter_zero_coordinates(ws1, 'zz1', 1.0e-8, 'select')
ws1f2 = filter_regular(ws1f1, 1.0e-8, 'select')
print('the witness points :')
for sol in ws1f2:
print(sol)
def standard_membertest(wsys, gpts, dim, point, \
evatol=1.0e-6, memtol=1.0e-6, verbose=True):
"""
Applies the homotopy membership test for a point to belong to
a witness set of dimension dim, given by an embedding polynomial
system in wsys, with corresponding generic points in gpts.
The coordinates of the test point are given in the list point,
as a list of doubles, with the real and imaginary part of each
coordinate of the point. By default, verbose is True.
Calculations happen in standard double precision.
The default values for the evaluation (evatol) and the membership
(memtol) allow for singular values at the end points of the paths
in the homotopy membership test.
"""
from phcpy.interface import store_standard_system as storesys
from phcpy.interface import store_standard_solutions as storesols
from phcpy.phcpy2c3 import py2c_witset_standard_membertest as membtest
storesys(wsys)
storesols(len(wsys), gpts)
    nvr = len(point)//2  # two doubles (real, imaginary) per coordinate
strpt = str(point)
nbc = len(strpt)
result = membtest(int(verbose), nvr, dim, nbc, evatol, memtol, strpt)
return (result[2] == 1)
def dobldobl_membertest(wsys, gpts, dim, point, \
evatol=1.0e-6, memtol=1.0e-6, verbose=True):
"""
Applies the homotopy membership test for a point to belong to
a witness set of dimension dim, given by an embedding polynomial
system in wsys, with corresponding generic points in gpts.
The coordinates of the test point are given in the list point,
as a list of doubles, with the real and imaginary part of each
coordinate of the point. By default, verbose is True.
Calculations happen in double double precision.
The default values for the evaluation (evatol) and the membership
(memtol) allow for singular values at the end points of the paths
in the homotopy membership test.
"""
from phcpy.interface import store_dobldobl_system as storesys
from phcpy.interface import store_dobldobl_solutions as storesols
from phcpy.phcpy2c3 import py2c_witset_dobldobl_membertest as membtest
storesys(wsys)
storesols(len(wsys), gpts)
    nvr = len(point)//4  # four doubles per coordinate in double double precision
strpt = str(point)
nbc = len(strpt)
result = membtest(int(verbose), nvr, dim, nbc, evatol, memtol, strpt)
return (result[2] == 1)
def quaddobl_membertest(wsys, gpts, dim, point, \
evatol=1.0e-6, memtol=1.0e-6, verbose=True):
"""
Applies the homotopy membership test for a point to belong to
a witness set of dimension dim, given by an embedding polynomial
system in wsys, with corresponding generic points in gpts.
The coordinates of the test point are given in the list point,
as a list of doubles, with the real and imaginary part of each
coordinate of the point. By default, verbose is True.
Calculations happen in quad double precision.
The default values for the evaluation (evatol) and the membership
(memtol) allow for singular values at the end points of the paths
in the homotopy membership test.
"""
from phcpy.interface import store_quaddobl_system as storesys
from phcpy.interface import store_quaddobl_solutions as storesols
from phcpy.phcpy2c3 import py2c_witset_quaddobl_membertest as membtest
storesys(wsys)
storesols(len(wsys), gpts)
    nvr = len(point)//8  # eight doubles per coordinate in quad double precision
strpt = str(point)
nbc = len(strpt)
result = membtest(int(verbose), nvr, dim, nbc, evatol, memtol, strpt)
return (result[2] == 1)
def membertest(wsys, gpts, dim, point, evatol=1.0e-6, memtol=1.0e-6, \
verbose=True, precision='d'):
"""
Applies the homotopy membership test for a point to belong to
a witness set of dimension dim, given by an embedding polynomial
system in wsys, with corresponding generic points in gpts.
The coordinates of the test point are given in the list point,
as a list of doubles, with the real and imaginary part of each
coordinate of the point. By default, verbose is True, and the
working precision is double 'd'. Other levels of precision are
double double precision 'dd' and quad double precision 'qd'.
There are two tolerances: evatol is the tolerance on the residual
of the evaluation of the polynomial equations at the test point.
    If the residual of the evaluation is not less than evatol,
then the membertest returns False. Otherwise, the homotopy
membership test is called and the memtol is used to compare
the coordinates of the point with the newly computed generic points.
If there is a match between the coordinates within the given
tolerance memtol, then True is returned.
"""
if(precision == 'd'):
return standard_membertest(wsys, gpts, dim, point, \
evatol, memtol, verbose)
elif(precision == 'dd'):
return dobldobl_membertest(wsys, gpts, dim, point, \
evatol, memtol, verbose)
elif(precision == 'qd'):
return quaddobl_membertest(wsys, gpts, dim, point, \
evatol, memtol, verbose)
else:
print('wrong argument for precision')
return None
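# A minimal sketch of the point encoding (hypothetical values): in standard
# double precision, the point (1, 1, 1) in 3-space is passed as six doubles,
# alternating real and imaginary parts,
#
#     onset = membertest(wsys, gpts, 1, [1, 0, 1, 0, 1, 0])
#
# while 'dd' and 'qd' take respectively 4 and 8 doubles per coordinate.
# See test_member() below for a complete worked example.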
def test_member(prc='d'):
"""
To test the membership, we take the twisted cubic.
"""
twisted = ['x^2 - y;', 'x^3 - z;']
twiste1 = embed(3, 1, twisted)
twiste1[0] = 'x + y + z - x - y - z + ' + twiste1[0]
from phcpy.solver import solve
twtsols = solve(twiste1, precision=prc)
for sol in twtsols:
print(sol)
if(prc == 'd'):
inpoint = [1, 0, 1, 0, 1, 0]
outpoint = [1, 0, 1, 0, 2, 0]
elif(prc == 'dd'):
inpoint = [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0]
outpoint = [1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0]
elif(prc == 'qd'):
inpoint = [1, 0, 0, 0, 0, 0, 0, 0, \
1, 0, 0, 0, 0, 0, 0, 0, \
1, 0, 0, 0, 0, 0, 0, 0]
outpoint = [1, 0, 0, 0, 0, 0, 0, 0, \
1, 0, 0, 0, 0, 0, 0, 0, \
2, 0, 0, 0, 0, 0, 0, 0, ]
print(membertest(twiste1, twtsols, 1, inpoint, precision=prc))
print(membertest(twiste1, twtsols, 1, outpoint, precision=prc))
def standard_decomposition(deg):
"""
Returns the decomposition as a list of labels of witness points
on the components, computed in standard double precision.
"""
from phcpy.phcpy2c3 import py2c_factor_number_of_standard_components
from phcpy.phcpy2c3 import py2c_factor_witness_points_of_standard_component
from phcpy.phcpy2c3 import py2c_factor_standard_trace_sum_difference as stf
nbcmp = py2c_factor_number_of_standard_components()
result = []
for i in range(1, nbcmp+1):
compnt = py2c_factor_witness_points_of_standard_component(deg, i)
tsd = stf(deg, i, len(compnt), compnt)
result.append((eval(compnt), tsd))
return result
def dobldobl_decomposition(deg):
"""
Returns the decomposition as a list of labels of witness points
on the components, computed in double double precision.
"""
from phcpy.phcpy2c3 import py2c_factor_number_of_dobldobl_components
from phcpy.phcpy2c3 import py2c_factor_witness_points_of_dobldobl_component
from phcpy.phcpy2c3 import py2c_factor_dobldobl_trace_sum_difference as dtf
nbcmp = py2c_factor_number_of_dobldobl_components()
result = []
for i in range(1, nbcmp+1):
compnt = py2c_factor_witness_points_of_dobldobl_component(deg, i)
tsd = dtf(deg, i, len(compnt), compnt)
result.append((eval(compnt), tsd))
return result
def quaddobl_decomposition(deg):
"""
Returns the decomposition as a list of labels of witness points
on the components, computed in quad double precision.
"""
from phcpy.phcpy2c3 import py2c_factor_number_of_quaddobl_components
from phcpy.phcpy2c3 import py2c_factor_witness_points_of_quaddobl_component
from phcpy.phcpy2c3 import py2c_factor_quaddobl_trace_sum_difference as qtf
nbcmp = py2c_factor_number_of_quaddobl_components()
result = []
for i in range(1, nbcmp+1):
compnt = py2c_factor_witness_points_of_quaddobl_component(deg, i)
tsd = qtf(deg, i, len(compnt), compnt)
result.append((eval(compnt), tsd))
return result
def decomposition(deg, precision='d'):
"""
Returns the decomposition as a list of labels of witness points
on the components, computed in precision 'd', 'dd', or 'qd',
respectively for double, double double, or quad double.
"""
if(precision == 'd'):
return standard_decomposition(deg)
elif(precision == 'dd'):
return dobldobl_decomposition(deg)
elif(precision == 'qd'):
return quaddobl_decomposition(deg)
else:
print('wrong level of precision')
return None
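# The decomposition is a list of tuples, one per component: the labels of
# the witness points on that component and the trace sum difference, e.g.
# (with hypothetical numbers) [([1], 8.3e-15), ([2, 3], 1.6e-14)] for a
# curve that splits into a line and a conic.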
def standard_monodromy_breakup(embsys, esols, dim, verbose=True, nbloops=0):
"""
Applies the monodromy breakup algorithm in standard double precision
    to factor the dim-dimensional algebraic set represented by the
    embedded system embsys and its solutions esols.
If verbose is False, then no output is written.
If nbloops equals zero, then the user is prompted to give
the maximum number of loops.
"""
from phcpy.phcpy2c3 import py2c_factor_set_standard_to_mute
from phcpy.phcpy2c3 import py2c_factor_set_standard_to_verbose
from phcpy.phcpy2c3 import py2c_factor_standard_assign_labels
from phcpy.phcpy2c3 import py2c_factor_initialize_standard_monodromy
from phcpy.phcpy2c3 import py2c_factor_initialize_standard_sampler
from phcpy.phcpy2c3 import py2c_factor_standard_trace_grid_diagnostics
from phcpy.phcpy2c3 import py2c_factor_set_standard_trace_slice
from phcpy.phcpy2c3 import py2c_factor_store_standard_gammas
from phcpy.phcpy2c3 import py2c_factor_standard_track_paths
from phcpy.phcpy2c3 import py2c_factor_store_standard_solutions
from phcpy.phcpy2c3 import py2c_factor_restore_standard_solutions
from phcpy.phcpy2c3 import py2c_factor_new_standard_slices
from phcpy.phcpy2c3 import py2c_factor_swap_standard_slices
from phcpy.phcpy2c3 import py2c_factor_permutation_after_standard_loop
from phcpy.phcpy2c3 import py2c_factor_number_of_standard_components
from phcpy.phcpy2c3 import py2c_factor_update_standard_decomposition
from phcpy.phcpy2c3 import py2c_solcon_write_standard_solutions
from phcpy.phcpy2c3 import py2c_solcon_clear_standard_solutions
from phcpy.interface import store_standard_solutions
if(verbose):
print('... applying monodromy factorization with standard doubles ...')
py2c_factor_set_standard_to_verbose()
else:
py2c_factor_set_standard_to_mute()
deg = len(esols)
nvar = len(embsys)
if(verbose):
print('dim =', dim)
store_standard_solutions(nvar, esols)
py2c_factor_standard_assign_labels(nvar, deg)
if(verbose):
py2c_solcon_write_standard_solutions()
py2c_factor_initialize_standard_sampler(dim)
if(nbloops == 0):
strnbloops = input('give the maximum number of loops : ')
nbloops = int(strnbloops)
py2c_factor_initialize_standard_monodromy(nbloops, deg, dim)
py2c_factor_store_standard_solutions()
if(verbose):
print('... initializing the grid in standard double precision ...')
for i in range(1, 3):
py2c_factor_set_standard_trace_slice(i)
py2c_factor_store_standard_gammas(nvar)
py2c_factor_standard_track_paths()
py2c_factor_store_standard_solutions()
py2c_factor_restore_standard_solutions()
py2c_factor_swap_standard_slices()
(err, dis) = py2c_factor_standard_trace_grid_diagnostics()
print('The diagnostics of the trace grid :')
print(' largest error on the samples :', err)
print(' smallest distance between the samples :', dis)
for i in range(1, nbloops+1):
if(verbose):
print('... starting loop %d ...' % i)
py2c_factor_new_standard_slices(dim, nvar)
py2c_factor_store_standard_gammas(nvar)
py2c_factor_standard_track_paths()
py2c_solcon_clear_standard_solutions()
py2c_factor_store_standard_gammas(nvar)
py2c_factor_standard_track_paths()
py2c_factor_store_standard_solutions()
sprm = py2c_factor_permutation_after_standard_loop(deg)
if(verbose):
perm = eval(sprm)
print('the permutation :', perm)
nb0 = py2c_factor_number_of_standard_components()
done = py2c_factor_update_standard_decomposition(deg, len(sprm), sprm)
nb1 = py2c_factor_number_of_standard_components()
if(verbose):
print('number of factors : %d -> %d' % (nb0, nb1))
deco = decomposition(deg)
print('decomposition :', deco)
if(done == 1):
break
py2c_factor_restore_standard_solutions()
def dobldobl_monodromy_breakup(embsys, esols, dim, verbose=True, nbloops=0):
"""
Applies the monodromy breakup algorithm in double double precision
    to factor the dim-dimensional algebraic set represented by the
    embedded system embsys and its solutions esols.
If verbose is False, then no output is written.
If nbloops equals zero, then the user is prompted to give
the maximum number of loops.
"""
from phcpy.phcpy2c3 import py2c_factor_set_dobldobl_to_mute
from phcpy.phcpy2c3 import py2c_factor_set_dobldobl_to_verbose
from phcpy.phcpy2c3 import py2c_factor_dobldobl_assign_labels
from phcpy.phcpy2c3 import py2c_factor_initialize_dobldobl_monodromy
from phcpy.phcpy2c3 import py2c_factor_initialize_dobldobl_sampler
from phcpy.phcpy2c3 import py2c_factor_dobldobl_trace_grid_diagnostics
from phcpy.phcpy2c3 import py2c_factor_set_dobldobl_trace_slice
from phcpy.phcpy2c3 import py2c_factor_store_dobldobl_gammas
from phcpy.phcpy2c3 import py2c_factor_dobldobl_track_paths
from phcpy.phcpy2c3 import py2c_factor_store_dobldobl_solutions
from phcpy.phcpy2c3 import py2c_factor_restore_dobldobl_solutions
from phcpy.phcpy2c3 import py2c_factor_new_dobldobl_slices
from phcpy.phcpy2c3 import py2c_factor_swap_dobldobl_slices
from phcpy.phcpy2c3 import py2c_factor_permutation_after_dobldobl_loop
from phcpy.phcpy2c3 import py2c_factor_number_of_dobldobl_components
from phcpy.phcpy2c3 import py2c_factor_update_dobldobl_decomposition
from phcpy.phcpy2c3 import py2c_solcon_write_dobldobl_solutions
from phcpy.phcpy2c3 import py2c_solcon_clear_dobldobl_solutions
from phcpy.interface import store_dobldobl_solutions
if(verbose):
print('... applying monodromy factorization with double doubles ...')
py2c_factor_set_dobldobl_to_verbose()
else:
py2c_factor_set_dobldobl_to_mute()
deg = len(esols)
nvar = len(embsys)
if(verbose):
print('nvar =', nvar, 'dim =', dim, 'deg =', deg)
store_dobldobl_solutions(nvar, esols)
py2c_factor_dobldobl_assign_labels(nvar, deg)
if(verbose):
py2c_solcon_write_dobldobl_solutions()
py2c_factor_initialize_dobldobl_sampler(dim)
if(nbloops == 0):
strnbloops = input('give the maximum number of loops : ')
nbloops = int(strnbloops)
py2c_factor_initialize_dobldobl_monodromy(nbloops, deg, dim)
py2c_factor_store_dobldobl_solutions()
if(verbose):
print('... initializing the grid ...')
for i in range(1, 3):
py2c_factor_set_dobldobl_trace_slice(i)
py2c_factor_store_dobldobl_gammas(nvar)
py2c_factor_dobldobl_track_paths()
py2c_factor_store_dobldobl_solutions()
py2c_factor_restore_dobldobl_solutions()
py2c_factor_swap_dobldobl_slices()
(err, dis) = py2c_factor_dobldobl_trace_grid_diagnostics()
print('The diagnostics of the trace grid :')
print(' largest error on the samples :', err)
print(' smallest distance between the samples :', dis)
for i in range(1, nbloops+1):
if(verbose):
print('... starting loop %d ...' % i)
py2c_factor_new_dobldobl_slices(dim, nvar)
py2c_factor_store_dobldobl_gammas(nvar)
py2c_factor_dobldobl_track_paths()
py2c_solcon_clear_dobldobl_solutions()
py2c_factor_store_dobldobl_gammas(nvar)
py2c_factor_dobldobl_track_paths()
py2c_factor_store_dobldobl_solutions()
sprm = py2c_factor_permutation_after_dobldobl_loop(deg)
if(verbose):
perm = eval(sprm)
print('the permutation :', perm)
nb0 = py2c_factor_number_of_dobldobl_components()
done = py2c_factor_update_dobldobl_decomposition(deg, len(sprm), sprm)
nb1 = py2c_factor_number_of_dobldobl_components()
if(verbose):
print('number of factors : %d -> %d' % (nb0, nb1))
deco = decomposition(deg, 'dd')
print('decomposition :', deco)
if(done == 1):
break
py2c_factor_restore_dobldobl_solutions()
def quaddobl_monodromy_breakup(embsys, esols, dim, verbose=True, nbloops=0):
"""
Applies the monodromy breakup algorithm in quad double precision
    to factor the dim-dimensional algebraic set represented by the
    embedded system embsys and its solutions esols.
If verbose is False, then no output is written.
If nbloops equals zero, then the user is prompted to give
the maximum number of loops.
"""
from phcpy.phcpy2c3 import py2c_factor_set_quaddobl_to_mute
from phcpy.phcpy2c3 import py2c_factor_set_quaddobl_to_verbose
from phcpy.phcpy2c3 import py2c_factor_quaddobl_assign_labels
from phcpy.phcpy2c3 import py2c_factor_initialize_quaddobl_monodromy
from phcpy.phcpy2c3 import py2c_factor_initialize_quaddobl_sampler
from phcpy.phcpy2c3 import py2c_factor_quaddobl_trace_grid_diagnostics
from phcpy.phcpy2c3 import py2c_factor_set_quaddobl_trace_slice
from phcpy.phcpy2c3 import py2c_factor_store_quaddobl_gammas
from phcpy.phcpy2c3 import py2c_factor_quaddobl_track_paths
from phcpy.phcpy2c3 import py2c_factor_store_quaddobl_solutions
from phcpy.phcpy2c3 import py2c_factor_restore_quaddobl_solutions
from phcpy.phcpy2c3 import py2c_factor_new_quaddobl_slices
from phcpy.phcpy2c3 import py2c_factor_swap_quaddobl_slices
from phcpy.phcpy2c3 import py2c_factor_permutation_after_quaddobl_loop
from phcpy.phcpy2c3 import py2c_factor_number_of_quaddobl_components
from phcpy.phcpy2c3 import py2c_factor_update_quaddobl_decomposition
from phcpy.phcpy2c3 import py2c_solcon_write_quaddobl_solutions
from phcpy.phcpy2c3 import py2c_solcon_clear_quaddobl_solutions
from phcpy.interface import store_quaddobl_solutions
if(verbose):
print('... applying monodromy factorization with quad doubles ...')
py2c_factor_set_quaddobl_to_verbose()
else:
py2c_factor_set_quaddobl_to_mute()
deg = len(esols)
nvar = len(embsys)
if(verbose):
print('dim =', dim)
store_quaddobl_solutions(nvar, esols)
py2c_factor_quaddobl_assign_labels(nvar, deg)
if(verbose):
py2c_solcon_write_quaddobl_solutions()
py2c_factor_initialize_quaddobl_sampler(dim)
if(nbloops == 0):
strnbloops = input('give the maximum number of loops : ')
nbloops = int(strnbloops)
py2c_factor_initialize_quaddobl_monodromy(nbloops, deg, dim)
py2c_factor_store_quaddobl_solutions()
if(verbose):
print('... initializing the grid ...')
for i in range(1, 3):
py2c_factor_set_quaddobl_trace_slice(i)
py2c_factor_store_quaddobl_gammas(nvar)
py2c_factor_quaddobl_track_paths()
py2c_factor_store_quaddobl_solutions()
py2c_factor_restore_quaddobl_solutions()
py2c_factor_swap_quaddobl_slices()
(err, dis) = py2c_factor_quaddobl_trace_grid_diagnostics()
print('The diagnostics of the trace grid :')
print(' largest error on the samples :', err)
print(' smallest distance between the samples :', dis)
for i in range(1, nbloops+1):
if(verbose):
print('... starting loop %d ...' % i)
py2c_factor_new_quaddobl_slices(dim, nvar)
py2c_factor_store_quaddobl_gammas(nvar)
py2c_factor_quaddobl_track_paths()
py2c_solcon_clear_quaddobl_solutions()
py2c_factor_store_quaddobl_gammas(nvar)
py2c_factor_quaddobl_track_paths()
py2c_factor_store_quaddobl_solutions()
sprm = py2c_factor_permutation_after_quaddobl_loop(deg)
if(verbose):
perm = eval(sprm)
print('the permutation :', perm)
nb0 = py2c_factor_number_of_quaddobl_components()
done = py2c_factor_update_quaddobl_decomposition(deg, len(sprm), sprm)
nb1 = py2c_factor_number_of_quaddobl_components()
if(verbose):
print('number of factors : %d -> %d' % (nb0, nb1))
deco = decomposition(deg, 'qd')
print('decomposition :', deco)
if(done == 1):
break
py2c_factor_restore_quaddobl_solutions()
def monodromy_breakup(embsys, esols, dim, verbose=True, nbloops=0, prec='d'):
"""
    Applies the monodromy breakup algorithm to factor the dim-dimensional
    set represented by the embedded system embsys and its solutions esols.
If verbose is False, then no output is written.
If nbloops equals zero, then the user is prompted to give
the maximum number of loops.
Three different levels of precision are supported: double precision 'd'
(for the value for prec) is the default, the two other precisions are
double double precision 'dd' and quad double precision 'qd'.
"""
if(prec == 'd'):
standard_monodromy_breakup(embsys, esols, dim, verbose, nbloops)
elif(prec == 'dd'):
dobldobl_monodromy_breakup(embsys, esols, dim, verbose, nbloops)
elif(prec == 'qd'):
quaddobl_monodromy_breakup(embsys, esols, dim, verbose, nbloops)
else:
print('wrong argument for precision')
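# Note that monodromy_breakup returns nothing: it updates the internal
# decomposition, which can be retrieved afterwards with decomposition(),
# called with the degree and the same precision, as factor() does below.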
def factor(dim, witsys, witsols, verbose=True, nbloops=20, precision='d'):
"""
Applies monodromy to factor an equidimensional algebraic set,
    given as a witness set, with the embedded polynomials in witsys,
and corresponding generic points in witsols.
The dimension of the algebraic set is given in dim.
The default precision is double 'd'. Other valid values for precision
are 'dd' for double double, or 'qd' for quad double.
"""
if(precision == 'd'):
standard_monodromy_breakup(witsys, witsols, dim, verbose, nbloops)
return decomposition(len(witsols))
elif(precision == 'dd'):
dobldobl_monodromy_breakup(witsys, witsols, dim, verbose, nbloops)
return decomposition(len(witsols), 'dd')
    elif(precision == 'qd'):
quaddobl_monodromy_breakup(witsys, witsols, dim, verbose, nbloops)
return decomposition(len(witsols), 'qd')
else:
print('wrong level of precision')
return None
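# A minimal usage sketch (hypothetical names): given a witness set
# (wsys, wsols) of dimension 1 for a curve,
#
#     deco = factor(1, wsys, wsols, nbloops=10)
#
# returns the same list of (witness point labels, trace sum difference)
# tuples as decomposition().  See test_factor() below for a worked example.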
def test_monodromy(prc='d'):
"""
Runs a test on applying monodromy loops
to factor a curve into irreducible components.
"""
from phcpy.solver import solve
pols = ['(x^2 - y)*(x-y);', 'x^3 - z;']
embsys = embed(3, 1, pols, prc)
# patch : make sure zz1 is last symbol!
embsys[0] = 'x - x + y - y + z - z + ' + embsys[0]
print(embsys)
sols = solve(embsys, silent=True, precision=prc)
    # for sol in sols: print(sol)
print('the degree is', len(sols))
monodromy_breakup(embsys, sols, 1, prec=prc)
def test_factor():
"""
Simple test on the factor method.
"""
hyp = '(x+1)*(x^2 + y^2 + 1);'
(wsys, wsols) = witness_set_of_hypersurface(2, hyp)
fac = factor(1, wsys, wsols)
print(fac)
def standard_diagonal_homotopy(dim1, sys1, esols1, dim2, sys2, esols2):
"""
Defines a diagonal homotopy to intersect the witness sets defined
by (sys1, esols1) and (sys2, esols2), respectively of dimensions
dim1 and dim2. The systems sys1 and sys2 are assumed to be square
and with as many slack variables as the dimension of the solution sets.
The data is stored in standard double precision.
"""
from phcpy.interface import store_standard_system as storesys
from phcpy.interface import store_standard_solutions as storesols
from phcpy.phcpy2c3 import py2c_copy_standard_container_to_target_system
from phcpy.phcpy2c3 import py2c_copy_standard_container_to_target_solutions
from phcpy.phcpy2c3 import py2c_copy_standard_container_to_start_system
from phcpy.phcpy2c3 import py2c_copy_standard_container_to_start_solutions
from phcpy.phcpy2c3 import py2c_standard_diagonal_homotopy
from phcpy.phcpy2c3 import py2c_syscon_number_of_symbols
from phcpy.phcpy2c3 import py2c_syscon_string_of_symbols
from phcpy.phcpy2c3 import py2c_diagonal_symbols_doubler
storesys(sys1)
symbols = py2c_syscon_string_of_symbols()
nbsymbs = py2c_syscon_number_of_symbols()
print('number of symbols :', nbsymbs)
print('names of variables :', symbols)
storesols(len(sys1), esols1)
if(dim1 >= dim2):
py2c_copy_standard_container_to_target_system()
py2c_copy_standard_container_to_target_solutions()
else:
py2c_copy_standard_container_to_start_system()
py2c_copy_standard_container_to_start_solutions()
storesys(sys2)
storesols(len(sys2), esols2)
if(dim1 >= dim2):
py2c_copy_standard_container_to_start_system()
py2c_copy_standard_container_to_start_solutions()
else:
py2c_copy_standard_container_to_target_system()
py2c_copy_standard_container_to_target_solutions()
if(dim1 >= dim2):
py2c_standard_diagonal_homotopy(dim1, dim2)
else:
py2c_standard_diagonal_homotopy(dim2, dim1)
py2c_diagonal_symbols_doubler(nbsymbs-dim1, dim1, len(symbols), symbols)
def dobldobl_diagonal_homotopy(dim1, sys1, esols1, dim2, sys2, esols2):
"""
Defines a diagonal homotopy to intersect the witness sets defined
by (sys1, esols1) and (sys2, esols2), respectively of dimensions
dim1 and dim2. The systems sys1 and sys2 are assumed to be square
and with as many slack variables as the dimension of the solution sets.
The data is stored in double double precision.
"""
from phcpy.interface import store_dobldobl_system as storesys
from phcpy.interface import store_dobldobl_solutions as storesols
from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_target_system
from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_target_solutions
from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_start_system
from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_start_solutions
from phcpy.phcpy2c3 import py2c_dobldobl_diagonal_homotopy
from phcpy.phcpy2c3 import py2c_syscon_number_of_symbols
from phcpy.phcpy2c3 import py2c_syscon_string_of_symbols
from phcpy.phcpy2c3 import py2c_diagonal_symbols_doubler
storesys(sys1)
symbols = py2c_syscon_string_of_symbols()
nbsymbs = py2c_syscon_number_of_symbols()
print('number of symbols :', nbsymbs)
print('names of variables :', symbols)
storesols(len(sys1), esols1)
if(dim1 >= dim2):
py2c_copy_dobldobl_container_to_target_system()
py2c_copy_dobldobl_container_to_target_solutions()
else:
py2c_copy_dobldobl_container_to_start_system()
py2c_copy_dobldobl_container_to_start_solutions()
storesys(sys2)
storesols(len(sys2), esols2)
if(dim1 >= dim2):
py2c_copy_dobldobl_container_to_start_system()
py2c_copy_dobldobl_container_to_start_solutions()
else:
py2c_copy_dobldobl_container_to_target_system()
py2c_copy_dobldobl_container_to_target_solutions()
if(dim1 >= dim2):
py2c_dobldobl_diagonal_homotopy(dim1, dim2)
else:
py2c_dobldobl_diagonal_homotopy(dim2, dim1)
py2c_diagonal_symbols_doubler(nbsymbs-dim1, dim1, len(symbols), symbols)
def quaddobl_diagonal_homotopy(dim1, sys1, esols1, dim2, sys2, esols2):
"""
Defines a diagonal homotopy to intersect the witness sets defined
by (sys1, esols1) and (sys2, esols2), respectively of dimensions
dim1 and dim2. The systems sys1 and sys2 are assumed to be square
and with as many slack variables as the dimension of the solution sets.
The data is stored in quad double precision.
"""
from phcpy.interface import store_quaddobl_system as storesys
from phcpy.interface import store_quaddobl_solutions as storesols
from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_target_system
from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_target_solutions
from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_start_system
from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_start_solutions
from phcpy.phcpy2c3 import py2c_quaddobl_diagonal_homotopy
from phcpy.phcpy2c3 import py2c_syscon_number_of_symbols
from phcpy.phcpy2c3 import py2c_syscon_string_of_symbols
from phcpy.phcpy2c3 import py2c_diagonal_symbols_doubler
storesys(sys1)
symbols = py2c_syscon_string_of_symbols()
nbsymbs = py2c_syscon_number_of_symbols()
print('number of symbols :', nbsymbs)
print('names of variables :', symbols)
storesols(len(sys1), esols1)
if(dim1 >= dim2):
py2c_copy_quaddobl_container_to_target_system()
py2c_copy_quaddobl_container_to_target_solutions()
else:
py2c_copy_quaddobl_container_to_start_system()
py2c_copy_quaddobl_container_to_start_solutions()
storesys(sys2)
storesols(len(sys2), esols2)
if(dim1 >= dim2):
py2c_copy_quaddobl_container_to_start_system()
py2c_copy_quaddobl_container_to_start_solutions()
else:
py2c_copy_quaddobl_container_to_target_system()
py2c_copy_quaddobl_container_to_target_solutions()
if(dim1 >= dim2):
py2c_quaddobl_diagonal_homotopy(dim1, dim2)
else:
py2c_quaddobl_diagonal_homotopy(dim2, dim1)
py2c_diagonal_symbols_doubler(nbsymbs-dim1, dim1, len(symbols), symbols)
def standard_diagonal_cascade_solutions(dim1, dim2):
"""
Defines the start solutions in the cascade to start the diagonal
homotopy to intersect a set of dimension dim1 with another set
of dimension dim2, in standard double precision. For this to work,
standard_diagonal_homotopy must have been executed successfully.
"""
from phcpy.phcpy2c3 import py2c_standard_diagonal_cascade_solutions
if(dim1 >= dim2):
py2c_standard_diagonal_cascade_solutions(dim1, dim2)
else:
py2c_standard_diagonal_cascade_solutions(dim2, dim1)
def dobldobl_diagonal_cascade_solutions(dim1, dim2):
"""
Defines the start solutions in the cascade to start the diagonal
homotopy to intersect a set of dimension dim1 with another set
of dimension dim2, in double double precision. For this to work,
dobldobl_diagonal_homotopy must have been executed successfully.
"""
from phcpy.phcpy2c3 import py2c_dobldobl_diagonal_cascade_solutions
if(dim1 >= dim2):
py2c_dobldobl_diagonal_cascade_solutions(dim1, dim2)
else:
py2c_dobldobl_diagonal_cascade_solutions(dim2, dim1)
def quaddobl_diagonal_cascade_solutions(dim1, dim2):
"""
Defines the start solutions in the cascade to start the diagonal
homotopy to intersect a set of dimension dim1 with another set
of dimension dim2, in quad double precision. For this to work,
quaddobl_diagonal_homotopy must have been executed successfully.
"""
from phcpy.phcpy2c3 import py2c_quaddobl_diagonal_cascade_solutions
if(dim1 >= dim2):
py2c_quaddobl_diagonal_cascade_solutions(dim1, dim2)
else:
py2c_quaddobl_diagonal_cascade_solutions(dim2, dim1)
def standard_start_diagonal_cascade(gamma=0, tasks=0):
"""
Does the path tracking to start a diagonal cascade in standard double
precision. For this to work, the functions standard_diagonal_homotopy
and standard_diagonal_cascade_solutions must be executed successfully.
If gamma equals 0 on input, then a random gamma constant is generated,
otherwise, the given complex gamma will be used in the homotopy.
Multitasking is available, and activated by the tasks parameter.
    Returns the target system and its corresponding solutions.
"""
from phcpy.phcpy2c3 import py2c_create_standard_homotopy
from phcpy.phcpy2c3 import py2c_create_standard_homotopy_with_gamma
from phcpy.phcpy2c3 import py2c_solve_by_standard_homotopy_continuation
from phcpy.phcpy2c3 import py2c_solcon_clear_standard_solutions
from phcpy.phcpy2c3 import py2c_syscon_clear_standard_system
from phcpy.phcpy2c3 import py2c_copy_standard_target_solutions_to_container
from phcpy.phcpy2c3 import py2c_copy_standard_target_system_to_container
from phcpy.interface import load_standard_solutions
from phcpy.interface import load_standard_system
if(gamma == 0):
py2c_create_standard_homotopy()
else:
py2c_create_standard_homotopy_with_gamma(gamma.real, gamma.imag)
py2c_solve_by_standard_homotopy_continuation(tasks)
py2c_solcon_clear_standard_solutions()
py2c_syscon_clear_standard_system()
py2c_copy_standard_target_solutions_to_container()
# from phcpy.phcpy2c3 import py2c_write_standard_target_system
    # print('the standard target system :')
# py2c_write_standard_target_system()
py2c_copy_standard_target_system_to_container()
tsys = load_standard_system()
sols = load_standard_solutions()
return (tsys, sols)
def dobldobl_start_diagonal_cascade(gamma=0, tasks=0):
"""
Does the path tracking to start a diagonal cascade in double double
precision. For this to work, the functions dobldobl_diagonal_homotopy
and dobldobl_diagonal_cascade_solutions must be executed successfully.
If gamma equals 0 on input, then a random gamma constant is generated,
otherwise, the given complex gamma will be used in the homotopy.
Multitasking is available, and activated by the tasks parameter.
    Returns the target system and its corresponding solutions.
"""
from phcpy.phcpy2c3 import py2c_create_dobldobl_homotopy
from phcpy.phcpy2c3 import py2c_create_dobldobl_homotopy_with_gamma
from phcpy.phcpy2c3 import py2c_solve_by_dobldobl_homotopy_continuation
from phcpy.phcpy2c3 import py2c_solcon_clear_dobldobl_solutions
from phcpy.phcpy2c3 import py2c_syscon_clear_dobldobl_system
from phcpy.phcpy2c3 import py2c_copy_dobldobl_target_solutions_to_container
from phcpy.phcpy2c3 import py2c_copy_dobldobl_target_system_to_container
from phcpy.interface import load_dobldobl_solutions
from phcpy.interface import load_dobldobl_system
if(gamma == 0):
py2c_create_dobldobl_homotopy()
else:
py2c_create_dobldobl_homotopy_with_gamma(gamma.real, gamma.imag)
py2c_solve_by_dobldobl_homotopy_continuation(tasks)
py2c_solcon_clear_dobldobl_solutions()
py2c_syscon_clear_dobldobl_system()
py2c_copy_dobldobl_target_solutions_to_container()
# from phcpy.phcpy2c3 import py2c_write_dobldobl_target_system
    # print('the dobldobl target system :')
# py2c_write_dobldobl_target_system()
py2c_copy_dobldobl_target_system_to_container()
tsys = load_dobldobl_system()
sols = load_dobldobl_solutions()
return (tsys, sols)
def quaddobl_start_diagonal_cascade(gamma=0, tasks=0):
"""
Does the path tracking to start a diagonal cascade in quad double
precision. For this to work, the functions quaddobl_diagonal_homotopy
and quaddobl_diagonal_cascade_solutions must be executed successfully.
If gamma equals 0 on input, then a random gamma constant is generated,
otherwise, the given complex gamma will be used in the homotopy.
Multitasking is available, and is activated by the tasks parameter.
    Returns the target system and its corresponding solutions.
"""
from phcpy.phcpy2c3 import py2c_create_quaddobl_homotopy
from phcpy.phcpy2c3 import py2c_create_quaddobl_homotopy_with_gamma
from phcpy.phcpy2c3 import py2c_solve_by_quaddobl_homotopy_continuation
from phcpy.phcpy2c3 import py2c_solcon_clear_quaddobl_solutions
from phcpy.phcpy2c3 import py2c_syscon_clear_quaddobl_system
from phcpy.phcpy2c3 import py2c_copy_quaddobl_target_solutions_to_container
from phcpy.phcpy2c3 import py2c_copy_quaddobl_target_system_to_container
from phcpy.interface import load_quaddobl_solutions
from phcpy.interface import load_quaddobl_system
if(gamma == 0):
py2c_create_quaddobl_homotopy()
else:
py2c_create_quaddobl_homotopy_with_gamma(gamma.real, gamma.imag)
py2c_solve_by_quaddobl_homotopy_continuation(tasks)
py2c_solcon_clear_quaddobl_solutions()
py2c_syscon_clear_quaddobl_system()
py2c_copy_quaddobl_target_solutions_to_container()
# from phcpy.phcpy2c3 import py2c_write_quaddobl_target_system
    # print('the quaddobl target system :')
# py2c_write_quaddobl_target_system()
py2c_copy_quaddobl_target_system_to_container()
tsys = load_quaddobl_system()
sols = load_quaddobl_solutions()
return (tsys, sols)
def standard_diagonal_solver(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks=0):
"""
Runs the diagonal homotopies in standard double precision
to intersect two witness sets stored in (sys1, sols1) and
(sys2, sols2), of respective dimensions dm1 and dm2.
The ambient dimension equals dim.
Multitasking is available, and is activated by the tasks parameter.
Returns the last system in the cascade and its solutions.
"""
from phcpy.phcpy2c3 import py2c_standard_collapse_diagonal
from phcpy.interface import store_standard_solutions as storesols
from phcpy.interface import load_standard_solutions as loadsols
from phcpy.interface import load_standard_system as loadsys
from phcpy.phcpy2c3 import py2c_extrinsic_top_diagonal_dimension
topdim = py2c_extrinsic_top_diagonal_dimension(dim+dm1, dim+dm2, dm1, dm2)
print('the top dimension :', topdim)
standard_diagonal_homotopy(dm1, sys1, sols1, dm2, sys2, sols2)
print('defining the start solutions')
standard_diagonal_cascade_solutions(dm1, dm2)
print('starting the diagonal cascade')
(topsys, startsols) = standard_start_diagonal_cascade()
print('the system solved in the start of the cascade :')
for pol in topsys:
print(pol)
print('the solutions after starting the diagonal cascade :')
for sol in startsols:
print(sol)
endsols = standard_double_cascade_step(topsys, startsols)
print('after running one cascade step :')
for sol in endsols:
print(sol)
storesols(len(topsys), endsols)
py2c_standard_collapse_diagonal(topdim - 2*dim, 0)
result = (loadsys(), loadsols())
return result
def dobldobl_diagonal_solver(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks=0):
"""
Runs the diagonal homotopies in double double precision
to intersect two witness sets stored in (sys1, sols1) and
(sys2, sols2), of respective dimensions dm1 and dm2.
The ambient dimension equals dim.
Multitasking is available, and is activated by the tasks parameter.
Returns the last system in the cascade and its solutions.
"""
from phcpy.phcpy2c3 import py2c_dobldobl_collapse_diagonal
from phcpy.interface import store_dobldobl_solutions as storesols
from phcpy.interface import load_dobldobl_solutions as loadsols
from phcpy.interface import load_dobldobl_system as loadsys
from phcpy.phcpy2c3 import py2c_extrinsic_top_diagonal_dimension
topdim = py2c_extrinsic_top_diagonal_dimension(dim+dm1, dim+dm2, dm1, dm2)
print('the top dimension :', topdim)
dobldobl_diagonal_homotopy(dm1, sys1, sols1, dm2, sys2, sols2)
print('defining the start solutions')
dobldobl_diagonal_cascade_solutions(dm1, dm2)
print('starting the diagonal cascade')
(topsys, startsols) = dobldobl_start_diagonal_cascade()
print('the system solved in the start of the cascade :')
for pol in topsys:
print(pol)
print('the solutions after starting the diagonal cascade :')
for sol in startsols:
print(sol)
endsols = double_double_cascade_step(topsys, startsols)
print('after running one cascade step :')
for sol in endsols:
print(sol)
storesols(len(topsys), endsols)
py2c_dobldobl_collapse_diagonal(topdim - 2*dim, 0)
result = (loadsys(), loadsols())
return result
def quaddobl_diagonal_solver(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks=0):
"""
Runs the diagonal homotopies in quad double precision
to intersect two witness sets stored in (sys1, sols1) and
(sys2, sols2), of respective dimensions dm1 and dm2.
The ambient dimension equals dim.
Multitasking is available, and is activated by the tasks parameter.
Returns the last system in the cascade and its solutions.
"""
from phcpy.phcpy2c3 import py2c_quaddobl_collapse_diagonal
from phcpy.interface import store_quaddobl_solutions as storesols
from phcpy.interface import load_quaddobl_solutions as loadsols
from phcpy.interface import load_quaddobl_system as loadsys
from phcpy.phcpy2c3 import py2c_extrinsic_top_diagonal_dimension
topdim = py2c_extrinsic_top_diagonal_dimension(dim+dm1, dim+dm2, dm1, dm2)
print('the top dimension :', topdim)
quaddobl_diagonal_homotopy(dm1, sys1, sols1, dm2, sys2, sols2)
print('defining the start solutions')
quaddobl_diagonal_cascade_solutions(dm1, dm2)
print('starting the diagonal cascade')
(topsys, startsols) = quaddobl_start_diagonal_cascade()
print('the system solved in the start of the cascade :')
for pol in topsys:
print(pol)
print('the solutions after starting the diagonal cascade :')
for sol in startsols:
print(sol)
endsols = quad_double_cascade_step(topsys, startsols)
print('after running one cascade step :')
for sol in endsols:
print(sol)
storesols(len(topsys), endsols)
py2c_quaddobl_collapse_diagonal(topdim - 2*dim, 0)
result = (loadsys(), loadsols())
return result
def diagonal_solver(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks=0, prc='d'):
"""
Runs the diagonal homotopies to intersect two witness sets stored in
    (sys1, sols1) and (sys2, sols2), of respective dimensions dm1 and dm2.
The ambient dimension equals dim.
Multitasking is available, and is activated by the tasks parameter.
The precision is set by the parameter prc, which takes the default
value 'd' for standard double, 'dd' for double double, or 'qd' for
quad double precision.
Returns the last system in the cascade and its solutions.
"""
if(prc == 'd'):
return standard_diagonal_solver\
(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks)
elif(prc == 'dd'):
return dobldobl_diagonal_solver\
(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks)
elif(prc == 'qd'):
return quaddobl_diagonal_solver\
(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks)
else:
print('wrong argument for precision')
return None
def test_diaghom(precision='d'):
"""
Test on the diagonal homotopy.
"""
hyp1 = 'x1*x2;'
hyp2 = 'x1 - x2;'
(w1sys, w1sols) = witness_set_of_hypersurface(2, hyp1, precision)
    print('the witness set for', hyp1)
for pol in w1sys:
print(pol)
for sol in w1sols:
print(sol)
(w2sys, w2sols) = witness_set_of_hypersurface(2, hyp2, precision)
    print('the witness set for', hyp2)
for pol in w2sys:
print(pol)
for sol in w2sols:
print(sol)
(sys, sols) = diagonal_solver\
(2, 1, w1sys, w1sols, 1, w2sys, w2sols, 0, precision)
print('the end system :')
for pol in sys:
print(pol)
print('the solutions of the diagonal solver :')
for sol in sols:
print(sol)
def test():
"""
Runs a test on algebraic sets.
"""
from phcpy.phcpy2c3 import py2c_set_seed
py2c_set_seed(234798272)
# test_cascade()
# test_monodromy()
test_diaghom('d')
if __name__ == "__main__":
test()
| gpl-3.0 |
Parlin-Galanodel/scrapy | scrapy/utils/response.py | 16 | 2815 | """
This module provides some useful functions for working with
scrapy.http.Response objects
"""
import os
import weakref
import webbrowser
import tempfile
from twisted.web import http
from scrapy.utils.python import to_bytes, to_native_str
from w3lib import html
from scrapy.utils.decorators import deprecated
@deprecated
def body_or_str(*a, **kw):
from scrapy.utils.iterators import _body_or_str
return _body_or_str(*a, **kw)
_baseurl_cache = weakref.WeakKeyDictionary()
def get_base_url(response):
"""Return the base url of the given response, joined with the response url"""
if response not in _baseurl_cache:
text = response.text[0:4096]
_baseurl_cache[response] = html.get_base_url(text, response.url,
response.encoding)
return _baseurl_cache[response]
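# A minimal sketch (assumed values): the base url comes from the <base>
# tag when present, joined with the response url,
#
#     from scrapy.http import HtmlResponse
#     resp = HtmlResponse('http://example.com/a/', body=b'<html><head>'
#                         b'<base href="/base/"></head><body></body></html>')
#     get_base_url(resp)  # -> 'http://example.com/base/'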
_metaref_cache = weakref.WeakKeyDictionary()
def get_meta_refresh(response):
"""Parse the http-equiv refrsh parameter from the given response"""
if response not in _metaref_cache:
text = response.text[0:4096]
_metaref_cache[response] = html.get_meta_refresh(text, response.url,
response.encoding, ignore_tags=('script', 'noscript'))
return _metaref_cache[response]
def response_status_message(status):
"""Return status code plus status text descriptive message
"""
message = http.RESPONSES.get(int(status), "Unknown Status")
return '%s %s' % (status, to_native_str(message))
def response_httprepr(response):
"""Return raw HTTP representation (as bytes) of the given response. This
is provided only for reference, since it's not the exact stream of bytes
that was received (that's not exposed by Twisted).
"""
s = b"HTTP/1.1 " + to_bytes(str(response.status)) + b" " + \
to_bytes(http.RESPONSES.get(response.status, b'')) + b"\r\n"
if response.headers:
s += response.headers.to_string() + b"\r\n"
s += b"\r\n"
s += response.body
return s
def open_in_browser(response, _openfunc=webbrowser.open):
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
from scrapy.http import HtmlResponse, TextResponse
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if b'<base' not in body:
repl = '<head><base href="%s">' % response.url
body = body.replace(b'<head>', to_bytes(repl))
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: %s" %
response.__class__.__name__)
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc("file://%s" % fname)
| bsd-3-clause |
skyddv/neutron | neutron/tests/unit/extensions/foxinsocks.py | 24 | 3568 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_serialization import jsonutils
from neutron.api import extensions
from neutron import wsgi
class FoxInSocksController(wsgi.Controller):
def index(self, request):
return "Try to say this Mr. Knox, sir..."
class FoxInSocksPluginInterface(extensions.PluginInterface):
@abc.abstractmethod
def method_to_support_foxnsox_extension(self):
pass
class Foxinsocks(object):
def __init__(self):
pass
def get_plugin_interface(self):
return FoxInSocksPluginInterface
def get_name(self):
return "Fox In Socks"
def get_alias(self):
return "FOXNSOX"
def get_description(self):
return "The Fox In Socks Extension"
def get_updated(self):
return "2011-01-22T13:25:27-06:00"
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('foxnsocks',
FoxInSocksController())
resources.append(resource)
return resources
def get_actions(self):
return [extensions.ActionExtension('dummy_resources',
'FOXNSOX:add_tweedle',
self._add_tweedle_handler),
extensions.ActionExtension('dummy_resources',
'FOXNSOX:delete_tweedle',
self._delete_tweedle_handler)]
def get_request_extensions(self):
request_exts = []
def _goose_handler(req, res):
#NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
data = jsonutils.loads(res.body)
data['FOXNSOX:googoose'] = req.GET.get('chewing')
res.body = jsonutils.dumps(data).encode('utf-8')
return res
req_ext1 = extensions.RequestExtension('GET', '/dummy_resources/:(id)',
_goose_handler)
request_exts.append(req_ext1)
def _bands_handler(req, res):
#NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
data = jsonutils.loads(res.body)
data['FOXNSOX:big_bands'] = 'Pig Bands!'
res.body = jsonutils.dumps(data).encode('utf-8')
return res
req_ext2 = extensions.RequestExtension('GET', '/dummy_resources/:(id)',
_bands_handler)
request_exts.append(req_ext2)
return request_exts
def _add_tweedle_handler(self, input_dict, req, id):
return "Tweedle {0} Added.".format(
input_dict['FOXNSOX:add_tweedle']['name'])
def _delete_tweedle_handler(self, input_dict, req, id):
return "Tweedle {0} Deleted.".format(
input_dict['FOXNSOX:delete_tweedle']['name'])
| apache-2.0 |
GaussDing/django | tests/gis_tests/gdal_tests/test_ds.py | 21 | 11450 | import os
import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from ..test_data import TEST_DATA, TestDS, get_ds_file
if HAS_GDAL:
from django.contrib.gis.gdal import DataSource, Envelope, OGRGeometry, GDALException, OGRIndexError, GDAL_VERSION
from django.contrib.gis.gdal.field import OFTReal, OFTInteger, OFTString
# List of acceptable data sources.
ds_list = (
TestDS('test_point', nfeat=5, nfld=3, geom='POINT', gtype=1, driver='ESRI Shapefile',
fields={'dbl': OFTReal, 'int': OFTInteger, 'str': OFTString},
extent=(-1.35011, 0.166623, -0.524093, 0.824508), # Got extent from QGIS
srs_wkt=(
'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",'
'6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",'
'0.017453292519943295]]'
),
field_values={
'dbl': [float(i) for i in range(1, 6)],
'int': list(range(1, 6)),
'str': [str(i) for i in range(1, 6)],
},
fids=range(5)
),
TestDS('test_vrt', ext='vrt', nfeat=3, nfld=3, geom='POINT', gtype='Point25D', driver='VRT',
fields={
'POINT_X': OFTString,
'POINT_Y': OFTString,
'NUM': OFTString,
}, # VRT uses CSV, which all types are OFTString.
extent=(1.0, 2.0, 100.0, 523.5), # Min/Max from CSV
field_values={
'POINT_X': ['1.0', '5.0', '100.0'],
'POINT_Y': ['2.0', '23.0', '523.5'],
'NUM': ['5', '17', '23'],
},
fids=range(1, 4)
),
TestDS('test_poly', nfeat=3, nfld=3, geom='POLYGON', gtype=3,
driver='ESRI Shapefile',
fields={'float': OFTReal, 'int': OFTInteger, 'str': OFTString},
extent=(-1.01513, -0.558245, 0.161876, 0.839637), # Got extent from QGIS
srs_wkt=(
'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",'
'6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",'
'0.017453292519943295]]'
),
)
)
bad_ds = (TestDS('foo'),)
@skipUnless(HAS_GDAL, "GDAL is required")
class DataSourceTest(unittest.TestCase):
def test01_valid_shp(self):
"Testing valid SHP Data Source files."
for source in ds_list:
# Loading up the data source
ds = DataSource(source.ds)
# Making sure the layer count is what's expected (only 1 layer in a SHP file)
self.assertEqual(1, len(ds))
# Making sure GetName works
self.assertEqual(source.ds, ds.name)
# Making sure the driver name matches up
self.assertEqual(source.driver, str(ds.driver))
# Making sure indexing works
try:
ds[len(ds)]
except OGRIndexError:
pass
else:
self.fail('Expected an IndexError!')
def test02_invalid_shp(self):
"Testing invalid SHP files for the Data Source."
for source in bad_ds:
self.assertRaises(GDALException, DataSource, source.ds)
def test03a_layers(self):
"Testing Data Source Layers."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer, this tests DataSource.__iter__
for layer in ds:
# Making sure we get the number of features we expect
self.assertEqual(len(layer), source.nfeat)
# Making sure we get the number of fields we expect
self.assertEqual(source.nfld, layer.num_fields)
self.assertEqual(source.nfld, len(layer.fields))
# Testing the layer's extent (an Envelope), and its properties
if source.driver == 'VRT' and (GDAL_VERSION >= (1, 7, 0) and GDAL_VERSION < (1, 7, 3)):
# There's a known GDAL regression with retrieving the extent
# of a VRT layer in versions 1.7.0-1.7.2:
# http://trac.osgeo.org/gdal/ticket/3783
pass
else:
self.assertEqual(True, isinstance(layer.extent, Envelope))
self.assertAlmostEqual(source.extent[0], layer.extent.min_x, 5)
self.assertAlmostEqual(source.extent[1], layer.extent.min_y, 5)
self.assertAlmostEqual(source.extent[2], layer.extent.max_x, 5)
self.assertAlmostEqual(source.extent[3], layer.extent.max_y, 5)
# Now checking the field names.
flds = layer.fields
for f in flds:
self.assertEqual(True, f in source.fields)
# Negative FIDs are not allowed.
self.assertRaises(OGRIndexError, layer.__getitem__, -1)
self.assertRaises(OGRIndexError, layer.__getitem__, 50000)
if hasattr(source, 'field_values'):
fld_names = source.field_values.keys()
# Testing `Layer.get_fields` (which uses Layer.__iter__)
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name], layer.get_fields(fld_name))
# Testing `Layer.__getitem__`.
for i, fid in enumerate(source.fids):
feat = layer[fid]
self.assertEqual(fid, feat.fid)
# Maybe this should be in the test below, but we might as well test
# the feature values here while in this loop.
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name][i], feat.get(fld_name))
def test03b_layer_slice(self):
"Test indexing and slicing on Layers."
# Using the first data-source because the same slice
# can be used for both the layer and the control values.
source = ds_list[0]
ds = DataSource(source.ds)
sl = slice(1, 3)
feats = ds[0][sl]
for fld_name in ds[0].fields:
test_vals = [feat.get(fld_name) for feat in feats]
control_vals = source.field_values[fld_name][sl]
self.assertEqual(control_vals, test_vals)
def test03c_layer_references(self):
"""
Ensure OGR objects keep references to the objects they belong to.
"""
source = ds_list[0]
# See ticket #9448.
def get_layer():
# This DataSource object is not accessible outside this
# scope. However, a reference should still be kept alive
# on the `Layer` returned.
ds = DataSource(source.ds)
return ds[0]
# Making sure we can call OGR routines on the Layer returned.
lyr = get_layer()
self.assertEqual(source.nfeat, len(lyr))
self.assertEqual(source.gtype, lyr.geom_type.num)
# Same issue for Feature/Field objects, see #18640
self.assertEqual(str(lyr[0]['str']), "1")
def test04_features(self):
"Testing Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer
for layer in ds:
# Incrementing through each feature in the layer
for feat in layer:
# Making sure the number of fields, and the geometry type
# are what's expected.
self.assertEqual(source.nfld, len(list(feat)))
self.assertEqual(source.gtype, feat.geom_type)
# Making sure the fields match to an appropriate OFT type.
for k, v in source.fields.items():
# Making sure we get the proper OGR Field instance, using
# a string value index for the feature.
self.assertEqual(True, isinstance(feat[k], v))
# Testing Feature.__iter__
for fld in feat:
self.assertEqual(True, fld.name in source.fields.keys())
def test05_geometries(self):
"Testing Geometries from Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer and feature.
for layer in ds:
for feat in layer:
g = feat.geom
# Making sure we get the right Geometry name & type
self.assertEqual(source.geom, g.geom_name)
self.assertEqual(source.gtype, g.geom_type)
# Making sure the SpatialReference is as expected.
if hasattr(source, 'srs_wkt'):
self.assertEqual(
source.srs_wkt,
# Depending on lib versions, WGS_84 might be WGS_1984
g.srs.wkt.replace('SPHEROID["WGS_84"', 'SPHEROID["WGS_1984"')
)
def test06_spatial_filter(self):
"Testing the Layer.spatial_filter property."
ds = DataSource(get_ds_file('cities', 'shp'))
lyr = ds[0]
# When not set, it should be None.
self.assertEqual(None, lyr.spatial_filter)
# Must be set a/an OGRGeometry or 4-tuple.
self.assertRaises(TypeError, lyr._set_spatial_filter, 'foo')
# Setting the spatial filter with a tuple/list with the extent of
# a buffer centering around Pueblo.
self.assertRaises(ValueError, lyr._set_spatial_filter, list(range(5)))
filter_extent = (-105.609252, 37.255001, -103.609252, 39.255001)
lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001)
self.assertEqual(OGRGeometry.from_bbox(filter_extent), lyr.spatial_filter)
feats = [feat for feat in lyr]
self.assertEqual(1, len(feats))
self.assertEqual('Pueblo', feats[0].get('Name'))
# Setting the spatial filter with an OGRGeometry for buffer centering
# around Houston.
filter_geom = OGRGeometry(
'POLYGON((-96.363151 28.763374,-94.363151 28.763374,'
'-94.363151 30.763374,-96.363151 30.763374,-96.363151 28.763374))'
)
lyr.spatial_filter = filter_geom
self.assertEqual(filter_geom, lyr.spatial_filter)
feats = [feat for feat in lyr]
self.assertEqual(1, len(feats))
self.assertEqual('Houston', feats[0].get('Name'))
# Clearing the spatial filter by setting it to None. Now
# should indicate that there are 3 features in the Layer.
lyr.spatial_filter = None
self.assertEqual(3, len(lyr))
def test07_integer_overflow(self):
"Testing that OFTReal fields, treated as OFTInteger, do not overflow."
# Using *.dbf from Census 2010 TIGER Shapefile for Texas,
# which has land area ('ALAND10') stored in a Real field
# with no precision.
ds = DataSource(os.path.join(TEST_DATA, 'texas.dbf'))
feat = ds[0][0]
# Reference value obtained using `ogrinfo`.
self.assertEqual(676586997978, feat.get('ALAND10'))
| bsd-3-clause |
guerrerocarlos/odoo | addons/l10n_pl/__init__.py | 340 | 1155 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# l10n_pl module improved for Poland
# by Grzegorz Grzelak [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mapzen/vector-datasource | test/test_meta.py | 2 | 26453 | # -*- coding: utf-8 -*-
import unittest
def memoize(f):
result = {}
def wrapped(*args, **kwargs):
cache_key = tuple(args)
        # cache on the positional args; checking `if not result` would return a
        # stale entry (or raise KeyError) once a second distinct key appears.
        if cache_key not in result:
result[cache_key] = f(*args, **kwargs)
return result[cache_key]
return wrapped
@memoize
def parse_layers_props(yaml_path):
from vectordatasource.meta.python import parse_layers, output_kind, \
make_function_name_props
return parse_layers(yaml_path, output_kind, make_function_name_props)
@memoize
def parse_layers_min_zoom(yaml_path):
from vectordatasource.meta.python import parse_layers, output_min_zoom, \
make_function_name_min_zoom
return parse_layers(
yaml_path, output_min_zoom, make_function_name_min_zoom)
@memoize
def find_yaml_path():
from vectordatasource.meta import find_yaml_path
return find_yaml_path()
@memoize
def make_test_metadata():
from tilequeue.query.fixture import Metadata
from tilequeue.process import Source
return Metadata(Source('test', 'test'), [], [])
@memoize
def make_layer_data_props():
yaml_path = find_yaml_path()
layer_parse_result = parse_layers_props(yaml_path)
by_name = {}
for layer_datum in layer_parse_result.layer_data:
by_name[layer_datum.layer] = layer_datum
return layer_parse_result.layer_data, by_name
@memoize
def make_layer_data_min_zoom():
yaml_path = find_yaml_path()
layer_parse_result = parse_layers_min_zoom(yaml_path)
by_name = {}
for layer_datum in layer_parse_result.layer_data:
by_name[layer_datum.layer] = layer_datum
return layer_parse_result.layer_data, by_name
def _make_metadata(name):
from tilequeue.process import make_metadata
from tilequeue.process import Source
sources = {
'osm': Source('osm', 'openstreetmap.org'),
'ne': Source('ne', 'naturalearthdata.com'),
'wof': Source('wof', 'whosonfirst.org'),
'shp': Source('shp', 'osmdata.openstreetmap.de'),
}
return make_metadata(sources[name])
class CallFuncTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_props()
def test_layer_data_count(self):
self.assertEquals(10, len(self.layer_data))
def test_layer_names(self):
exp_layers = set(('landuse', 'pois', 'transit', 'water', 'places',
'boundaries', 'buildings', 'roads', 'earth',
'admin_areas'))
self.assertEquals(exp_layers, set(self.by_name.keys()))
def test_layers_called_empty_feature(self):
import shapely.geometry
shape = shapely.geometry.Point((0, 0))
props = {}
fid = 42
meta = make_test_metadata()
for layer_datum in self.layer_data:
fn = layer_datum.fn
result = fn(shape, props, fid, meta)
            self.assertTrue(isinstance(result, (dict, type(None))))
class BuildingsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_props()
cls.buildings = cls.by_name['buildings']
def test_building_basic(self):
import shapely.geometry
shape = shapely.geometry.Point((0, 0))
props = dict(building='yes')
fid = 42
meta = make_test_metadata()
out_props = self.buildings.fn(shape, props, fid, meta)
self.assertEquals('building', out_props.get('kind'))
self.assertIsNone(out_props.get('kind_detail'))
def test_building_kind_detail(self):
import shapely.geometry
shape = shapely.geometry.Polygon([(0, 0), (1, 1), (1, 0)])
props = {
'building': 'beach_hut',
'building:part': 'passageway',
}
fid = 42
meta = make_test_metadata()
out_props = self.buildings.fn(shape, props, fid, meta)
self.assertEquals('building', out_props.get('kind'))
self.assertEquals('beach_hut', out_props.get('kind_detail'))
self.assertEquals('passageway', out_props.get('building_part'))
def test_area(self):
import shapely.geometry
shape = shapely.geometry.Polygon([(0, 0), (1, 1), (1, 0)])
props = dict(building='yes', area=3.14159)
meta = make_test_metadata()
out_props = self.buildings.fn(shape, props, None, meta)
area = out_props.get('area')
self.assertIsNotNone(area)
self.assertTrue(isinstance(area, int))
self.assertEquals(3, area)
class BoundariesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_props()
cls.boundaries = cls.by_name['boundaries']
def test_osm(self):
from shapely.geometry import Point
shape = Point(0, 0).buffer(1.0)
props = {
'boundary': 'administrative',
'boundary:type': 'aboriginal_lands',
'admin_level': '2',
}
meta = make_test_metadata()
out_props = self.boundaries.fn(shape, props, None, meta)
self.assertEquals('aboriginal_lands', out_props.get('kind'))
self.assertEquals('2', out_props.get('kind_detail'))
def test_ne(self):
from shapely.geometry import Point
shape = Point(0, 0).buffer(1.0)
props = {
'featurecla': 'Admin-1 region boundary',
}
meta = make_test_metadata()
out_props = self.boundaries.fn(shape, props, None, meta)
self.assertEquals('macroregion', out_props.get('kind'))
self.assertEquals('3', out_props.get('kind_detail'))
def test_osm_linestring(self):
from shapely.geometry import LineString
shape = LineString([(0, 0), (1, 1)])
props = {
'boundary': 'administrative',
'boundary:type': 'aboriginal_lands',
'admin_level': '2',
}
meta = make_test_metadata()
out_props = self.boundaries.fn(shape, props, None, meta)
# we get most admin boundaries from the planet_osm_polygons table, as
# the (linestring) boundaries of the country polygons. this means we
# need to distinguish between three cases: 1) linestrings from the
# lines table, 2) polygons from the polygons table, and 3) linestrings
# derived from polygons in the polygons table. we do this with a little
# hack, by setting mz_boundary_from_polygon on the derived linestrings.
# without the hack, shouldn't match (i.e: as if it were from
# planet_osm_line)
self.assertIsNone(out_props)
# if we add the hack, it should now match (i.e: as if it were
# from planet_osm_polygon with the boundary/RHR query).
props['mz_boundary_from_polygon'] = True
out_props = self.boundaries.fn(shape, props, None, meta)
self.assertEquals('aboriginal_lands', out_props.get('kind'))
self.assertEquals('2', out_props.get('kind_detail'))
class EarthTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_props()
cls.earth = cls.by_name['earth']
def test_osm(self):
props = {
'natural': 'arete',
}
meta = make_test_metadata()
out_props = self.earth.fn(None, props, None, meta)
self.assertEquals('arete', out_props.get('kind'))
def test_ne(self):
props = {
'gid': 42,
}
# this rule depends on a particular source being set
meta = _make_metadata('ne')
out_props = self.earth.fn(None, props, None, meta)
self.assertEquals('earth', out_props.get('kind'))
def test_osmdata_area(self):
meta = _make_metadata('shp')
props = dict(area=3.14159)
out_props = self.earth.fn(None, props, None, meta)
area = out_props.get('area')
self.assertIsNotNone(area)
self.assertTrue(isinstance(area, int))
self.assertEquals(3, area)
class LanduseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_props()
cls.landuse = cls.by_name['landuse']
def test_osm(self):
props = {
'natural': 'scree',
}
meta = make_test_metadata()
out_props = self.landuse.fn(None, props, None, meta)
self.assertEquals('scree', out_props.get('kind'))
def test_mz_is_building(self):
meta = make_test_metadata()
props = {
'leisure': 'park',
'building': 'yes'
}
out_props = self.landuse.fn(None, props, None, meta)
self.assertTrue(out_props.get('mz_is_building'))
props = {
'leisure': 'park',
'building:part': 'yes'
}
out_props = self.landuse.fn(None, props, None, meta)
self.assertTrue(out_props.get('mz_is_building'))
props = {
'leisure': 'park',
'building': 'office'
}
out_props = self.landuse.fn(None, props, None, meta)
self.assertTrue(out_props.get('mz_is_building'))
props = {
'leisure': 'park',
'building': 'no'
}
out_props = self.landuse.fn(None, props, None, meta)
self.assertIsNone(out_props.get('mz_is_building'))
props = {
'leisure': 'park',
'building:part': 'no'
}
out_props = self.landuse.fn(None, props, None, meta)
self.assertIsNone(out_props.get('mz_is_building'))
def test_ne_area(self):
meta = _make_metadata('ne')
props = dict(area=3.14159)
out_props = self.landuse.fn(None, props, None, meta)
area = out_props.get('area')
self.assertIsNotNone(area)
self.assertTrue(isinstance(area, int))
self.assertEquals(3, area)
def test_ne_min_zoom(self):
meta = _make_metadata('ne')
props = dict(featurecla='Urban area')
out_props = self.landuse.fn(None, props, None, meta)
self.assertEquals(4, out_props.get('min_zoom'))
def test_area_tag(self):
props = dict(barrier='fence', area='no')
meta = make_test_metadata()
out_props = self.landuse.fn(None, props, None, meta)
self.assertIsNone(out_props.get('area'))
class PlacesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_props()
cls.places = cls.by_name['places']
def test_osm(self):
props = {
'name': 'foo',
'place': 'isolated_dwelling',
}
meta = make_test_metadata()
out_props = self.places.fn(None, props, None, meta)
self.assertEquals('locality', out_props.get('kind'))
self.assertEquals('isolated_dwelling', out_props.get('kind_detail'))
def test_ne(self):
props = {
'scalerank': 42,
'featurecla': 'Scientific station',
}
meta = make_test_metadata()
out_props = self.places.fn(None, props, None, meta)
self.assertEquals('locality', out_props.get('kind'))
self.assertEquals('scientific_station', out_props.get('kind_detail'))
def test_wof_is_landuse_aoi(self):
meta = _make_metadata('wof')
props = dict(is_landuse_aoi=True, placetype='neighbourhood')
out_props = self.places.fn(None, props, None, meta)
self.assertTrue(out_props.get('is_landuse_aoi'))
props = dict(is_landuse_aoi=False, placetype='neighbourhood')
out_props = self.places.fn(None, props, None, meta)
self.assertIsNone(out_props.get('is_landuse_aoi'))
props = dict(is_landuse_aoi=None, placetype='neighbourhood')
out_props = self.places.fn(None, props, None, meta)
self.assertIsNone(out_props.get('is_landuse_aoi'))
props = dict(placetype='neighbourhood')
out_props = self.places.fn(None, props, None, meta)
self.assertIsNone(out_props.get('is_landuse_aoi'))
def test_wof_area(self):
meta = _make_metadata('wof')
props = dict(area=3.14159, placetype='neighbourhood')
out_props = self.places.fn(None, props, None, meta)
area = out_props.get('area')
self.assertIsNotNone(area)
self.assertTrue(isinstance(area, int))
self.assertEquals(3, area)
props = dict(area=None, placetype='neighbourhood')
out_props = self.places.fn(None, props, None, meta)
self.assertIsNone(out_props.get('area'))
def test_wof_kind(self):
meta = _make_metadata('wof')
props = dict(placetype='neighbourhood')
out_props = self.places.fn(None, props, None, meta)
self.assertEquals('neighbourhood', out_props.get('kind'))
def test_capital(self):
meta = make_test_metadata()
props = dict(place='country', name='foo',
capital='yes', state_capital='yes')
out_props = self.places.fn(None, props, None, meta)
self.assertTrue(out_props.get('country_capital'))
self.assertTrue(out_props.get('region_capital'))
props = dict(place='state', name='foo', state_capital='no')
out_props = self.places.fn(None, props, None, meta)
self.assertIsNone(out_props.get('region_capital'))
class PoisTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_props()
cls.pois = cls.by_name['pois']
def test_disused(self):
props = dict(disused='yes')
meta = make_test_metadata()
out_props = self.pois.fn(None, props, None, meta)
self.assertIsNone(out_props.get('kind'))
props = dict(disused='no', name='foo', leisure='playground')
out_props = self.pois.fn(None, props, None, meta)
self.assertEquals('playground', out_props.get('kind'))
def test_no_name_ok(self):
props = dict(historic='landmark')
meta = make_test_metadata()
out_props = self.pois.fn(None, props, None, meta)
self.assertEquals('landmark', out_props.get('kind'))
def test_no_name_none(self):
props = dict(tourism='aquarium')
meta = make_test_metadata()
out_props = self.pois.fn(None, props, None, meta)
self.assertIsNone(out_props.get('kind'))
def test_area(self):
props = dict(name='foo', leisure='park', area=3.14159)
meta = make_test_metadata()
out_props = self.pois.fn(None, props, None, meta)
area = out_props.get('area')
self.assertIsNotNone(area)
self.assertTrue(isinstance(area, int))
self.assertEquals(3, area)
class RoadsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
_, props_by_name = make_layer_data_props()
_, min_zoom_by_name = make_layer_data_min_zoom()
cls.roads = props_by_name['roads']
cls.roads_min_zoom = min_zoom_by_name['roads']
def test_osm(self):
props = {
'name': 'foo',
'highway': 'motorway',
}
meta = make_test_metadata()
out_props = self.roads.fn(None, props, None, meta)
self.assertEquals('highway', out_props.get('kind'))
self.assertEquals('motorway', out_props.get('kind_detail'))
def test_ne(self):
props = {
'featurecla': 'Road',
'type': 'Road',
'min_zoom': 3,
}
meta = make_test_metadata()
out_props = self.roads.fn(None, props, None, meta)
min_zoom = self.roads_min_zoom.fn(None, props, None, meta)
self.assertEquals('major_road', out_props.get('kind'))
self.assertEquals('secondary', out_props.get('kind_detail'))
# NOTE: there is a 'min_zoom' in the out_props, but it gets
# overwritten with the result of the min_zoom function, which is what
# we test here.
self.assertEquals(5, min_zoom)
class TransitTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_props()
cls.transit = cls.by_name['transit']
def test_osm(self):
props = {
'route': 'subway',
}
meta = make_test_metadata()
out_props = self.transit.fn(None, props, None, meta)
self.assertEquals('subway', out_props.get('kind'))
class WaterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_props()
cls.water = cls.by_name['water']
def test_osm(self):
props = dict(waterway='riverbank')
meta = make_test_metadata()
out_props = self.water.fn(None, props, None, meta)
self.assertEquals('riverbank', out_props.get('kind'))
props = dict(waterway='riverbank', intermittent='yes')
out_props = self.water.fn(None, props, None, meta)
self.assertEquals('riverbank', out_props.get('kind'))
self.assertTrue(out_props.get('intermittent'))
def test_ne(self):
props = dict(featurecla='Lake')
meta = make_test_metadata()
out_props = self.water.fn(None, props, None, meta)
self.assertEquals('lake', out_props.get('kind'))
def test_ne_area(self):
meta = _make_metadata('ne')
props = dict(featurecla='Lake', area=3.14159)
out_props = self.water.fn(None, props, None, meta)
area = out_props.get('area')
self.assertIsNotNone(area)
self.assertTrue(isinstance(area, int))
self.assertEquals(3, area)
def test_osmdata_area(self):
meta = _make_metadata('shp')
props = dict(area=3.14159)
out_props = self.water.fn(None, props, None, meta)
area = out_props.get('area')
self.assertIsNotNone(area)
self.assertTrue(isinstance(area, int))
self.assertEquals(3, area)
class LanduseMinZoomTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_min_zoom()
cls.landuse = cls.by_name['landuse']
def test_small_zoo(self):
import shapely.geometry
shape = shapely.geometry.Polygon([(0, 0), (1, 1), (1, 0)])
props = {
'zoo': 'enclosure',
}
meta = make_test_metadata()
out_min_zoom = self.landuse.fn(shape, props, None, meta)
self.assertEquals(16, out_min_zoom)
def test_large_zoo(self):
import shapely.geometry
s = 100000
shape = shapely.geometry.Polygon([(0, 0), (s, s), (s, 0)])
props = {
'zoo': 'enclosure',
}
meta = make_test_metadata()
out_min_zoom = self.landuse.fn(shape, props, None, meta)
self.assertEquals(13, out_min_zoom)
def test_medium_zoo(self):
import shapely.geometry
from vectordatasource.util import calculate_way_area, \
calculate_1px_zoom
import math
target_zoom = 14.0
# want a zoom 14 feature, so make one with a triangle.
target_area = math.exp((17.256 - target_zoom) * math.log(4))
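        # equivalent to target_area = 4 ** (17.256 - target_zoom): each step
        # down in zoom covers four times the area per pixel, and this inverts
        # the calculate_1px_zoom() check asserted below.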
# make area with a half-square triangle.
s = math.sqrt(target_area * 2.0)
shape = shapely.geometry.Polygon([(0, 0), (s, s), (s, 0)])
props = {
'zoo': 'enclosure',
}
# test the utility functions we're relying on
util_way_area = calculate_way_area(shape)
self.assertAlmostEqual(target_area, util_way_area)
util_min_zoom = calculate_1px_zoom(shape.area)
self.assertAlmostEqual(target_zoom, util_min_zoom)
meta = make_test_metadata()
out_min_zoom = self.landuse.fn(shape, props, None, meta)
self.assertAlmostEqual(target_zoom, out_min_zoom)
class BoundariesMinZoomTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_min_zoom()
cls.boundaries = cls.by_name['boundaries']
def test_feature(self):
import shapely.geometry
shape = shapely.geometry.LineString([(0, 0), (1, 1), (1, 0)])
props = {
'boundary': 'administrative',
'admin_level': '2',
'mz_boundary_from_polygon': True, # need this for hack
}
meta = make_test_metadata()
out_min_zoom = self.boundaries.fn(shape, props, None, meta)
self.assertEquals(8, out_min_zoom)
class BuildingsMinZoomTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_min_zoom()
cls.buildings = cls.by_name['buildings']
def test_feature(self):
import shapely.geometry
shape = shapely.geometry.Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
props = {
'building': 'yes',
}
meta = make_test_metadata()
out_min_zoom = self.buildings.fn(shape, props, None, meta)
self.assertEquals(17, out_min_zoom)
class EarthMinZoomTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_min_zoom()
cls.earth = cls.by_name['earth']
def test_feature(self):
import shapely.geometry
shape = shapely.geometry.Polygon([(0, 0), (1, 1), (1, 0)])
props = {
'place': 'island',
'name': 'An Island',
}
meta = make_test_metadata()
out_min_zoom = self.earth.fn(shape, props, None, meta)
self.assertEquals(15, out_min_zoom)
class PlacesMinZoomTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_min_zoom()
cls.places = cls.by_name['places']
def test_feature(self):
import shapely.geometry
shape = shapely.geometry.Point(0, 0)
props = {
'place': 'country',
'name': 'A Country',
}
meta = make_test_metadata()
out_min_zoom = self.places.fn(shape, props, None, meta)
self.assertEquals(1, out_min_zoom)
class PoisMinZoomTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_min_zoom()
cls.pois = cls.by_name['pois']
def test_feature(self):
import shapely.geometry
shape = shapely.geometry.Polygon([(0, 0), (1, 1), (1, 0)])
props = {
'boundary': 'national_park',
'operator': 'US Forest Service',
'name': 'A Forest',
}
meta = make_test_metadata()
out_min_zoom = self.pois.fn(shape, props, None, meta)
self.assertEquals(16, out_min_zoom)
class RoadsMinZoomTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_min_zoom()
cls.roads = cls.by_name['roads']
def test_feature(self):
import shapely.geometry
shape = shapely.geometry.LineString([(0, 0), (1, 1), (1, 0)])
props = {
'highway': 'service',
}
meta = make_test_metadata()
out_min_zoom = self.roads.fn(shape, props, None, meta)
self.assertEquals(14, out_min_zoom)
class TransitMinZoomTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_min_zoom()
cls.transit = cls.by_name['transit']
def test_feature(self):
import shapely.geometry
shape = shapely.geometry.LineString([(0, 0), (1, 1), (1, 0)])
props = {
'route': 'train',
'service': 'high_speed',
}
meta = make_test_metadata()
out_min_zoom = self.transit.fn(shape, props, None, meta)
self.assertEquals(5, out_min_zoom)
class WaterMinZoomTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.layer_data, cls.by_name = make_layer_data_min_zoom()
cls.water = cls.by_name['water']
def test_feature(self):
import shapely.geometry
shape = shapely.geometry.Polygon([(0, 0), (1, 1), (1, 0)])
props = {
'leisure': 'swimming_pool',
}
meta = make_test_metadata()
out_min_zoom = self.water.fn(shape, props, None, meta)
self.assertEquals(17, out_min_zoom)
class RoundTripRuleTest(unittest.TestCase):
def test_not_rule_roundtrip_through_astformatter(self):
yaml_data = dict(
filters=[dict(
filter={
'foo': 'bar',
'not': {
'any': [
dict(baz='quux'),
dict(fleem='morx'),
]
}
},
min_zoom=7,
output=dict(kind='triggered')
)],
)
from vectordatasource.meta.python import FilterCompiler
from vectordatasource.meta.python import create_matcher
from vectordatasource.meta.python import output_kind
matchers = []
for yaml_datum in yaml_data['filters']:
matcher = create_matcher(yaml_datum, output_kind)
matchers.append(matcher)
fc = FilterCompiler()
ast_fn, compiled_fn = fc.compile(matchers, 'fn_name_props')
shape = None
props = dict(some='value')
fid = 42
meta = make_test_metadata()
result = compiled_fn(shape, props, fid, meta)
self.assertIsNone(result)
# now, round trip it through the ast formatter
# and see if we get the same result
import astformatter
formatter = astformatter.ASTFormatter()
code_str = formatter.format(ast_fn, mode='exec')
import ast
mod = ast.parse(code_str)
mod_with_linenos = ast.fix_missing_locations(mod)
code = compile(mod_with_linenos, '<string>', 'exec')
scope = {}
exec code in scope
fn = scope['fn_name_props']
result = fn(shape, props, fid, meta)
self.assertIsNone(result)
class GenerateSQLTest(unittest.TestCase):
def test_generate_sql(self):
from vectordatasource.meta.sql import write_sql
from cStringIO import StringIO
io = StringIO()
# this should throw if there's an error.
write_sql(io)
if __name__ == '__main__':
unittest.main()
| mit |
openshift/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.40/roles/openshift_health_checker/openshift_checks/logging/kibana.py | 39 | 9211 | """
Module for performing checks on a Kibana logging deployment
"""
import json
import ssl
# pylint can't find the package when its installed in virtualenv
# pylint: disable=import-error,no-name-in-module
from ansible.module_utils.six.moves.urllib import request
# pylint: disable=import-error,no-name-in-module
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
class Kibana(LoggingCheck):
"""Module that checks an integrated logging Kibana deployment"""
name = "kibana"
tags = ["health", "logging"]
def run(self):
"""Check various things and gather errors. Returns: result as hash"""
kibana_pods = self.get_pods_for_component("kibana")
self.check_kibana(kibana_pods)
self.check_kibana_route()
# TODO(lmeyer): run it all again for the ops cluster
return {}
def _verify_url_internal(self, url):
"""
Try to reach a URL from the host.
Returns: success (bool), reason (for failure)
"""
args = dict(
url=url,
follow_redirects='none',
validate_certs='no', # likely to be signed with internal CA
# TODO(lmeyer): give users option to validate certs
status_code=302,
)
result = self.execute_module('uri', args)
if result.get('failed'):
return result['msg']
return None
@staticmethod
def _verify_url_external(url):
"""
Try to reach a URL from ansible control host.
Raise an OpenShiftCheckException if anything goes wrong.
"""
# This actually checks from the ansible control host, which may or may not
# really be "external" to the cluster.
# Disable SSL cert validation to work around internally signed certs
ctx = ssl.create_default_context()
ctx.check_hostname = False # or setting CERT_NONE is refused
ctx.verify_mode = ssl.CERT_NONE
# Verify that the url is returning a valid response
try:
# We only care if the url connects and responds
return_code = request.urlopen(url, context=ctx).getcode()
except HTTPError as httperr:
return httperr.reason
except URLError as urlerr:
return str(urlerr)
# there appears to be no way to prevent urlopen from following redirects
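        # (a custom urllib HTTPRedirectHandler could be used to suppress the
        # redirects, but checking the final status code afterwards is simpler.)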
if return_code != 200:
return 'Expected success (200) but got return code {}'.format(int(return_code))
return None
def check_kibana(self, pods):
"""Check to see if Kibana is up and working. Raises OpenShiftCheckException if not."""
if not pods:
raise OpenShiftCheckException(
"MissingComponentPods",
"There are no Kibana pods deployed, so no access to the logging UI."
)
not_running = self.not_running_pods(pods)
if len(not_running) == len(pods):
raise OpenShiftCheckException(
"NoRunningPods",
"No Kibana pod is in a running state, so there is no access to the logging UI."
)
elif not_running:
raise OpenShiftCheckException(
"PodNotRunning",
"The following Kibana pods are not currently in a running state:\n"
" {pods}\n"
"However at least one is, so service may not be impacted.".format(
pods="\n ".join(pod['metadata']['name'] for pod in not_running)
)
)
def _get_kibana_url(self):
"""
Get kibana route or report error.
Returns: url
"""
# Get logging url
get_route = self.exec_oc("get route logging-kibana -o json", [])
if not get_route:
raise OpenShiftCheckException(
'no_route_exists',
'No route is defined for Kibana in the logging namespace,\n'
'so the logging stack is not accessible. Is logging deployed?\n'
'Did something remove the logging-kibana route?'
)
try:
route = json.loads(get_route)
# check that the route has been accepted by a router
ingress = route["status"]["ingress"]
except (ValueError, KeyError):
raise OpenShiftCheckException(
'get_route_failed',
'"oc get route" returned an unexpected response:\n' + get_route
)
# ingress can be null if there is no router, or empty if not routed
if not ingress or not ingress[0]:
raise OpenShiftCheckException(
'route_not_accepted',
'The logging-kibana route is not being routed by any router.\n'
'Is the router deployed and working?'
)
host = route.get("spec", {}).get("host")
if not host:
raise OpenShiftCheckException(
'route_missing_host',
'The logging-kibana route has no hostname defined,\n'
'which should never happen. Did something alter its definition?'
)
return 'https://{}/'.format(host)
def check_kibana_route(self):
"""
Check to see if kibana route is up and working.
Raises exception if not.
"""
kibana_url = self._get_kibana_url()
# first, check that kibana is reachable from the master.
error = self._verify_url_internal(kibana_url)
if error:
if 'urlopen error [Errno 111] Connection refused' in error:
raise OpenShiftCheckException(
'FailedToConnectInternal',
'Failed to connect from this master to Kibana URL {url}\n'
'Is kibana running, and is at least one router routing to it?'.format(url=kibana_url)
)
elif 'urlopen error [Errno -2] Name or service not known' in error:
raise OpenShiftCheckException(
'FailedToResolveInternal',
'Failed to connect from this master to Kibana URL {url}\n'
'because the hostname does not resolve.\n'
'Is DNS configured for the Kibana hostname?'.format(url=kibana_url)
)
elif 'Status code was not' in error:
raise OpenShiftCheckException(
'WrongReturnCodeInternal',
'A request from this master to the Kibana URL {url}\n'
'did not return the correct status code (302).\n'
'This could mean that Kibana is malfunctioning, the hostname is\n'
'resolving incorrectly, or other network issues. The output was:\n'
' {error}'.format(url=kibana_url, error=error)
)
raise OpenShiftCheckException(
'MiscRouteErrorInternal',
'Error validating the logging Kibana route internally:\n' + error
)
# in production we would like the kibana route to work from outside the
# cluster too; but that may not be the case, so allow disabling just this part.
if self.get_var("openshift_check_efk_kibana_external", default="True").lower() != "true":
return
error = self._verify_url_external(kibana_url)
if not error:
return
error_fmt = (
'Error validating the logging Kibana route:\n{error}\n'
'To disable external Kibana route validation, set the variable:\n'
' openshift_check_efk_kibana_external=False'
)
if 'urlopen error [Errno 111] Connection refused' in error:
msg = (
'Failed to connect from the Ansible control host to Kibana URL {url}\n'
'Is the router for the Kibana hostname exposed externally?'
).format(url=kibana_url)
raise OpenShiftCheckException('FailedToConnect', error_fmt.format(error=msg))
elif 'urlopen error [Errno -2] Name or service not known' in error:
msg = (
'Failed to resolve the Kibana hostname in {url}\n'
'from the Ansible control host.\n'
'Is DNS configured to resolve this Kibana hostname externally?'
).format(url=kibana_url)
raise OpenShiftCheckException('FailedToResolve', error_fmt.format(error=msg))
elif 'Expected success (200)' in error:
msg = (
'A request to Kibana at {url}\n'
'returned the wrong error code:\n'
' {error}\n'
'This could mean that Kibana is malfunctioning, the hostname is\n'
'resolving incorrectly, or other network issues.'
).format(url=kibana_url, error=error)
raise OpenShiftCheckException('WrongReturnCode', error_fmt.format(error=msg))
raise OpenShiftCheckException(
'MiscRouteError',
'Error validating the logging Kibana route externally:\n' + error
)
| apache-2.0 |
brittanystoroz/kitsune | kitsune/gallery/tests/test_templates.py | 6 | 5854 | from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.gallery.models import Image, Video
from kitsune.gallery.tests import ImageFactory, VideoFactory
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.tests import TestCase, get, LocalizingClient, post
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import UserFactory
class GalleryPageCase(TestCase):
def tearDown(self):
Image.objects.all().delete()
super(GalleryPageCase, self).tearDown()
def test_gallery_images(self):
"""Test that all images show up on images gallery page.
Also, Make sure they don't show up on videos page.
"""
img = ImageFactory()
response = get(self.client, 'gallery.gallery', args=['image'])
eq_(200, response.status_code)
doc = pq(response.content)
imgs = doc('#media-list li img')
eq_(1, len(imgs))
eq_(img.thumbnail_url_if_set(), imgs[0].attrib['src'])
def test_gallery_locale(self):
"""Test that images only show for their set locale."""
ImageFactory(locale='es')
url = reverse('gallery.gallery', args=['image'])
response = self.client.get(url, follow=True)
eq_(200, response.status_code)
doc = pq(response.content)
imgs = doc('#media-list li img')
eq_(0, len(imgs))
locale_url = reverse('gallery.gallery', locale='es',
args=['image'])
response = self.client.get(locale_url, follow=True)
eq_(200, response.status_code)
doc = pq(response.content)
imgs = doc('#media-list li img')
eq_(1, len(imgs))
class GalleryAsyncCase(TestCase):
def tearDown(self):
Image.objects.all().delete()
super(GalleryAsyncCase, self).tearDown()
def test_gallery_image_list(self):
"""Test for ajax endpoint without search parameter."""
img = ImageFactory()
url = urlparams(reverse('gallery.async'), type='image')
response = self.client.get(url, follow=True)
eq_(200, response.status_code)
doc = pq(response.content)
imgs = doc('#media-list li img')
eq_(1, len(imgs))
eq_(img.thumbnail_url_if_set(), imgs[0].attrib['src'])
def test_gallery_image_search(self):
"""Test for ajax endpoint with search parameter."""
img = ImageFactory()
url = urlparams(reverse('gallery.async'), type='image', q='foobar')
response = self.client.get(url, follow=True)
eq_(200, response.status_code)
doc = pq(response.content)
imgs = doc('#media-list li img')
eq_(0, len(imgs))
url = urlparams(reverse('gallery.async'), type='image', q=img.title)
response = self.client.get(url, follow=True)
eq_(200, response.status_code)
doc = pq(response.content)
imgs = doc('#media-list li img')
eq_(1, len(imgs))
eq_(img.thumbnail_url_if_set(), imgs[0].attrib['src'])
class GalleryUploadTestCase(TestCase):
client_class = LocalizingClient
def setUp(self):
super(GalleryUploadTestCase, self).setUp()
self.u = UserFactory()
self.client.login(username=self.u.username, password='testpass')
def tearDown(self):
Image.objects.all().delete()
Video.objects.all().delete()
super(GalleryUploadTestCase, self).tearDown()
def test_image_draft_shows(self):
"""The image draft is loaded for this user."""
img = ImageFactory(is_draft=True, creator=self.u)
response = get(self.client, 'gallery.gallery', args=['image'])
eq_(200, response.status_code)
doc = pq(response.content)
assert doc('.file.preview img').attr('src').endswith(img.file.name)
eq_(1, doc('.file.preview img').length)
def test_image_draft_post(self):
"""Posting to the page saves the field values for the image draft."""
ImageFactory(is_draft=True, creator=self.u)
response = post(self.client, 'gallery.gallery',
{'description': '??', 'title': 'test'}, args=['image'])
eq_(200, response.status_code)
doc = pq(response.content)
eq_('??', doc('#gallery-upload-modal textarea').html().strip())
eq_('test', doc('#gallery-upload-modal input[name="title"]').val())
def test_video_draft_post(self):
"""Posting to the page saves the field values for the video draft."""
VideoFactory(is_draft=True, creator=self.u)
response = post(self.client, 'gallery.gallery',
{'title': 'zTestz'}, args=['image'])
eq_(200, response.status_code)
doc = pq(response.content)
# Preview for all 3 video formats: flv, ogv, webm
eq_('zTestz', doc('#gallery-upload-modal input[name="title"]').val())
def test_modal_locale_selected(self):
"""Locale value is selected for upload modal."""
response = get(self.client, 'gallery.gallery', args=['image'],
locale='fr')
doc = pq(response.content)
eq_('fr',
doc('#gallery-upload-image option[selected="selected"]').val())
class MediaPageCase(TestCase):
def tearDown(self):
Image.objects.all().delete()
super(MediaPageCase, self).tearDown()
def test_image_media_page(self):
"""Test the media page."""
img = ImageFactory()
response = self.client.get(img.get_absolute_url(), follow=True)
eq_(200, response.status_code)
doc = pq(response.content)
eq_(img.title, doc('h1').text())
eq_(img.description, doc('#media-object div.description').text())
eq_(img.file.url, doc('#media-view img')[0].attrib['src'])
| bsd-3-clause |
mmoya/ansible | v2/test/parsing/vault/test_vault.py | 87 | 5672 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import shutil
import time
import tempfile
import six
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.utils.unicode import to_bytes, to_unicode
from ansible import errors
from ansible.parsing.vault import VaultLib
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultLib(unittest.TestCase):
def test_methods_exist(self):
v = VaultLib('ansible')
slots = ['is_encrypted',
'encrypt',
'decrypt',
'_add_header',
'_split_header',]
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
def test_is_encrypted(self):
v = VaultLib(None)
assert not v.is_encrypted(u"foobar"), "encryption check on plaintext failed"
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
assert v.is_encrypted(data), "encryption check on headered text failed"
def test_add_header(self):
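        # a real vault header looks like "$ANSIBLE_VAULT;1.1;AES256"; this test
        # uses a dummy cipher name to exercise the "$label;version;cipher" layout.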
v = VaultLib('ansible')
v.cipher_name = "TEST"
sensitive_data = "ansible"
data = v._add_header(sensitive_data)
lines = data.split(b'\n')
assert len(lines) > 1, "failed to properly add header"
header = to_unicode(lines[0])
        assert header.endswith(';TEST'), "header does not end with cipher name"
header_parts = header.split(';')
assert len(header_parts) == 3, "header has the wrong number of parts"
assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
assert header_parts[1] == v.version, "header version is incorrect"
        assert header_parts[2] == 'TEST', "header does not end with cipher name"
def test_split_header(self):
v = VaultLib('ansible')
data = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
rdata = v._split_header(data)
lines = rdata.split(b'\n')
assert lines[0] == b"ansible"
assert v.cipher_name == 'TEST', "cipher name was not set"
assert v.version == "9.9"
def test_encrypt_decrypt_aes(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = u'AES'
enc_data = v.encrypt("foobar")
dec_data = v.decrypt(enc_data)
assert enc_data != "foobar", "encryption failed"
assert dec_data == "foobar", "decryption failed"
def test_encrypt_decrypt_aes256(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES256'
enc_data = v.encrypt("foobar")
dec_data = v.decrypt(enc_data)
assert enc_data != "foobar", "encryption failed"
assert dec_data == "foobar", "decryption failed"
def test_encrypt_encrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES'
data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible"))
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert error_hit, "No error was thrown when trying to encrypt data with a header"
def test_decrypt_decrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
dec_data = v.decrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert error_hit, "No error was thrown when trying to decrypt data without a header"
def test_cipher_not_set(self):
# not setting the cipher should default to AES256
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
| gpl-3.0 |
switchkiller/ProjDjanko | lib/python2.7/site-packages/django/contrib/contenttypes/views.py | 124 | 3575 | from __future__ import unicode_literals
from django import http
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.requests import RequestSite
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
def shortcut(request, content_type_id, object_id):
"""
Redirect to an object's page based on a content-type ID and an object ID.
"""
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise http.Http404(_("Content type %(ct_id)s object has no associated model") %
{'ct_id': content_type_id})
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDoesNotExist, ValueError):
raise http.Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") %
{'ct_id': content_type_id, 'obj_id': object_id})
try:
get_absolute_url = obj.get_absolute_url
except AttributeError:
raise http.Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") %
{'ct_name': content_type.name})
absurl = get_absolute_url()
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith(('http://', 'https://', '//')):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
if apps.is_installed('django.contrib.sites'):
Site = apps.get_model('sites.Site')
opts = obj._meta
        # First, look for a many-to-many relationship to Site.
for field in opts.many_to_many:
if field.rel.to is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
if object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = Site.objects.get_current(request).domain
except Site.DoesNotExist:
pass
else:
# Fall back to the current request's site.
object_domain = RequestSite(request).domain
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = request.scheme
return http.HttpResponseRedirect('%s://%s%s'
% (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
| gpl-2.0 |
kbrebanov/ansible | lib/ansible/modules/network/nxos/nxos_ntp_options.py | 2 | 5577 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ntp_options
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages NTP options.
description:
- Manages NTP options, e.g. authoritative server and logging.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- At least one of C(master) or C(logging) params must be supplied.
- When C(state=absent), boolean parameters are flipped,
e.g. C(master=true) will disable the authoritative server.
- When C(state=absent) and C(master=true), the stratum will be removed as well.
- When C(state=absent) and C(master=false), the stratum will be configured
to its default value, 8.
options:
master:
description:
- Sets whether the device is an authoritative NTP server.
required: false
default: null
choices: ['true','false']
stratum:
description:
- If C(master=true), an optional stratum can be supplied (1-15).
The device default is 8.
required: false
default: null
logging:
description:
- Sets whether NTP logging is enabled on the device.
required: false
default: null
choices: ['true','false']
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP options configuration
- nxos_ntp_options:
master: true
stratum: 12
logging: false
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
updates:
description: command sent to the device
returned: always
type: list
sample: ["no ntp logging", "ntp master 11"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def get_current(module):
cmd = ('show running-config', 'show ntp logging')
output = run_commands(module, ({'command': cmd[0], 'output': 'text'},
{'command': cmd[1], 'output': 'text'}))
match = re.search(r"^ntp master(?: (\d+))", output[0], re.M)
if match:
master = True
stratum = match.group(1)
else:
master = False
stratum = None
logging = 'enabled' in output[1].lower()
return {'master': master, 'stratum': stratum, 'logging': logging}
def main():
argument_spec = dict(
master=dict(required=False, type='bool'),
stratum=dict(required=False, type='str', default='8'),
logging=dict(required=False, type='bool'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
master = module.params['master']
stratum = module.params['stratum']
logging = module.params['logging']
state = module.params['state']
if stratum:
try:
stratum_int = int(stratum)
if stratum_int < 1 or stratum_int > 15:
raise ValueError
except ValueError:
module.fail_json(msg='stratum must be an integer between 1 and 15')
desired = {'master': master, 'stratum': stratum, 'logging': logging}
current = get_current(module)
result = {'changed': False}
commands = list()
if state == 'absent':
if current['master']:
commands.append('no ntp master')
if current['logging']:
commands.append('no ntp logging')
elif state == 'present':
if desired['master'] and desired['master'] != current['master']:
if desired['stratum']:
commands.append('ntp master %s' % stratum)
else:
commands.append('ntp master')
elif desired['stratum'] and desired['stratum'] != current['stratum']:
commands.append('ntp master %s' % stratum)
if desired['logging'] and desired['logging'] != current['logging']:
if desired['logging']:
commands.append('ntp logging')
else:
commands.append('no ntp logging')
result['commands'] = commands
result['updates'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
Code4SA/odac-victim-empowerment | msg_handler/menu.py | 1 | 22821 | menu = {
"title": "Main menu",
"content": [
{
"title": "I've been raped / sexually abused what do I do now?",
"content": [
{
"title": "Important Information",
"content": [
"Make sure that you are safe. You can call the police to come to help you",
"It is important to go to a hospital or clinic as soon as possible. HIV medications need to be taken within 72 hours (3 days) of the offence",
"You may go directly to a hospital for treatment, or to the police to report the crime, or you may report to a social worker",
"Try not to wash, shower or change clothes before you report to the hospital or clinic or the police. If a condom was used keep it for evidence",
"If there was contact in your mouth, try not to smoke, rinse your mouth, brush teeth, eat or drink anything. There may be evidence to collect.",
]
},
{
"title": "Reporting",
"content": [
"You do not have to pay any money to report a case",
"You can take someone you trust with you for support",
"You may go directly to a hospital for treatment, or to the nearest police station to report the crime, or you may report to a social worker",
"A family member, friend, psychologist or teacher can report on your behalf, but you will have to give a statement later if you want to lay a criminal charge",
]
},
{
"title": "Medical Treatment",
"content": [
"It is important to go to a hospital or clinic as soon as possible. HIV medications must be tas soon as possible but within 72 hours / 3 days of the rape",
"You don't need to report to the police first to get medical services. If you get medical treatment first, you can still report to the police later",
"If you go to a police station first the police should take you to the hospital as soon as possible",
]
},
{
"title": "Evidence",
"content": [
"This information is about how to preserve evidence about the rapist(s) and crime. If you can't or haven't followed these steps, you can still report the rape.",
"Part of the police investigation is a medical examination for evidence. Try not to wash, shower or change clothes before you go to the hospital/police",
"If you changed your clothes, put them in a paper bag or wrap them in newspaper.",
"If there was contact in your mouth, try not to smoke, rinse your mouth, brush teeth , eat or drink anything. There may be evidence to collect.",
"The clothes you were wearing during the rape will be taken for evidence. Try to take extra clothes along with you.",
"If you told someone about what happened to you, after the rape, write down the name and number of that person. He/she may be a witness.",
]
},
{
"title": "Alcohol / Drugs",
"content": [
"Rape is still a crime even if you were under the influence of drugs or alcohol at the time.",
"Even if you were drinking or using drugs before, during or after the rape, you may still report.",
"You should tell the police and health worker about using drugs or alcohol. This information can not be used against you.",
]
},
]
},
{
"title": "Hospital / Clinic",
"content": [
{
"title": "What can I expect?",
"content": [
"When you go to the hospital or clinic, or have the forensic exam, you have the right to have someone of your choice with you to support you.",
"You may be offered counselling at the hospital/clinic. The counsellor is there for support and to tell you your rights and what to expect there",
"If you have serious injuries (e.g. broken bones, heavy bleeding) the health worker will treat these first.",
"The law says that you have a right to medical treatment, a forensic exam and medication to prevent HIV, STIs and pregnancy.",
"You should be examined by a health worker, even if you can't see or feel injuries. Write down the name(s) and contact details of the health worker( s).",
"You do not have to report to the police before getting medical services.",
"All of these treatments are free of charge at public hospitals/clinics.",
"It is your right to ask the health worker questions about the treatments and procedures. They must explain them to you.",
"Some of these medications may make you feel nauseous. Ask your health worker for medicine to prevent you from feeling nauseous.",
]
},
{
"title": "HIV",
"content": [
"It is important to start the medication to prevent you from getting HIV as soon as possible. This must be taken within 72 hours (3 days) of the rape.",
"The counsellor or health worker will ask if they can do an HIV test because you can't take medication to prevent HIV if you are already HIV positive.",
"Write down when you started the medication. Set a reminder for yourself in your cellphone or diary to make sure you take your medication every day.",
"Set a reminder when the follow up visits are, and write down the address of the hospital/clinic to go to. You may get more medication at these visits.",
"This HIV prevention medication must be taken every day for 28 days (4 weeks).",
]
},
{
"title": "Pregnancy",
"content": [
"You have the right to medication to prevent pregnancy because of the rape. This medication must be taken within 5 days.",
"If you become pregnant because of rape you can have an abortion free of charge if you are less than 20 weeks pregnant. Ask the health worker about this.",
"If you become pregnant because of rape and you don't want an abortion, talk to your social worker about adoption or other options.",
]
},
{
"title": "Sexually Transmitted Infection (STI/STD)",
"content": [
"You have the right to medication to treat and prevent an STI/STD after rape.",
"There is no time limit to starting this medicine, but taking it soonest is best.",
]
},
{
"title": "Forensic Exam",
"content": [
"You should have a forensic exam for evidence even if you haven't yet decided to open a case. This evidence can be used if you decide to lay a charge",
"The health worker will ask if they can do a forensic exam on you to find evidence for the case. He/she will examine where you were touched.",
"The forensic exam has two parts: first, a physical exam where the doctor records your injuries and second taking biological evidence (like swabs)",
"If you are wearing the clothes from the rape, the nurse will take your clothes and underwear for evidence.",
"After the forensic examination is finished, you may bath/shower, smoke and brush your teeth.",
]
},
{
"title": "Counselling",
"content": [
"You may need ongoing counselling and support. Ask the health worker/police/prosecutor to refer you for counselling. Write down where you should go.",
"Counselling is safe and confidential, and can help you deal with the trauma of having been raped.",
"Counselling can also support you through the police investigation and court process if you decide to open a case.",
]
},]
},
{
"title": "Police Station",
"content": [
{
"title": "Reporting to the police",
"content": [
"To lay a criminal charge, you can report the offence to the police. By law, they must assist you immediately even if the rape occured long ago",
"You do not have to pay money to report a case.",
"Try not to wash or change clothes before you report to the hospital, clinic or SAPS.Keep any sanitary pads, tampons and condoms for evidence",
"If you must change, put the clothes in a paper bag or wrap the clothes in newspaper.",
"If there was contact in your mouth, try not to smoke, rinse your mouth, brush teeth, eat or drink anything. There may be evidence to collect.",
"The police must assist you even if you live in a different area to the police station or if the rape happened in different area",
"The police must take you to a private room to hear what happened, take the basic details of your case, and call the investigating officer",
"The police will arrange to transport you to a hospital/clinic for a medical examination and treatment.",
"You may need ongoing counselling and support. Ask the police to refer you for counselling. Write down the where you should go.",
]
},
{
"title": "Case number",
"content": [
"A police case number is called a CAS number. It will be sent to you by SMS. You will get an SMS with the name of the investigating officer",
]
},
{
"title": "Investigation",
"content": [
"You will get an SMS with the name and contact number of the investigating officer for your case. Write down or save his/her name and contact details.",
"The investigating officer should contact you within 36 hours to take down the full details of what happened to you. This is called a statement.",
"Let the investigating officer know if your contact details change. This is very important because they need to contact you throughout your case.",
]
},
{
"title": "Statement",
"content": [
"You may have told the police the basic details of what happened. Within 36 hours you will be asked give all the information. This is called a statement.",
"You have the right to have someone that you trust there to support you when you give your statement.",
"You have the right to give the statement in your own language. Ask the police to get someone to help you do this.",
"When you make your statement try to remember as many details as possible. The police will ask you questions to help get all the information.",
"The investigating officer will write everything down and ask you to sign it. Only sign the statement if you agree with what is written.",
"If anything is left out of your statement, or if there are any mistakes, point these out and ask the police officer to correct it.",
"You should receive a copy of your statement from the police.",
"If you remember something else after you have made your statement, call the investigating officer to add this. Ask for a new copy of your statement.",
]
},
{
"title": "Arrest and Safety",
"content": [
"The investigating officer must let you know if the rapist(s) has been arrested or not.",
"The rapist may be out of prison while awaiting the court case. This is called bail. The investigating officer will notify you if the rapist got bail.",
"If the rapist(s) has not been arrested and you see them, make sure that you are safe and then call the investigating officer to arrest them.",
"If the rapist or his family/friends contacts, follows or threatens you, phone the investigating officer immediately.",
"If you're worried about the rapist(s) or their family/friends contacting, following or threatening you, apply for a protection order from the court.",
]
},
{
"title": "Identity parade",
"content": [
"If the police have arrested the rapist you may be asked to point them out from a group of people.",
"This might be scary, but the rapist will not be able to see you because they can't see through the glass.",
]
},
{
"title": "Testing the rapist(s) for HIV",
"content": [
"You can ask to have the rapist tested for HIV. This must be done within 90 days. Ask the investigating officer to help you with this.",
"When the results of the test come back they will be given to you privately. You can talk to your counsellor about what the results mean",
]
},
{
"title": "Staying in Contact",
"content": [
"It is very important to give the investigating officer the right phone number and address so that they can stay in touch with you about your case.",
"Remember to let them know if your phone number or address changes.",
]
},
]
},
{
"title": "Court",
"content": [
{
"title": "Overview",
"content": [
"When the police have investigated your case it will be sent to the lawyers at the court. This person is called the prosecutor.",
"A case can only go to trial if there is enough evidence. The prosecutor will tell you whether your case will go to trial or not.",
"Many factors go into the decision to prosecute a case. If your case doesn't go to trial, it doesn't mean that no-one believes you, or that you were not raped.",
"You will not get your own lawyer. The prosecutor presents the case against the rapist in court, and tells what happened to you.",
"It is important to write down the name and contact details of the prosecutor and to let him/her know if your own contact details have changed.",
"You may need ongoing counselling and support. Ask the prosecutor to refer you for counselling. Write down the where you should go.",
]
},
{
"title": "Bail",
"content": [
"After being arrested, the rapist(s) will appear in court to apply for bail. Bail is a temporary release from prison while waiting for the court case.",
"You do not have to go to the bail hearing, but you may if you want to. It may be upsetting to see the rapist so take someone with you for support.",
"Bail usually has conditions. The rapist may have to give a deposit to guarantee that they will appear at the trial. They may have to promise not to contact you.",
"Before the bail hearing you should tell the investigating officer or prosecutor any reasons that the rapist(s) should not get bail (e.g. threats or stalking).",
"If you're worried about the rapist or their family/friends contacting, following or threatening you, you can apply for a protection order from the court.",
"If the rapist or his family/friends contacts, follows or threatens you, phone the investigating officer immediately.",
"If the rapist(s) does not receive bail he/she will go to prison until the trial.",
]
},
{
"title": "Preparing for Trial",
"content": [
"The prosecutor will ask you to meet, to go over your statement. If you don't agree with anything in the statement, point it out to him /her.",
"Once the trial date is set the prosecutor should thoroughly discuss your case and explain your role. Write down the date of this meeting.",
"Ask the prosecutor if there are any services that can prepare you for trial and support you in court on the day.",
"You can also take a friend or family with you to court for support, but they may not be able to be with you in court when you testify.",
"Ask the prosecutor if you can testify by CCTV (camera) so that you won't have to tell what happened to you in front of the rapist.",
"You will receive a written notice to testify in court. This is called a subpoena, and it must be hand-delivered so keep your address up to date.",
"The prosecutor should contact you before the trial to get a 'victim impact statement' that explains the effect of the rape on your life ",
"If you are not contacted about a victim impact statement you should call the investigating officer/prosecutor.",
"Write down the date that you gave your victim impact statement.",
]
},
{
"title": "Trial",
"content": [
"At the trial, you will be asked to tell what happened to you in court in front of the magistrate and the lawyers, and the rapist.",
"At the trial you will hear if you will be allowed to testify by CCTV (camera) so that you won't have to tell what happened to you in front of the rapist.",
"Once the magistrate has heard evidence from the prosecutor and the rapist's lawyer, he/she will decide if there is enough proof to find the accused guilty.",
"If the rapist is not found guilty it doesn't mean that no-one believed you, or that the rape did not happen. Ask the prosecutor to explain the outcome.",
"If the rapist(s) is found guilty there will be a hearing to decide on their punishment.",
"The magistrate will use your 'victim impact statement' to help him/her decide on the punishment for the rapist.",
"If you have not given a victim impact statement you should call the prosecutor.",
]
},
]
},
{
"title": "Welfare / NGO",
"content": [
{
"title": "Counselling",
"content": [
"Counselling is a safe space for you to talk about how the rape has affected you and prepare you for what to expect during the court process.",
"Ask the investigating officer or health worker at the hospital to give you the contact details of an organization near you and write these down.",
"Counselling can help you understand what you are going through, overcome the effects of trauma and cope with reporting the rape.",
"Call the Rape Crisis 24 hour Helpline to speak to a rape counsellor or to an appointment: 021 447-9762, 021 633-9229 or 021 361-9085.",
"Email [email protected] to email a rape counsellor or if you have any concerns about your case and a counsellor will reply to you.",
]
},
{
"title": "Safety",
"content": [
"You can also report to a social worker. If you do report to a social worker write their name and telephone number down.",
"The social worker cannot make you report to the police, but you can report later if you want to.",
"If you are under 18 years old, a social worker must tell the police about the offence.",
"If you are living with the rapist(s), your social worker will try to protect you against further abuse and arrange for you and your family to be safe",
"If the rapist is the breadwinner and you are worried that you will not have a house and food the social worker can help to make alternative arrangements",
]
},
]
},
{
"title": "Complaint",
"content": [
{
"title": "Counselling",
"content": [
"If a service provider has treated you unfairly or didn't do what they are legally required to do you can make a complaint.",
"You can call Rape Crisis to help you with the compliant",
"Please call Rape Crisis: Observatory 021 447 9762 / Athlone 021 633 9229 / Khayelitsha 021 361 9085",
]
},
]
},
]
}
| apache-2.0 |
winningsix/hive | metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py | 3 | 317252 | #
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import fb303.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except ImportError:
  fastbinary = None
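# When the fastbinary C extension is available, the read()/write() methods
# below hand whole-struct (de)serialization to it; otherwise they fall back
# to the pure-Python, field-by-field loops.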
class HiveObjectType:
GLOBAL = 1
DATABASE = 2
TABLE = 3
PARTITION = 4
COLUMN = 5
_VALUES_TO_NAMES = {
1: "GLOBAL",
2: "DATABASE",
3: "TABLE",
4: "PARTITION",
5: "COLUMN",
}
_NAMES_TO_VALUES = {
"GLOBAL": 1,
"DATABASE": 2,
"TABLE": 3,
"PARTITION": 4,
"COLUMN": 5,
}
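# Usage sketch (illustrative, not compiler output): the paired maps convert
# between wire integers and symbolic names in both directions, e.g.
#   HiveObjectType._VALUES_TO_NAMES[3]       -> "TABLE"
#   HiveObjectType._NAMES_TO_VALUES["TABLE"] -> 3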
class PrincipalType:
USER = 1
ROLE = 2
GROUP = 3
_VALUES_TO_NAMES = {
1: "USER",
2: "ROLE",
3: "GROUP",
}
_NAMES_TO_VALUES = {
"USER": 1,
"ROLE": 2,
"GROUP": 3,
}
class PartitionEventType:
LOAD_DONE = 1
_VALUES_TO_NAMES = {
1: "LOAD_DONE",
}
_NAMES_TO_VALUES = {
"LOAD_DONE": 1,
}
class TxnState:
COMMITTED = 1
ABORTED = 2
OPEN = 3
_VALUES_TO_NAMES = {
1: "COMMITTED",
2: "ABORTED",
3: "OPEN",
}
_NAMES_TO_VALUES = {
"COMMITTED": 1,
"ABORTED": 2,
"OPEN": 3,
}
class LockLevel:
DB = 1
TABLE = 2
PARTITION = 3
_VALUES_TO_NAMES = {
1: "DB",
2: "TABLE",
3: "PARTITION",
}
_NAMES_TO_VALUES = {
"DB": 1,
"TABLE": 2,
"PARTITION": 3,
}
class LockState:
ACQUIRED = 1
WAITING = 2
ABORT = 3
NOT_ACQUIRED = 4
_VALUES_TO_NAMES = {
1: "ACQUIRED",
2: "WAITING",
3: "ABORT",
4: "NOT_ACQUIRED",
}
_NAMES_TO_VALUES = {
"ACQUIRED": 1,
"WAITING": 2,
"ABORT": 3,
"NOT_ACQUIRED": 4,
}
class LockType:
SHARED_READ = 1
SHARED_WRITE = 2
EXCLUSIVE = 3
_VALUES_TO_NAMES = {
1: "SHARED_READ",
2: "SHARED_WRITE",
3: "EXCLUSIVE",
}
_NAMES_TO_VALUES = {
"SHARED_READ": 1,
"SHARED_WRITE": 2,
"EXCLUSIVE": 3,
}
class CompactionType:
MINOR = 1
MAJOR = 2
_VALUES_TO_NAMES = {
1: "MINOR",
2: "MAJOR",
}
_NAMES_TO_VALUES = {
"MINOR": 1,
"MAJOR": 2,
}
class GrantRevokeType:
GRANT = 1
REVOKE = 2
_VALUES_TO_NAMES = {
1: "GRANT",
2: "REVOKE",
}
_NAMES_TO_VALUES = {
"GRANT": 1,
"REVOKE": 2,
}
class EventRequestType:
INSERT = 1
UPDATE = 2
DELETE = 3
_VALUES_TO_NAMES = {
1: "INSERT",
2: "UPDATE",
3: "DELETE",
}
_NAMES_TO_VALUES = {
"INSERT": 1,
"UPDATE": 2,
"DELETE": 3,
}
class FunctionType:
JAVA = 1
_VALUES_TO_NAMES = {
1: "JAVA",
}
_NAMES_TO_VALUES = {
"JAVA": 1,
}
class ResourceType:
JAR = 1
FILE = 2
ARCHIVE = 3
_VALUES_TO_NAMES = {
1: "JAR",
2: "FILE",
3: "ARCHIVE",
}
_NAMES_TO_VALUES = {
"JAR": 1,
"FILE": 2,
"ARCHIVE": 3,
}
class Version:
"""
Attributes:
- version
- comments
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'version', None, None, ), # 1
(2, TType.STRING, 'comments', None, None, ), # 2
)
def __init__(self, version=None, comments=None,):
self.version = version
self.comments = comments
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.version = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.comments = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Version')
if self.version is not None:
oprot.writeFieldBegin('version', TType.STRING, 1)
oprot.writeString(self.version)
oprot.writeFieldEnd()
if self.comments is not None:
oprot.writeFieldBegin('comments', TType.STRING, 2)
oprot.writeString(self.comments)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
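# Usage sketch (illustrative, not compiler output): structs in this module can
# be round-tripped through a binary protocol over an in-memory transport,
# using the TTransport and TBinaryProtocol modules imported above.
#
#   buf = TTransport.TMemoryBuffer()
#   Version(version='0.9.0', comments='demo').write(TBinaryProtocol.TBinaryProtocol(buf))
#
#   decoded = Version()
#   decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
#   assert decoded == Version(version='0.9.0', comments='demo')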
class FieldSchema:
"""
Attributes:
- name
- type
- comment
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'type', None, None, ), # 2
(3, TType.STRING, 'comment', None, None, ), # 3
)
def __init__(self, name=None, type=None, comment=None,):
self.name = name
self.type = type
self.comment = comment
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.type = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.comment = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('FieldSchema')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin('type', TType.STRING, 2)
oprot.writeString(self.type)
oprot.writeFieldEnd()
if self.comment is not None:
oprot.writeFieldBegin('comment', TType.STRING, 3)
oprot.writeString(self.comment)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Type:
"""
Attributes:
- name
- type1
- type2
- fields
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'type1', None, None, ), # 2
(3, TType.STRING, 'type2', None, None, ), # 3
(4, TType.LIST, 'fields', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 4
)
def __init__(self, name=None, type1=None, type2=None, fields=None,):
self.name = name
self.type1 = type1
self.type2 = type2
self.fields = fields
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.type1 = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.type2 = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.fields = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = FieldSchema()
_elem5.read(iprot)
self.fields.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Type')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.type1 is not None:
oprot.writeFieldBegin('type1', TType.STRING, 2)
oprot.writeString(self.type1)
oprot.writeFieldEnd()
if self.type2 is not None:
oprot.writeFieldBegin('type2', TType.STRING, 3)
oprot.writeString(self.type2)
oprot.writeFieldEnd()
if self.fields is not None:
oprot.writeFieldBegin('fields', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.fields))
for iter6 in self.fields:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class HiveObjectRef:
"""
Attributes:
- objectType
- dbName
- objectName
- partValues
- columnName
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'objectType', None, None, ), # 1
(2, TType.STRING, 'dbName', None, None, ), # 2
(3, TType.STRING, 'objectName', None, None, ), # 3
(4, TType.LIST, 'partValues', (TType.STRING,None), None, ), # 4
(5, TType.STRING, 'columnName', None, None, ), # 5
)
def __init__(self, objectType=None, dbName=None, objectName=None, partValues=None, columnName=None,):
self.objectType = objectType
self.dbName = dbName
self.objectName = objectName
self.partValues = partValues
self.columnName = columnName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.objectType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.objectName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.partValues = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in xrange(_size7):
_elem12 = iprot.readString();
self.partValues.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.columnName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('HiveObjectRef')
if self.objectType is not None:
oprot.writeFieldBegin('objectType', TType.I32, 1)
oprot.writeI32(self.objectType)
oprot.writeFieldEnd()
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 2)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.objectName is not None:
oprot.writeFieldBegin('objectName', TType.STRING, 3)
oprot.writeString(self.objectName)
oprot.writeFieldEnd()
if self.partValues is not None:
oprot.writeFieldBegin('partValues', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.partValues))
for iter13 in self.partValues:
oprot.writeString(iter13)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.columnName is not None:
oprot.writeFieldBegin('columnName', TType.STRING, 5)
oprot.writeString(self.columnName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PrivilegeGrantInfo:
"""
Attributes:
- privilege
- createTime
- grantor
- grantorType
- grantOption
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'privilege', None, None, ), # 1
(2, TType.I32, 'createTime', None, None, ), # 2
(3, TType.STRING, 'grantor', None, None, ), # 3
(4, TType.I32, 'grantorType', None, None, ), # 4
(5, TType.BOOL, 'grantOption', None, None, ), # 5
)
def __init__(self, privilege=None, createTime=None, grantor=None, grantorType=None, grantOption=None,):
self.privilege = privilege
self.createTime = createTime
self.grantor = grantor
self.grantorType = grantorType
self.grantOption = grantOption
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.privilege = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.createTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.grantor = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.grantorType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.grantOption = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PrivilegeGrantInfo')
if self.privilege is not None:
oprot.writeFieldBegin('privilege', TType.STRING, 1)
oprot.writeString(self.privilege)
oprot.writeFieldEnd()
if self.createTime is not None:
oprot.writeFieldBegin('createTime', TType.I32, 2)
oprot.writeI32(self.createTime)
oprot.writeFieldEnd()
if self.grantor is not None:
oprot.writeFieldBegin('grantor', TType.STRING, 3)
oprot.writeString(self.grantor)
oprot.writeFieldEnd()
if self.grantorType is not None:
oprot.writeFieldBegin('grantorType', TType.I32, 4)
oprot.writeI32(self.grantorType)
oprot.writeFieldEnd()
if self.grantOption is not None:
oprot.writeFieldBegin('grantOption', TType.BOOL, 5)
oprot.writeBool(self.grantOption)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class HiveObjectPrivilege:
"""
Attributes:
- hiveObject
- principalName
- principalType
- grantInfo
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 1
(2, TType.STRING, 'principalName', None, None, ), # 2
(3, TType.I32, 'principalType', None, None, ), # 3
(4, TType.STRUCT, 'grantInfo', (PrivilegeGrantInfo, PrivilegeGrantInfo.thrift_spec), None, ), # 4
)
def __init__(self, hiveObject=None, principalName=None, principalType=None, grantInfo=None,):
self.hiveObject = hiveObject
self.principalName = principalName
self.principalType = principalType
self.grantInfo = grantInfo
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.hiveObject = HiveObjectRef()
self.hiveObject.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.principalName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.principalType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.grantInfo = PrivilegeGrantInfo()
self.grantInfo.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('HiveObjectPrivilege')
if self.hiveObject is not None:
oprot.writeFieldBegin('hiveObject', TType.STRUCT, 1)
self.hiveObject.write(oprot)
oprot.writeFieldEnd()
if self.principalName is not None:
oprot.writeFieldBegin('principalName', TType.STRING, 2)
oprot.writeString(self.principalName)
oprot.writeFieldEnd()
if self.principalType is not None:
oprot.writeFieldBegin('principalType', TType.I32, 3)
oprot.writeI32(self.principalType)
oprot.writeFieldEnd()
if self.grantInfo is not None:
oprot.writeFieldBegin('grantInfo', TType.STRUCT, 4)
self.grantInfo.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PrivilegeBag:
"""
Attributes:
- privileges
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'privileges', (TType.STRUCT,(HiveObjectPrivilege, HiveObjectPrivilege.thrift_spec)), None, ), # 1
)
def __init__(self, privileges=None,):
self.privileges = privileges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.privileges = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in xrange(_size14):
_elem19 = HiveObjectPrivilege()
_elem19.read(iprot)
self.privileges.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PrivilegeBag')
if self.privileges is not None:
oprot.writeFieldBegin('privileges', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.privileges))
for iter20 in self.privileges:
iter20.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PrincipalPrivilegeSet:
"""
Attributes:
- userPrivileges
- groupPrivileges
- rolePrivileges
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'userPrivileges', (TType.STRING,None,TType.LIST,(TType.STRUCT,(PrivilegeGrantInfo, PrivilegeGrantInfo.thrift_spec))), None, ), # 1
(2, TType.MAP, 'groupPrivileges', (TType.STRING,None,TType.LIST,(TType.STRUCT,(PrivilegeGrantInfo, PrivilegeGrantInfo.thrift_spec))), None, ), # 2
(3, TType.MAP, 'rolePrivileges', (TType.STRING,None,TType.LIST,(TType.STRUCT,(PrivilegeGrantInfo, PrivilegeGrantInfo.thrift_spec))), None, ), # 3
)
def __init__(self, userPrivileges=None, groupPrivileges=None, rolePrivileges=None,):
self.userPrivileges = userPrivileges
self.groupPrivileges = groupPrivileges
self.rolePrivileges = rolePrivileges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.userPrivileges = {}
(_ktype22, _vtype23, _size21 ) = iprot.readMapBegin()
for _i25 in xrange(_size21):
_key26 = iprot.readString();
_val27 = []
(_etype31, _size28) = iprot.readListBegin()
for _i32 in xrange(_size28):
_elem33 = PrivilegeGrantInfo()
_elem33.read(iprot)
_val27.append(_elem33)
iprot.readListEnd()
self.userPrivileges[_key26] = _val27
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.groupPrivileges = {}
(_ktype35, _vtype36, _size34 ) = iprot.readMapBegin()
for _i38 in xrange(_size34):
_key39 = iprot.readString();
_val40 = []
(_etype44, _size41) = iprot.readListBegin()
for _i45 in xrange(_size41):
_elem46 = PrivilegeGrantInfo()
_elem46.read(iprot)
_val40.append(_elem46)
iprot.readListEnd()
self.groupPrivileges[_key39] = _val40
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.rolePrivileges = {}
(_ktype48, _vtype49, _size47 ) = iprot.readMapBegin()
for _i51 in xrange(_size47):
_key52 = iprot.readString();
_val53 = []
(_etype57, _size54) = iprot.readListBegin()
for _i58 in xrange(_size54):
_elem59 = PrivilegeGrantInfo()
_elem59.read(iprot)
_val53.append(_elem59)
iprot.readListEnd()
self.rolePrivileges[_key52] = _val53
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PrincipalPrivilegeSet')
if self.userPrivileges is not None:
oprot.writeFieldBegin('userPrivileges', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.userPrivileges))
for kiter60,viter61 in self.userPrivileges.items():
oprot.writeString(kiter60)
oprot.writeListBegin(TType.STRUCT, len(viter61))
for iter62 in viter61:
iter62.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.groupPrivileges is not None:
oprot.writeFieldBegin('groupPrivileges', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.groupPrivileges))
for kiter63,viter64 in self.groupPrivileges.items():
oprot.writeString(kiter63)
oprot.writeListBegin(TType.STRUCT, len(viter64))
for iter65 in viter64:
iter65.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.rolePrivileges is not None:
oprot.writeFieldBegin('rolePrivileges', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.rolePrivileges))
for kiter66,viter67 in self.rolePrivileges.items():
oprot.writeString(kiter66)
oprot.writeListBegin(TType.STRUCT, len(viter67))
for iter68 in viter67:
iter68.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
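# Usage sketch (illustrative, not compiler output): each map in this struct
# takes a principal name to the list of grants it holds, e.g.
#   PrincipalPrivilegeSet(
#       userPrivileges={'alice': [PrivilegeGrantInfo(privilege='SELECT')]})
# where 'alice' and 'SELECT' are made-up example values.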
class GrantRevokePrivilegeRequest:
"""
Attributes:
- requestType
- privileges
- revokeGrantOption
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'requestType', None, None, ), # 1
(2, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 2
(3, TType.BOOL, 'revokeGrantOption', None, None, ), # 3
)
def __init__(self, requestType=None, privileges=None, revokeGrantOption=None,):
self.requestType = requestType
self.privileges = privileges
self.revokeGrantOption = revokeGrantOption
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.requestType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.privileges = PrivilegeBag()
self.privileges.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.revokeGrantOption = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GrantRevokePrivilegeRequest')
if self.requestType is not None:
oprot.writeFieldBegin('requestType', TType.I32, 1)
oprot.writeI32(self.requestType)
oprot.writeFieldEnd()
if self.privileges is not None:
oprot.writeFieldBegin('privileges', TType.STRUCT, 2)
self.privileges.write(oprot)
oprot.writeFieldEnd()
if self.revokeGrantOption is not None:
oprot.writeFieldBegin('revokeGrantOption', TType.BOOL, 3)
oprot.writeBool(self.revokeGrantOption)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GrantRevokePrivilegeResponse:
"""
Attributes:
- success
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'success', None, None, ), # 1
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GrantRevokePrivilegeResponse')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 1)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Role:
"""
Attributes:
- roleName
- createTime
- ownerName
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'roleName', None, None, ), # 1
(2, TType.I32, 'createTime', None, None, ), # 2
(3, TType.STRING, 'ownerName', None, None, ), # 3
)
def __init__(self, roleName=None, createTime=None, ownerName=None,):
self.roleName = roleName
self.createTime = createTime
self.ownerName = ownerName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.roleName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.createTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.ownerName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Role')
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 1)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.createTime is not None:
oprot.writeFieldBegin('createTime', TType.I32, 2)
oprot.writeI32(self.createTime)
oprot.writeFieldEnd()
if self.ownerName is not None:
oprot.writeFieldBegin('ownerName', TType.STRING, 3)
oprot.writeString(self.ownerName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RolePrincipalGrant:
"""
Attributes:
- roleName
- principalName
- principalType
- grantOption
- grantTime
- grantorName
- grantorPrincipalType
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'roleName', None, None, ), # 1
(2, TType.STRING, 'principalName', None, None, ), # 2
(3, TType.I32, 'principalType', None, None, ), # 3
(4, TType.BOOL, 'grantOption', None, None, ), # 4
(5, TType.I32, 'grantTime', None, None, ), # 5
(6, TType.STRING, 'grantorName', None, None, ), # 6
(7, TType.I32, 'grantorPrincipalType', None, None, ), # 7
)
def __init__(self, roleName=None, principalName=None, principalType=None, grantOption=None, grantTime=None, grantorName=None, grantorPrincipalType=None,):
self.roleName = roleName
self.principalName = principalName
self.principalType = principalType
self.grantOption = grantOption
self.grantTime = grantTime
self.grantorName = grantorName
self.grantorPrincipalType = grantorPrincipalType
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.roleName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.principalName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.principalType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.grantOption = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.grantTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.grantorName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.grantorPrincipalType = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RolePrincipalGrant')
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 1)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.principalName is not None:
oprot.writeFieldBegin('principalName', TType.STRING, 2)
oprot.writeString(self.principalName)
oprot.writeFieldEnd()
if self.principalType is not None:
oprot.writeFieldBegin('principalType', TType.I32, 3)
oprot.writeI32(self.principalType)
oprot.writeFieldEnd()
if self.grantOption is not None:
oprot.writeFieldBegin('grantOption', TType.BOOL, 4)
oprot.writeBool(self.grantOption)
oprot.writeFieldEnd()
if self.grantTime is not None:
oprot.writeFieldBegin('grantTime', TType.I32, 5)
oprot.writeI32(self.grantTime)
oprot.writeFieldEnd()
if self.grantorName is not None:
oprot.writeFieldBegin('grantorName', TType.STRING, 6)
oprot.writeString(self.grantorName)
oprot.writeFieldEnd()
if self.grantorPrincipalType is not None:
oprot.writeFieldBegin('grantorPrincipalType', TType.I32, 7)
oprot.writeI32(self.grantorPrincipalType)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GetRoleGrantsForPrincipalRequest:
"""
Attributes:
- principal_name
- principal_type
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'principal_name', None, None, ), # 1
(2, TType.I32, 'principal_type', None, None, ), # 2
)
def __init__(self, principal_name=None, principal_type=None,):
self.principal_name = principal_name
self.principal_type = principal_type
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.principal_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.principal_type = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetRoleGrantsForPrincipalRequest')
if self.principal_name is not None:
oprot.writeFieldBegin('principal_name', TType.STRING, 1)
oprot.writeString(self.principal_name)
oprot.writeFieldEnd()
if self.principal_type is not None:
oprot.writeFieldBegin('principal_type', TType.I32, 2)
oprot.writeI32(self.principal_type)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.principal_name is None:
raise TProtocol.TProtocolException(message='Required field principal_name is unset!')
if self.principal_type is None:
raise TProtocol.TProtocolException(message='Required field principal_type is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
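# Note: unlike most structs in this module, this request treats principal_name
# and principal_type as required, so validate() raises TProtocolException when
# either is unset.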
class GetRoleGrantsForPrincipalResponse:
"""
Attributes:
- principalGrants
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'principalGrants', (TType.STRUCT,(RolePrincipalGrant, RolePrincipalGrant.thrift_spec)), None, ), # 1
)
def __init__(self, principalGrants=None,):
self.principalGrants = principalGrants
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.principalGrants = []
(_etype72, _size69) = iprot.readListBegin()
for _i73 in xrange(_size69):
_elem74 = RolePrincipalGrant()
_elem74.read(iprot)
self.principalGrants.append(_elem74)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetRoleGrantsForPrincipalResponse')
if self.principalGrants is not None:
oprot.writeFieldBegin('principalGrants', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.principalGrants))
for iter75 in self.principalGrants:
iter75.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.principalGrants is None:
raise TProtocol.TProtocolException(message='Required field principalGrants is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GetPrincipalsInRoleRequest:
"""
Attributes:
- roleName
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'roleName', None, None, ), # 1
)
def __init__(self, roleName=None,):
self.roleName = roleName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.roleName = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetPrincipalsInRoleRequest')
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 1)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.roleName is None:
raise TProtocol.TProtocolException(message='Required field roleName is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GetPrincipalsInRoleResponse:
"""
Attributes:
- principalGrants
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'principalGrants', (TType.STRUCT,(RolePrincipalGrant, RolePrincipalGrant.thrift_spec)), None, ), # 1
)
def __init__(self, principalGrants=None,):
self.principalGrants = principalGrants
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.principalGrants = []
(_etype79, _size76) = iprot.readListBegin()
for _i80 in xrange(_size76):
_elem81 = RolePrincipalGrant()
_elem81.read(iprot)
self.principalGrants.append(_elem81)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetPrincipalsInRoleResponse')
if self.principalGrants is not None:
oprot.writeFieldBegin('principalGrants', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.principalGrants))
for iter82 in self.principalGrants:
iter82.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.principalGrants is None:
raise TProtocol.TProtocolException(message='Required field principalGrants is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GrantRevokeRoleRequest:
"""
Attributes:
- requestType
- roleName
- principalName
- principalType
- grantor
- grantorType
- grantOption
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'requestType', None, None, ), # 1
(2, TType.STRING, 'roleName', None, None, ), # 2
(3, TType.STRING, 'principalName', None, None, ), # 3
(4, TType.I32, 'principalType', None, None, ), # 4
(5, TType.STRING, 'grantor', None, None, ), # 5
(6, TType.I32, 'grantorType', None, None, ), # 6
(7, TType.BOOL, 'grantOption', None, None, ), # 7
)
def __init__(self, requestType=None, roleName=None, principalName=None, principalType=None, grantor=None, grantorType=None, grantOption=None,):
self.requestType = requestType
self.roleName = roleName
self.principalName = principalName
self.principalType = principalType
self.grantor = grantor
self.grantorType = grantorType
self.grantOption = grantOption
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.requestType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.roleName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.principalName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.principalType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.grantor = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.grantorType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.grantOption = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GrantRevokeRoleRequest')
if self.requestType is not None:
oprot.writeFieldBegin('requestType', TType.I32, 1)
oprot.writeI32(self.requestType)
oprot.writeFieldEnd()
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 2)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.principalName is not None:
oprot.writeFieldBegin('principalName', TType.STRING, 3)
oprot.writeString(self.principalName)
oprot.writeFieldEnd()
if self.principalType is not None:
oprot.writeFieldBegin('principalType', TType.I32, 4)
oprot.writeI32(self.principalType)
oprot.writeFieldEnd()
if self.grantor is not None:
oprot.writeFieldBegin('grantor', TType.STRING, 5)
oprot.writeString(self.grantor)
oprot.writeFieldEnd()
if self.grantorType is not None:
oprot.writeFieldBegin('grantorType', TType.I32, 6)
oprot.writeI32(self.grantorType)
oprot.writeFieldEnd()
if self.grantOption is not None:
oprot.writeFieldBegin('grantOption', TType.BOOL, 7)
oprot.writeBool(self.grantOption)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GrantRevokeRoleResponse:
"""
Attributes:
- success
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'success', None, None, ), # 1
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GrantRevokeRoleResponse')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 1)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Database:
"""
Attributes:
- name
- description
- locationUri
- parameters
- privileges
- ownerName
- ownerType
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'description', None, None, ), # 2
(3, TType.STRING, 'locationUri', None, None, ), # 3
(4, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 4
(5, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 5
(6, TType.STRING, 'ownerName', None, None, ), # 6
(7, TType.I32, 'ownerType', None, None, ), # 7
)
def __init__(self, name=None, description=None, locationUri=None, parameters=None, privileges=None, ownerName=None, ownerType=None,):
self.name = name
self.description = description
self.locationUri = locationUri
self.parameters = parameters
self.privileges = privileges
self.ownerName = ownerName
self.ownerType = ownerType
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.description = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.locationUri = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.parameters = {}
(_ktype84, _vtype85, _size83 ) = iprot.readMapBegin()
for _i87 in xrange(_size83):
_key88 = iprot.readString();
_val89 = iprot.readString();
self.parameters[_key88] = _val89
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.privileges = PrincipalPrivilegeSet()
self.privileges.read(iprot)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.ownerName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.ownerType = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Database')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.description is not None:
oprot.writeFieldBegin('description', TType.STRING, 2)
oprot.writeString(self.description)
oprot.writeFieldEnd()
if self.locationUri is not None:
oprot.writeFieldBegin('locationUri', TType.STRING, 3)
oprot.writeString(self.locationUri)
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter90,viter91 in self.parameters.items():
oprot.writeString(kiter90)
oprot.writeString(viter91)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.privileges is not None:
oprot.writeFieldBegin('privileges', TType.STRUCT, 5)
self.privileges.write(oprot)
oprot.writeFieldEnd()
if self.ownerName is not None:
oprot.writeFieldBegin('ownerName', TType.STRING, 6)
oprot.writeString(self.ownerName)
oprot.writeFieldEnd()
if self.ownerType is not None:
oprot.writeFieldBegin('ownerType', TType.I32, 7)
oprot.writeI32(self.ownerType)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
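# SerDeInfo names the serializer/deserializer used to interpret a table's or
# partition's rows: the SerDe class in 'serializationLib' plus its key/value
# initialization parameters (e.g. field delimiters).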
class SerDeInfo:
"""
Attributes:
- name
- serializationLib
- parameters
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'serializationLib', None, None, ), # 2
(3, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, name=None, serializationLib=None, parameters=None,):
self.name = name
self.serializationLib = serializationLib
self.parameters = parameters
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.serializationLib = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.parameters = {}
(_ktype93, _vtype94, _size92 ) = iprot.readMapBegin()
for _i96 in xrange(_size92):
_key97 = iprot.readString();
_val98 = iprot.readString();
self.parameters[_key97] = _val98
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SerDeInfo')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.serializationLib is not None:
oprot.writeFieldBegin('serializationLib', TType.STRING, 2)
oprot.writeString(self.serializationLib)
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter99,viter100 in self.parameters.items():
oprot.writeString(kiter99)
oprot.writeString(viter100)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
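# A minimal, hedged usage sketch (illustrative values, not part of the
# generated API): round-trip a SerDeInfo through TBinaryProtocol over an
# in-memory transport, using the TTransport/TBinaryProtocol modules this file
# already imports.
def _example_serde_info_roundtrip():
  original = SerDeInfo(name='mySerDe',
                       serializationLib='org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',
                       parameters={'field.delim': ','})
  # Serialize into an in-memory buffer, then decode a fresh struct from it.
  buf = TTransport.TMemoryBuffer()
  original.write(TBinaryProtocol.TBinaryProtocol(buf))
  decoded = SerDeInfo()
  decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
  assert decoded == original
# Order describes one sort column of a storage descriptor: the column name and
# an integer sort direction (in Hive's convention, 1 means ascending).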
class Order:
"""
Attributes:
- col
- order
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'col', None, None, ), # 1
(2, TType.I32, 'order', None, None, ), # 2
)
def __init__(self, col=None, order=None,):
self.col = col
self.order = order
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.col = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.order = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Order')
if self.col is not None:
oprot.writeFieldBegin('col', TType.STRING, 1)
oprot.writeString(self.col)
oprot.writeFieldEnd()
if self.order is not None:
oprot.writeFieldBegin('order', TType.I32, 2)
oprot.writeI32(self.order)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
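# SkewedInfo carries list-bucketing metadata: the skewed column names, the
# lists of skewed column values, and a map from each skewed value list to the
# storage location that holds those rows.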
class SkewedInfo:
"""
Attributes:
- skewedColNames
- skewedColValues
- skewedColValueLocationMaps
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'skewedColNames', (TType.STRING,None), None, ), # 1
(2, TType.LIST, 'skewedColValues', (TType.LIST,(TType.STRING,None)), None, ), # 2
(3, TType.MAP, 'skewedColValueLocationMaps', (TType.LIST,(TType.STRING,None),TType.STRING,None), None, ), # 3
)
def __init__(self, skewedColNames=None, skewedColValues=None, skewedColValueLocationMaps=None,):
self.skewedColNames = skewedColNames
self.skewedColValues = skewedColValues
self.skewedColValueLocationMaps = skewedColValueLocationMaps
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.skewedColNames = []
(_etype104, _size101) = iprot.readListBegin()
for _i105 in xrange(_size101):
_elem106 = iprot.readString();
self.skewedColNames.append(_elem106)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.skewedColValues = []
(_etype110, _size107) = iprot.readListBegin()
for _i111 in xrange(_size107):
_elem112 = []
(_etype116, _size113) = iprot.readListBegin()
for _i117 in xrange(_size113):
_elem118 = iprot.readString();
_elem112.append(_elem118)
iprot.readListEnd()
self.skewedColValues.append(_elem112)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.skewedColValueLocationMaps = {}
(_ktype120, _vtype121, _size119 ) = iprot.readMapBegin()
for _i123 in xrange(_size119):
_key124 = []
(_etype129, _size126) = iprot.readListBegin()
for _i130 in xrange(_size126):
_elem131 = iprot.readString();
_key124.append(_elem131)
iprot.readListEnd()
_val125 = iprot.readString();
            # A Python list is unhashable and cannot be a dict key, so store
            # the skewed-value key as a tuple (the write path below iterates
            # the key identically either way).
            self.skewedColValueLocationMaps[tuple(_key124)] = _val125
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SkewedInfo')
if self.skewedColNames is not None:
oprot.writeFieldBegin('skewedColNames', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.skewedColNames))
for iter132 in self.skewedColNames:
oprot.writeString(iter132)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.skewedColValues is not None:
oprot.writeFieldBegin('skewedColValues', TType.LIST, 2)
oprot.writeListBegin(TType.LIST, len(self.skewedColValues))
for iter133 in self.skewedColValues:
oprot.writeListBegin(TType.STRING, len(iter133))
for iter134 in iter133:
oprot.writeString(iter134)
oprot.writeListEnd()
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.skewedColValueLocationMaps is not None:
oprot.writeFieldBegin('skewedColValueLocationMaps', TType.MAP, 3)
oprot.writeMapBegin(TType.LIST, TType.STRING, len(self.skewedColValueLocationMaps))
for kiter135,viter136 in self.skewedColValueLocationMaps.items():
oprot.writeListBegin(TType.STRING, len(kiter135))
for iter137 in kiter135:
oprot.writeString(iter137)
oprot.writeListEnd()
oprot.writeString(viter136)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
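# Hedged sketch (illustrative values): with the tuple-key fix in
# SkewedInfo.read above, location maps round-trip with tuple keys.
def _example_skewed_info_roundtrip():
  original = SkewedInfo(skewedColNames=['c1'],
                        skewedColValues=[['x']],
                        skewedColValueLocationMaps={('x',): '/warehouse/t/c1=x'})
  buf = TTransport.TMemoryBuffer()
  original.write(TBinaryProtocol.TBinaryProtocol(buf))
  decoded = SkewedInfo()
  decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
  assert decoded.skewedColValueLocationMaps == {('x',): '/warehouse/t/c1=x'}
# StorageDescriptor captures the physical layout shared by tables and
# partitions: the column schema, data location, input/output formats,
# compression flag, bucketing and sort columns, the SerDeInfo, free-form
# parameters, and optional skew information.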
class StorageDescriptor:
"""
Attributes:
- cols
- location
- inputFormat
- outputFormat
- compressed
- numBuckets
- serdeInfo
- bucketCols
- sortCols
- parameters
- skewedInfo
- storedAsSubDirectories
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'cols', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 1
(2, TType.STRING, 'location', None, None, ), # 2
(3, TType.STRING, 'inputFormat', None, None, ), # 3
(4, TType.STRING, 'outputFormat', None, None, ), # 4
(5, TType.BOOL, 'compressed', None, None, ), # 5
(6, TType.I32, 'numBuckets', None, None, ), # 6
(7, TType.STRUCT, 'serdeInfo', (SerDeInfo, SerDeInfo.thrift_spec), None, ), # 7
(8, TType.LIST, 'bucketCols', (TType.STRING,None), None, ), # 8
(9, TType.LIST, 'sortCols', (TType.STRUCT,(Order, Order.thrift_spec)), None, ), # 9
(10, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 10
(11, TType.STRUCT, 'skewedInfo', (SkewedInfo, SkewedInfo.thrift_spec), None, ), # 11
(12, TType.BOOL, 'storedAsSubDirectories', None, None, ), # 12
)
def __init__(self, cols=None, location=None, inputFormat=None, outputFormat=None, compressed=None, numBuckets=None, serdeInfo=None, bucketCols=None, sortCols=None, parameters=None, skewedInfo=None, storedAsSubDirectories=None,):
self.cols = cols
self.location = location
self.inputFormat = inputFormat
self.outputFormat = outputFormat
self.compressed = compressed
self.numBuckets = numBuckets
self.serdeInfo = serdeInfo
self.bucketCols = bucketCols
self.sortCols = sortCols
self.parameters = parameters
self.skewedInfo = skewedInfo
self.storedAsSubDirectories = storedAsSubDirectories
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.cols = []
(_etype141, _size138) = iprot.readListBegin()
for _i142 in xrange(_size138):
_elem143 = FieldSchema()
_elem143.read(iprot)
self.cols.append(_elem143)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.location = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.inputFormat = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.outputFormat = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.compressed = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.numBuckets = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRUCT:
self.serdeInfo = SerDeInfo()
self.serdeInfo.read(iprot)
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.bucketCols = []
(_etype147, _size144) = iprot.readListBegin()
for _i148 in xrange(_size144):
_elem149 = iprot.readString();
self.bucketCols.append(_elem149)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.LIST:
self.sortCols = []
(_etype153, _size150) = iprot.readListBegin()
for _i154 in xrange(_size150):
_elem155 = Order()
_elem155.read(iprot)
self.sortCols.append(_elem155)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.MAP:
self.parameters = {}
(_ktype157, _vtype158, _size156 ) = iprot.readMapBegin()
for _i160 in xrange(_size156):
_key161 = iprot.readString();
_val162 = iprot.readString();
self.parameters[_key161] = _val162
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRUCT:
self.skewedInfo = SkewedInfo()
self.skewedInfo.read(iprot)
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.BOOL:
self.storedAsSubDirectories = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('StorageDescriptor')
if self.cols is not None:
oprot.writeFieldBegin('cols', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.cols))
for iter163 in self.cols:
iter163.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.location is not None:
oprot.writeFieldBegin('location', TType.STRING, 2)
oprot.writeString(self.location)
oprot.writeFieldEnd()
if self.inputFormat is not None:
oprot.writeFieldBegin('inputFormat', TType.STRING, 3)
oprot.writeString(self.inputFormat)
oprot.writeFieldEnd()
if self.outputFormat is not None:
oprot.writeFieldBegin('outputFormat', TType.STRING, 4)
oprot.writeString(self.outputFormat)
oprot.writeFieldEnd()
if self.compressed is not None:
oprot.writeFieldBegin('compressed', TType.BOOL, 5)
oprot.writeBool(self.compressed)
oprot.writeFieldEnd()
if self.numBuckets is not None:
oprot.writeFieldBegin('numBuckets', TType.I32, 6)
oprot.writeI32(self.numBuckets)
oprot.writeFieldEnd()
if self.serdeInfo is not None:
oprot.writeFieldBegin('serdeInfo', TType.STRUCT, 7)
self.serdeInfo.write(oprot)
oprot.writeFieldEnd()
if self.bucketCols is not None:
oprot.writeFieldBegin('bucketCols', TType.LIST, 8)
oprot.writeListBegin(TType.STRING, len(self.bucketCols))
for iter164 in self.bucketCols:
oprot.writeString(iter164)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.sortCols is not None:
oprot.writeFieldBegin('sortCols', TType.LIST, 9)
oprot.writeListBegin(TType.STRUCT, len(self.sortCols))
for iter165 in self.sortCols:
iter165.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.MAP, 10)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter166,viter167 in self.parameters.items():
oprot.writeString(kiter166)
oprot.writeString(viter167)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.skewedInfo is not None:
oprot.writeFieldBegin('skewedInfo', TType.STRUCT, 11)
self.skewedInfo.write(oprot)
oprot.writeFieldEnd()
if self.storedAsSubDirectories is not None:
oprot.writeFieldBegin('storedAsSubDirectories', TType.BOOL, 12)
oprot.writeBool(self.storedAsSubDirectories)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
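# Table is the central metastore object: identity (dbName/tableName),
# ownership and timestamps, its StorageDescriptor, the partition key schema,
# free-form parameters, view texts, table type, privileges, and a 'temporary'
# flag whose default (False) comes from thrift_spec[14][4] below.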
class Table:
"""
Attributes:
- tableName
- dbName
- owner
- createTime
- lastAccessTime
- retention
- sd
- partitionKeys
- parameters
- viewOriginalText
- viewExpandedText
- tableType
- privileges
- temporary
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'dbName', None, None, ), # 2
(3, TType.STRING, 'owner', None, None, ), # 3
(4, TType.I32, 'createTime', None, None, ), # 4
(5, TType.I32, 'lastAccessTime', None, None, ), # 5
(6, TType.I32, 'retention', None, None, ), # 6
(7, TType.STRUCT, 'sd', (StorageDescriptor, StorageDescriptor.thrift_spec), None, ), # 7
(8, TType.LIST, 'partitionKeys', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 8
(9, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 9
(10, TType.STRING, 'viewOriginalText', None, None, ), # 10
(11, TType.STRING, 'viewExpandedText', None, None, ), # 11
(12, TType.STRING, 'tableType', None, None, ), # 12
(13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13
(14, TType.BOOL, 'temporary', None, False, ), # 14
)
def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],):
self.tableName = tableName
self.dbName = dbName
self.owner = owner
self.createTime = createTime
self.lastAccessTime = lastAccessTime
self.retention = retention
self.sd = sd
self.partitionKeys = partitionKeys
self.parameters = parameters
self.viewOriginalText = viewOriginalText
self.viewExpandedText = viewExpandedText
self.tableType = tableType
self.privileges = privileges
self.temporary = temporary
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.owner = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.createTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.lastAccessTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.retention = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRUCT:
self.sd = StorageDescriptor()
self.sd.read(iprot)
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.partitionKeys = []
(_etype171, _size168) = iprot.readListBegin()
for _i172 in xrange(_size168):
_elem173 = FieldSchema()
_elem173.read(iprot)
self.partitionKeys.append(_elem173)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.MAP:
self.parameters = {}
(_ktype175, _vtype176, _size174 ) = iprot.readMapBegin()
for _i178 in xrange(_size174):
_key179 = iprot.readString();
_val180 = iprot.readString();
self.parameters[_key179] = _val180
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.STRING:
self.viewOriginalText = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRING:
self.viewExpandedText = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.STRING:
self.tableType = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.STRUCT:
self.privileges = PrincipalPrivilegeSet()
self.privileges.read(iprot)
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.BOOL:
self.temporary = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Table')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 2)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.owner is not None:
oprot.writeFieldBegin('owner', TType.STRING, 3)
oprot.writeString(self.owner)
oprot.writeFieldEnd()
if self.createTime is not None:
oprot.writeFieldBegin('createTime', TType.I32, 4)
oprot.writeI32(self.createTime)
oprot.writeFieldEnd()
if self.lastAccessTime is not None:
oprot.writeFieldBegin('lastAccessTime', TType.I32, 5)
oprot.writeI32(self.lastAccessTime)
oprot.writeFieldEnd()
if self.retention is not None:
oprot.writeFieldBegin('retention', TType.I32, 6)
oprot.writeI32(self.retention)
oprot.writeFieldEnd()
if self.sd is not None:
oprot.writeFieldBegin('sd', TType.STRUCT, 7)
self.sd.write(oprot)
oprot.writeFieldEnd()
if self.partitionKeys is not None:
oprot.writeFieldBegin('partitionKeys', TType.LIST, 8)
oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys))
for iter181 in self.partitionKeys:
iter181.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.MAP, 9)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter182,viter183 in self.parameters.items():
oprot.writeString(kiter182)
oprot.writeString(viter183)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.viewOriginalText is not None:
oprot.writeFieldBegin('viewOriginalText', TType.STRING, 10)
oprot.writeString(self.viewOriginalText)
oprot.writeFieldEnd()
if self.viewExpandedText is not None:
oprot.writeFieldBegin('viewExpandedText', TType.STRING, 11)
oprot.writeString(self.viewExpandedText)
oprot.writeFieldEnd()
if self.tableType is not None:
oprot.writeFieldBegin('tableType', TType.STRING, 12)
oprot.writeString(self.tableType)
oprot.writeFieldEnd()
if self.privileges is not None:
oprot.writeFieldBegin('privileges', TType.STRUCT, 13)
self.privileges.write(oprot)
oprot.writeFieldEnd()
if self.temporary is not None:
oprot.writeFieldBegin('temporary', TType.BOOL, 14)
oprot.writeBool(self.temporary)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
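# Hedged construction sketch (illustrative names; FieldSchema is defined
# earlier in this module): a minimal managed table with one string column.
def _example_minimal_table():
  sd = StorageDescriptor(cols=[FieldSchema(name='id', type='string', comment=None)],
                         location='/warehouse/demo_db.db/demo',
                         serdeInfo=SerDeInfo(parameters={}))
  return Table(tableName='demo', dbName='demo_db', owner='hive', sd=sd,
               partitionKeys=[], parameters={}, tableType='MANAGED_TABLE')
# Partition pairs a list of partition key values (in partition-key order) with
# its owning db/table, timestamps, its own StorageDescriptor, parameters, and
# privileges.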
class Partition:
"""
Attributes:
- values
- dbName
- tableName
- createTime
- lastAccessTime
- sd
- parameters
- privileges
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'values', (TType.STRING,None), None, ), # 1
(2, TType.STRING, 'dbName', None, None, ), # 2
(3, TType.STRING, 'tableName', None, None, ), # 3
(4, TType.I32, 'createTime', None, None, ), # 4
(5, TType.I32, 'lastAccessTime', None, None, ), # 5
(6, TType.STRUCT, 'sd', (StorageDescriptor, StorageDescriptor.thrift_spec), None, ), # 6
(7, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 7
(8, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 8
)
def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None,):
self.values = values
self.dbName = dbName
self.tableName = tableName
self.createTime = createTime
self.lastAccessTime = lastAccessTime
self.sd = sd
self.parameters = parameters
self.privileges = privileges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.values = []
(_etype187, _size184) = iprot.readListBegin()
for _i188 in xrange(_size184):
_elem189 = iprot.readString();
self.values.append(_elem189)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.createTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.lastAccessTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.sd = StorageDescriptor()
self.sd.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.MAP:
self.parameters = {}
(_ktype191, _vtype192, _size190 ) = iprot.readMapBegin()
for _i194 in xrange(_size190):
_key195 = iprot.readString();
_val196 = iprot.readString();
self.parameters[_key195] = _val196
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRUCT:
self.privileges = PrincipalPrivilegeSet()
self.privileges.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Partition')
if self.values is not None:
oprot.writeFieldBegin('values', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.values))
for iter197 in self.values:
oprot.writeString(iter197)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 2)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 3)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.createTime is not None:
oprot.writeFieldBegin('createTime', TType.I32, 4)
oprot.writeI32(self.createTime)
oprot.writeFieldEnd()
if self.lastAccessTime is not None:
oprot.writeFieldBegin('lastAccessTime', TType.I32, 5)
oprot.writeI32(self.lastAccessTime)
oprot.writeFieldEnd()
if self.sd is not None:
oprot.writeFieldBegin('sd', TType.STRUCT, 6)
self.sd.write(oprot)
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.MAP, 7)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter198,viter199 in self.parameters.items():
oprot.writeString(kiter198)
oprot.writeString(viter199)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.privileges is not None:
oprot.writeFieldBegin('privileges', TType.STRUCT, 8)
self.privileges.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
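# PartitionWithoutSD is a slimmed-down Partition used when many partitions
# share one StorageDescriptor: instead of a full sd it carries only a
# relativePath resolved against the shared descriptor's location.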
class PartitionWithoutSD:
"""
Attributes:
- values
- createTime
- lastAccessTime
- relativePath
- parameters
- privileges
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'values', (TType.STRING,None), None, ), # 1
(2, TType.I32, 'createTime', None, None, ), # 2
(3, TType.I32, 'lastAccessTime', None, None, ), # 3
(4, TType.STRING, 'relativePath', None, None, ), # 4
(5, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 5
(6, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 6
)
def __init__(self, values=None, createTime=None, lastAccessTime=None, relativePath=None, parameters=None, privileges=None,):
self.values = values
self.createTime = createTime
self.lastAccessTime = lastAccessTime
self.relativePath = relativePath
self.parameters = parameters
self.privileges = privileges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.values = []
(_etype203, _size200) = iprot.readListBegin()
for _i204 in xrange(_size200):
_elem205 = iprot.readString();
self.values.append(_elem205)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.createTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.lastAccessTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.relativePath = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.parameters = {}
(_ktype207, _vtype208, _size206 ) = iprot.readMapBegin()
for _i210 in xrange(_size206):
_key211 = iprot.readString();
_val212 = iprot.readString();
self.parameters[_key211] = _val212
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.privileges = PrincipalPrivilegeSet()
self.privileges.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PartitionWithoutSD')
if self.values is not None:
oprot.writeFieldBegin('values', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.values))
for iter213 in self.values:
oprot.writeString(iter213)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.createTime is not None:
oprot.writeFieldBegin('createTime', TType.I32, 2)
oprot.writeI32(self.createTime)
oprot.writeFieldEnd()
if self.lastAccessTime is not None:
oprot.writeFieldBegin('lastAccessTime', TType.I32, 3)
oprot.writeI32(self.lastAccessTime)
oprot.writeFieldEnd()
if self.relativePath is not None:
oprot.writeFieldBegin('relativePath', TType.STRING, 4)
oprot.writeString(self.relativePath)
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter214,viter215 in self.parameters.items():
oprot.writeString(kiter214)
oprot.writeString(viter215)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.privileges is not None:
oprot.writeFieldBegin('privileges', TType.STRUCT, 6)
self.privileges.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
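# PartitionSpecWithSharedSD groups PartitionWithoutSD entries around a single
# shared StorageDescriptor, avoiding per-partition duplication on the wire.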
class PartitionSpecWithSharedSD:
"""
Attributes:
- partitions
- sd
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'partitions', (TType.STRUCT,(PartitionWithoutSD, PartitionWithoutSD.thrift_spec)), None, ), # 1
(2, TType.STRUCT, 'sd', (StorageDescriptor, StorageDescriptor.thrift_spec), None, ), # 2
)
def __init__(self, partitions=None, sd=None,):
self.partitions = partitions
self.sd = sd
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.partitions = []
(_etype219, _size216) = iprot.readListBegin()
for _i220 in xrange(_size216):
_elem221 = PartitionWithoutSD()
_elem221.read(iprot)
self.partitions.append(_elem221)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sd = StorageDescriptor()
self.sd.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PartitionSpecWithSharedSD')
if self.partitions is not None:
oprot.writeFieldBegin('partitions', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.partitions))
for iter222 in self.partitions:
iter222.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.sd is not None:
oprot.writeFieldBegin('sd', TType.STRUCT, 2)
self.sd.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
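# PartitionListComposingSpec is the uncompressed alternative: a plain list of
# full Partition structs.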
class PartitionListComposingSpec:
"""
Attributes:
- partitions
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1
)
def __init__(self, partitions=None,):
self.partitions = partitions
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.partitions = []
(_etype226, _size223) = iprot.readListBegin()
for _i227 in xrange(_size223):
_elem228 = Partition()
_elem228.read(iprot)
self.partitions.append(_elem228)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PartitionListComposingSpec')
if self.partitions is not None:
oprot.writeFieldBegin('partitions', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.partitions))
for iter229 in self.partitions:
iter229.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
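# PartitionSpec acts as a union-like wrapper: for a given db/table/rootPath,
# exactly one of sharedSDPartitionSpec or partitionList is expected to be set.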
class PartitionSpec:
"""
Attributes:
- dbName
- tableName
- rootPath
- sharedSDPartitionSpec
- partitionList
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'dbName', None, None, ), # 1
(2, TType.STRING, 'tableName', None, None, ), # 2
(3, TType.STRING, 'rootPath', None, None, ), # 3
(4, TType.STRUCT, 'sharedSDPartitionSpec', (PartitionSpecWithSharedSD, PartitionSpecWithSharedSD.thrift_spec), None, ), # 4
(5, TType.STRUCT, 'partitionList', (PartitionListComposingSpec, PartitionListComposingSpec.thrift_spec), None, ), # 5
)
def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None,):
self.dbName = dbName
self.tableName = tableName
self.rootPath = rootPath
self.sharedSDPartitionSpec = sharedSDPartitionSpec
self.partitionList = partitionList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.rootPath = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.sharedSDPartitionSpec = PartitionSpecWithSharedSD()
self.sharedSDPartitionSpec.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.partitionList = PartitionListComposingSpec()
self.partitionList.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PartitionSpec')
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 1)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 2)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rootPath is not None:
oprot.writeFieldBegin('rootPath', TType.STRING, 3)
oprot.writeString(self.rootPath)
oprot.writeFieldEnd()
if self.sharedSDPartitionSpec is not None:
oprot.writeFieldBegin('sharedSDPartitionSpec', TType.STRUCT, 4)
self.sharedSDPartitionSpec.write(oprot)
oprot.writeFieldEnd()
if self.partitionList is not None:
oprot.writeFieldBegin('partitionList', TType.STRUCT, 5)
self.partitionList.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
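# Index describes a (now legacy) Hive index: its name, handler class, the
# original table being indexed, the index table that materializes it, storage,
# parameters, and whether its rebuild is deferred.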
class Index:
"""
Attributes:
- indexName
- indexHandlerClass
- dbName
- origTableName
- createTime
- lastAccessTime
- indexTableName
- sd
- parameters
- deferredRebuild
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'indexName', None, None, ), # 1
(2, TType.STRING, 'indexHandlerClass', None, None, ), # 2
(3, TType.STRING, 'dbName', None, None, ), # 3
(4, TType.STRING, 'origTableName', None, None, ), # 4
(5, TType.I32, 'createTime', None, None, ), # 5
(6, TType.I32, 'lastAccessTime', None, None, ), # 6
(7, TType.STRING, 'indexTableName', None, None, ), # 7
(8, TType.STRUCT, 'sd', (StorageDescriptor, StorageDescriptor.thrift_spec), None, ), # 8
(9, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 9
(10, TType.BOOL, 'deferredRebuild', None, None, ), # 10
)
def __init__(self, indexName=None, indexHandlerClass=None, dbName=None, origTableName=None, createTime=None, lastAccessTime=None, indexTableName=None, sd=None, parameters=None, deferredRebuild=None,):
self.indexName = indexName
self.indexHandlerClass = indexHandlerClass
self.dbName = dbName
self.origTableName = origTableName
self.createTime = createTime
self.lastAccessTime = lastAccessTime
self.indexTableName = indexTableName
self.sd = sd
self.parameters = parameters
self.deferredRebuild = deferredRebuild
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.indexName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.indexHandlerClass = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.origTableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.createTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.lastAccessTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.indexTableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRUCT:
self.sd = StorageDescriptor()
self.sd.read(iprot)
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.MAP:
self.parameters = {}
(_ktype231, _vtype232, _size230 ) = iprot.readMapBegin()
for _i234 in xrange(_size230):
_key235 = iprot.readString();
_val236 = iprot.readString();
self.parameters[_key235] = _val236
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.BOOL:
self.deferredRebuild = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Index')
if self.indexName is not None:
oprot.writeFieldBegin('indexName', TType.STRING, 1)
oprot.writeString(self.indexName)
oprot.writeFieldEnd()
if self.indexHandlerClass is not None:
oprot.writeFieldBegin('indexHandlerClass', TType.STRING, 2)
oprot.writeString(self.indexHandlerClass)
oprot.writeFieldEnd()
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 3)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.origTableName is not None:
oprot.writeFieldBegin('origTableName', TType.STRING, 4)
oprot.writeString(self.origTableName)
oprot.writeFieldEnd()
if self.createTime is not None:
oprot.writeFieldBegin('createTime', TType.I32, 5)
oprot.writeI32(self.createTime)
oprot.writeFieldEnd()
if self.lastAccessTime is not None:
oprot.writeFieldBegin('lastAccessTime', TType.I32, 6)
oprot.writeI32(self.lastAccessTime)
oprot.writeFieldEnd()
if self.indexTableName is not None:
oprot.writeFieldBegin('indexTableName', TType.STRING, 7)
oprot.writeString(self.indexTableName)
oprot.writeFieldEnd()
if self.sd is not None:
oprot.writeFieldBegin('sd', TType.STRUCT, 8)
self.sd.write(oprot)
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.MAP, 9)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter237,viter238 in self.parameters.items():
oprot.writeString(kiter237)
oprot.writeString(viter238)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.deferredRebuild is not None:
oprot.writeFieldBegin('deferredRebuild', TType.BOOL, 10)
oprot.writeBool(self.deferredRebuild)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
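# The *ColumnStatsData structs below hold per-column statistics: true/false
# counts for booleans, low/high values and distinct-value estimates for
# numeric types, and length statistics for string/binary columns. Unlike the
# metadata structs above, their validate() methods enforce required fields.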
class BooleanColumnStatsData:
"""
Attributes:
- numTrues
- numFalses
- numNulls
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'numTrues', None, None, ), # 1
(2, TType.I64, 'numFalses', None, None, ), # 2
(3, TType.I64, 'numNulls', None, None, ), # 3
)
def __init__(self, numTrues=None, numFalses=None, numNulls=None,):
self.numTrues = numTrues
self.numFalses = numFalses
self.numNulls = numNulls
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.numTrues = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.numFalses = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.numNulls = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('BooleanColumnStatsData')
if self.numTrues is not None:
oprot.writeFieldBegin('numTrues', TType.I64, 1)
oprot.writeI64(self.numTrues)
oprot.writeFieldEnd()
if self.numFalses is not None:
oprot.writeFieldBegin('numFalses', TType.I64, 2)
oprot.writeI64(self.numFalses)
oprot.writeFieldEnd()
if self.numNulls is not None:
oprot.writeFieldBegin('numNulls', TType.I64, 3)
oprot.writeI64(self.numNulls)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.numTrues is None:
raise TProtocol.TProtocolException(message='Required field numTrues is unset!')
if self.numFalses is None:
raise TProtocol.TProtocolException(message='Required field numFalses is unset!')
if self.numNulls is None:
raise TProtocol.TProtocolException(message='Required field numNulls is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
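# Hedged sketch: validate() raises when a required statistic is missing.
def _example_stats_validate():
  stats = BooleanColumnStatsData(numTrues=10, numFalses=5)  # numNulls unset
  try:
    stats.validate()
  except TProtocol.TProtocolException:
    pass  # expected: 'Required field numNulls is unset!'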
class DoubleColumnStatsData:
"""
Attributes:
- lowValue
- highValue
- numNulls
- numDVs
"""
thrift_spec = (
None, # 0
(1, TType.DOUBLE, 'lowValue', None, None, ), # 1
(2, TType.DOUBLE, 'highValue', None, None, ), # 2
(3, TType.I64, 'numNulls', None, None, ), # 3
(4, TType.I64, 'numDVs', None, None, ), # 4
)
def __init__(self, lowValue=None, highValue=None, numNulls=None, numDVs=None,):
self.lowValue = lowValue
self.highValue = highValue
self.numNulls = numNulls
self.numDVs = numDVs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.DOUBLE:
self.lowValue = iprot.readDouble();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.DOUBLE:
self.highValue = iprot.readDouble();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.numNulls = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.numDVs = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DoubleColumnStatsData')
if self.lowValue is not None:
oprot.writeFieldBegin('lowValue', TType.DOUBLE, 1)
oprot.writeDouble(self.lowValue)
oprot.writeFieldEnd()
if self.highValue is not None:
oprot.writeFieldBegin('highValue', TType.DOUBLE, 2)
oprot.writeDouble(self.highValue)
oprot.writeFieldEnd()
if self.numNulls is not None:
oprot.writeFieldBegin('numNulls', TType.I64, 3)
oprot.writeI64(self.numNulls)
oprot.writeFieldEnd()
if self.numDVs is not None:
oprot.writeFieldBegin('numDVs', TType.I64, 4)
oprot.writeI64(self.numDVs)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.numNulls is None:
raise TProtocol.TProtocolException(message='Required field numNulls is unset!')
if self.numDVs is None:
raise TProtocol.TProtocolException(message='Required field numDVs is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class LongColumnStatsData:
"""
Attributes:
- lowValue
- highValue
- numNulls
- numDVs
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'lowValue', None, None, ), # 1
(2, TType.I64, 'highValue', None, None, ), # 2
(3, TType.I64, 'numNulls', None, None, ), # 3
(4, TType.I64, 'numDVs', None, None, ), # 4
)
def __init__(self, lowValue=None, highValue=None, numNulls=None, numDVs=None,):
self.lowValue = lowValue
self.highValue = highValue
self.numNulls = numNulls
self.numDVs = numDVs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.lowValue = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.highValue = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.numNulls = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.numDVs = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('LongColumnStatsData')
if self.lowValue is not None:
oprot.writeFieldBegin('lowValue', TType.I64, 1)
oprot.writeI64(self.lowValue)
oprot.writeFieldEnd()
if self.highValue is not None:
oprot.writeFieldBegin('highValue', TType.I64, 2)
oprot.writeI64(self.highValue)
oprot.writeFieldEnd()
if self.numNulls is not None:
oprot.writeFieldBegin('numNulls', TType.I64, 3)
oprot.writeI64(self.numNulls)
oprot.writeFieldEnd()
if self.numDVs is not None:
oprot.writeFieldBegin('numDVs', TType.I64, 4)
oprot.writeI64(self.numDVs)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.numNulls is None:
raise TProtocol.TProtocolException(message='Required field numNulls is unset!')
if self.numDVs is None:
raise TProtocol.TProtocolException(message='Required field numDVs is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class StringColumnStatsData:
"""
Attributes:
- maxColLen
- avgColLen
- numNulls
- numDVs
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'maxColLen', None, None, ), # 1
(2, TType.DOUBLE, 'avgColLen', None, None, ), # 2
(3, TType.I64, 'numNulls', None, None, ), # 3
(4, TType.I64, 'numDVs', None, None, ), # 4
)
def __init__(self, maxColLen=None, avgColLen=None, numNulls=None, numDVs=None,):
self.maxColLen = maxColLen
self.avgColLen = avgColLen
self.numNulls = numNulls
self.numDVs = numDVs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.maxColLen = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.DOUBLE:
self.avgColLen = iprot.readDouble();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.numNulls = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.numDVs = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('StringColumnStatsData')
if self.maxColLen is not None:
oprot.writeFieldBegin('maxColLen', TType.I64, 1)
oprot.writeI64(self.maxColLen)
oprot.writeFieldEnd()
if self.avgColLen is not None:
oprot.writeFieldBegin('avgColLen', TType.DOUBLE, 2)
oprot.writeDouble(self.avgColLen)
oprot.writeFieldEnd()
if self.numNulls is not None:
oprot.writeFieldBegin('numNulls', TType.I64, 3)
oprot.writeI64(self.numNulls)
oprot.writeFieldEnd()
if self.numDVs is not None:
oprot.writeFieldBegin('numDVs', TType.I64, 4)
oprot.writeI64(self.numDVs)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.maxColLen is None:
raise TProtocol.TProtocolException(message='Required field maxColLen is unset!')
if self.avgColLen is None:
raise TProtocol.TProtocolException(message='Required field avgColLen is unset!')
if self.numNulls is None:
raise TProtocol.TProtocolException(message='Required field numNulls is unset!')
if self.numDVs is None:
raise TProtocol.TProtocolException(message='Required field numDVs is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class BinaryColumnStatsData:
"""
Attributes:
- maxColLen
- avgColLen
- numNulls
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'maxColLen', None, None, ), # 1
(2, TType.DOUBLE, 'avgColLen', None, None, ), # 2
(3, TType.I64, 'numNulls', None, None, ), # 3
)
def __init__(self, maxColLen=None, avgColLen=None, numNulls=None,):
self.maxColLen = maxColLen
self.avgColLen = avgColLen
self.numNulls = numNulls
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.maxColLen = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.DOUBLE:
self.avgColLen = iprot.readDouble();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.numNulls = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('BinaryColumnStatsData')
if self.maxColLen is not None:
oprot.writeFieldBegin('maxColLen', TType.I64, 1)
oprot.writeI64(self.maxColLen)
oprot.writeFieldEnd()
if self.avgColLen is not None:
oprot.writeFieldBegin('avgColLen', TType.DOUBLE, 2)
oprot.writeDouble(self.avgColLen)
oprot.writeFieldEnd()
if self.numNulls is not None:
oprot.writeFieldBegin('numNulls', TType.I64, 3)
oprot.writeI64(self.numNulls)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.maxColLen is None:
raise TProtocol.TProtocolException(message='Required field maxColLen is unset!')
if self.avgColLen is None:
raise TProtocol.TProtocolException(message='Required field avgColLen is unset!')
if self.numNulls is None:
raise TProtocol.TProtocolException(message='Required field numNulls is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Decimal:
"""
Attributes:
- unscaled
- scale
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'unscaled', None, None, ), # 1
None, # 2
(3, TType.I16, 'scale', None, None, ), # 3
)
def __init__(self, unscaled=None, scale=None,):
self.unscaled = unscaled
self.scale = scale
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.unscaled = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I16:
self.scale = iprot.readI16();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Decimal')
if self.unscaled is not None:
oprot.writeFieldBegin('unscaled', TType.STRING, 1)
oprot.writeString(self.unscaled)
oprot.writeFieldEnd()
if self.scale is not None:
oprot.writeFieldBegin('scale', TType.I16, 3)
oprot.writeI16(self.scale)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.unscaled is None:
raise TProtocol.TProtocolException(message='Required field unscaled is unset!')
if self.scale is None:
raise TProtocol.TProtocolException(message='Required field scale is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
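# Decimal carries the unscaled integer as a byte string plus a scale; the byte
# encoding is assumed here to be the big-endian two's-complement form produced
# by java.math.BigInteger.toByteArray() on the server side (not stated in this
# file). Illustrative sketch:
#
#   d = Decimal(unscaled='\x30\x39', scale=3)  # 12345 * 10**-3 == 12.345
#   d.validate()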
class DecimalColumnStatsData:
"""
Attributes:
- lowValue
- highValue
- numNulls
- numDVs
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'lowValue', (Decimal, Decimal.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'highValue', (Decimal, Decimal.thrift_spec), None, ), # 2
(3, TType.I64, 'numNulls', None, None, ), # 3
(4, TType.I64, 'numDVs', None, None, ), # 4
)
def __init__(self, lowValue=None, highValue=None, numNulls=None, numDVs=None,):
self.lowValue = lowValue
self.highValue = highValue
self.numNulls = numNulls
self.numDVs = numDVs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.lowValue = Decimal()
self.lowValue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.highValue = Decimal()
self.highValue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.numNulls = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.numDVs = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DecimalColumnStatsData')
if self.lowValue is not None:
oprot.writeFieldBegin('lowValue', TType.STRUCT, 1)
self.lowValue.write(oprot)
oprot.writeFieldEnd()
if self.highValue is not None:
oprot.writeFieldBegin('highValue', TType.STRUCT, 2)
self.highValue.write(oprot)
oprot.writeFieldEnd()
if self.numNulls is not None:
oprot.writeFieldBegin('numNulls', TType.I64, 3)
oprot.writeI64(self.numNulls)
oprot.writeFieldEnd()
if self.numDVs is not None:
oprot.writeFieldBegin('numDVs', TType.I64, 4)
oprot.writeI64(self.numDVs)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.numNulls is None:
raise TProtocol.TProtocolException(message='Required field numNulls is unset!')
if self.numDVs is None:
raise TProtocol.TProtocolException(message='Required field numDVs is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ColumnStatisticsData:
"""
Attributes:
- booleanStats
- longStats
- doubleStats
- stringStats
- binaryStats
- decimalStats
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'booleanStats', (BooleanColumnStatsData, BooleanColumnStatsData.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'longStats', (LongColumnStatsData, LongColumnStatsData.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'doubleStats', (DoubleColumnStatsData, DoubleColumnStatsData.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'stringStats', (StringColumnStatsData, StringColumnStatsData.thrift_spec), None, ), # 4
(5, TType.STRUCT, 'binaryStats', (BinaryColumnStatsData, BinaryColumnStatsData.thrift_spec), None, ), # 5
(6, TType.STRUCT, 'decimalStats', (DecimalColumnStatsData, DecimalColumnStatsData.thrift_spec), None, ), # 6
)
def __init__(self, booleanStats=None, longStats=None, doubleStats=None, stringStats=None, binaryStats=None, decimalStats=None,):
self.booleanStats = booleanStats
self.longStats = longStats
self.doubleStats = doubleStats
self.stringStats = stringStats
self.binaryStats = binaryStats
self.decimalStats = decimalStats
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.booleanStats = BooleanColumnStatsData()
self.booleanStats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.longStats = LongColumnStatsData()
self.longStats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.doubleStats = DoubleColumnStatsData()
self.doubleStats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.stringStats = StringColumnStatsData()
self.stringStats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.binaryStats = BinaryColumnStatsData()
self.binaryStats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.decimalStats = DecimalColumnStatsData()
self.decimalStats.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ColumnStatisticsData')
if self.booleanStats is not None:
oprot.writeFieldBegin('booleanStats', TType.STRUCT, 1)
self.booleanStats.write(oprot)
oprot.writeFieldEnd()
if self.longStats is not None:
oprot.writeFieldBegin('longStats', TType.STRUCT, 2)
self.longStats.write(oprot)
oprot.writeFieldEnd()
if self.doubleStats is not None:
oprot.writeFieldBegin('doubleStats', TType.STRUCT, 3)
self.doubleStats.write(oprot)
oprot.writeFieldEnd()
if self.stringStats is not None:
oprot.writeFieldBegin('stringStats', TType.STRUCT, 4)
self.stringStats.write(oprot)
oprot.writeFieldEnd()
if self.binaryStats is not None:
oprot.writeFieldBegin('binaryStats', TType.STRUCT, 5)
self.binaryStats.write(oprot)
oprot.writeFieldEnd()
if self.decimalStats is not None:
oprot.writeFieldBegin('decimalStats', TType.STRUCT, 6)
self.decimalStats.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
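# ColumnStatisticsData is used as a union: callers set exactly one of the
# *Stats fields, matching the column's type. validate() does not enforce this,
# so the one-field convention is inferred from usage rather than stated here.
#
#   data = ColumnStatisticsData(longStats=LongColumnStatsData(
#       lowValue=0, highValue=100, numNulls=2, numDVs=37))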
class ColumnStatisticsObj:
"""
Attributes:
- colName
- colType
- statsData
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'colName', None, None, ), # 1
(2, TType.STRING, 'colType', None, None, ), # 2
(3, TType.STRUCT, 'statsData', (ColumnStatisticsData, ColumnStatisticsData.thrift_spec), None, ), # 3
)
def __init__(self, colName=None, colType=None, statsData=None,):
self.colName = colName
self.colType = colType
self.statsData = statsData
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.colName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.colType = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.statsData = ColumnStatisticsData()
self.statsData.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ColumnStatisticsObj')
if self.colName is not None:
oprot.writeFieldBegin('colName', TType.STRING, 1)
oprot.writeString(self.colName)
oprot.writeFieldEnd()
if self.colType is not None:
oprot.writeFieldBegin('colType', TType.STRING, 2)
oprot.writeString(self.colType)
oprot.writeFieldEnd()
if self.statsData is not None:
oprot.writeFieldBegin('statsData', TType.STRUCT, 3)
self.statsData.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.colName is None:
raise TProtocol.TProtocolException(message='Required field colName is unset!')
if self.colType is None:
raise TProtocol.TProtocolException(message='Required field colType is unset!')
if self.statsData is None:
raise TProtocol.TProtocolException(message='Required field statsData is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ColumnStatisticsDesc:
"""
Attributes:
- isTblLevel
- dbName
- tableName
- partName
- lastAnalyzed
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'isTblLevel', None, None, ), # 1
(2, TType.STRING, 'dbName', None, None, ), # 2
(3, TType.STRING, 'tableName', None, None, ), # 3
(4, TType.STRING, 'partName', None, None, ), # 4
(5, TType.I64, 'lastAnalyzed', None, None, ), # 5
)
def __init__(self, isTblLevel=None, dbName=None, tableName=None, partName=None, lastAnalyzed=None,):
self.isTblLevel = isTblLevel
self.dbName = dbName
self.tableName = tableName
self.partName = partName
self.lastAnalyzed = lastAnalyzed
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.isTblLevel = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.partName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.lastAnalyzed = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ColumnStatisticsDesc')
if self.isTblLevel is not None:
oprot.writeFieldBegin('isTblLevel', TType.BOOL, 1)
oprot.writeBool(self.isTblLevel)
oprot.writeFieldEnd()
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 2)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 3)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.partName is not None:
oprot.writeFieldBegin('partName', TType.STRING, 4)
oprot.writeString(self.partName)
oprot.writeFieldEnd()
if self.lastAnalyzed is not None:
oprot.writeFieldBegin('lastAnalyzed', TType.I64, 5)
oprot.writeI64(self.lastAnalyzed)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.isTblLevel is None:
raise TProtocol.TProtocolException(message='Required field isTblLevel is unset!')
if self.dbName is None:
raise TProtocol.TProtocolException(message='Required field dbName is unset!')
if self.tableName is None:
raise TProtocol.TProtocolException(message='Required field tableName is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
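# Reading the field names, isTblLevel=True appears to mean table-level stats
# (partName unset) and isTblLevel=False partition-level stats (partName set by
# the caller); validate() checks neither combination, so treat this as an
# inferred convention.
#
#   desc = ColumnStatisticsDesc(isTblLevel=True, dbName='default', tableName='t')
#   desc.validate()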
class ColumnStatistics:
"""
Attributes:
- statsDesc
- statsObj
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'statsDesc', (ColumnStatisticsDesc, ColumnStatisticsDesc.thrift_spec), None, ), # 1
(2, TType.LIST, 'statsObj', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 2
)
def __init__(self, statsDesc=None, statsObj=None,):
self.statsDesc = statsDesc
self.statsObj = statsObj
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.statsDesc = ColumnStatisticsDesc()
self.statsDesc.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.statsObj = []
(_etype242, _size239) = iprot.readListBegin()
for _i243 in xrange(_size239):
_elem244 = ColumnStatisticsObj()
_elem244.read(iprot)
self.statsObj.append(_elem244)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ColumnStatistics')
if self.statsDesc is not None:
oprot.writeFieldBegin('statsDesc', TType.STRUCT, 1)
self.statsDesc.write(oprot)
oprot.writeFieldEnd()
if self.statsObj is not None:
oprot.writeFieldBegin('statsObj', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.statsObj))
for iter245 in self.statsObj:
iter245.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.statsDesc is None:
raise TProtocol.TProtocolException(message='Required field statsDesc is unset!')
if self.statsObj is None:
raise TProtocol.TProtocolException(message='Required field statsObj is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
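# Assembling a complete ColumnStatistics payload from the structs above; the
# database, table, and column names are illustrative only:
#
#   obj = ColumnStatisticsObj(colName='id', colType='bigint',
#       statsData=ColumnStatisticsData(longStats=LongColumnStatsData(
#           lowValue=1, highValue=9, numNulls=0, numDVs=9)))
#   stats = ColumnStatistics(
#       statsDesc=ColumnStatisticsDesc(isTblLevel=True, dbName='default',
#                                      tableName='t'),
#       statsObj=[obj])
#   stats.validate()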
class AggrStats:
"""
Attributes:
- colStats
- partsFound
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1
(2, TType.I64, 'partsFound', None, None, ), # 2
)
def __init__(self, colStats=None, partsFound=None,):
self.colStats = colStats
self.partsFound = partsFound
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.colStats = []
(_etype249, _size246) = iprot.readListBegin()
for _i250 in xrange(_size246):
_elem251 = ColumnStatisticsObj()
_elem251.read(iprot)
self.colStats.append(_elem251)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.partsFound = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AggrStats')
if self.colStats is not None:
oprot.writeFieldBegin('colStats', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.colStats))
for iter252 in self.colStats:
iter252.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.partsFound is not None:
oprot.writeFieldBegin('partsFound', TType.I64, 2)
oprot.writeI64(self.partsFound)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.colStats is None:
raise TProtocol.TProtocolException(message='Required field colStats is unset!')
if self.partsFound is None:
raise TProtocol.TProtocolException(message='Required field partsFound is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SetPartitionsStatsRequest:
"""
Attributes:
- colStats
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1
)
def __init__(self, colStats=None,):
self.colStats = colStats
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.colStats = []
(_etype256, _size253) = iprot.readListBegin()
for _i257 in xrange(_size253):
_elem258 = ColumnStatistics()
_elem258.read(iprot)
self.colStats.append(_elem258)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SetPartitionsStatsRequest')
if self.colStats is not None:
oprot.writeFieldBegin('colStats', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.colStats))
for iter259 in self.colStats:
iter259.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.colStats is None:
raise TProtocol.TProtocolException(message='Required field colStats is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Schema:
"""
Attributes:
- fieldSchemas
- properties
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'fieldSchemas', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 1
(2, TType.MAP, 'properties', (TType.STRING,None,TType.STRING,None), None, ), # 2
)
def __init__(self, fieldSchemas=None, properties=None,):
self.fieldSchemas = fieldSchemas
self.properties = properties
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.fieldSchemas = []
(_etype263, _size260) = iprot.readListBegin()
for _i264 in xrange(_size260):
_elem265 = FieldSchema()
_elem265.read(iprot)
self.fieldSchemas.append(_elem265)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.properties = {}
(_ktype267, _vtype268, _size266 ) = iprot.readMapBegin()
for _i270 in xrange(_size266):
_key271 = iprot.readString();
_val272 = iprot.readString();
self.properties[_key271] = _val272
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Schema')
if self.fieldSchemas is not None:
oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas))
for iter273 in self.fieldSchemas:
iter273.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.properties is not None:
oprot.writeFieldBegin('properties', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
for kiter274,viter275 in self.properties.items():
oprot.writeString(kiter274)
oprot.writeString(viter275)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class EnvironmentContext:
"""
Attributes:
- properties
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'properties', (TType.STRING,None,TType.STRING,None), None, ), # 1
)
def __init__(self, properties=None,):
self.properties = properties
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.properties = {}
(_ktype277, _vtype278, _size276 ) = iprot.readMapBegin()
for _i280 in xrange(_size276):
_key281 = iprot.readString();
_val282 = iprot.readString();
self.properties[_key281] = _val282
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('EnvironmentContext')
if self.properties is not None:
oprot.writeFieldBegin('properties', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
for kiter283,viter284 in self.properties.items():
oprot.writeString(kiter283)
oprot.writeString(viter284)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PartitionsByExprResult:
"""
Attributes:
- partitions
- hasUnknownPartitions
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1
(2, TType.BOOL, 'hasUnknownPartitions', None, None, ), # 2
)
def __init__(self, partitions=None, hasUnknownPartitions=None,):
self.partitions = partitions
self.hasUnknownPartitions = hasUnknownPartitions
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.partitions = []
(_etype288, _size285) = iprot.readListBegin()
for _i289 in xrange(_size285):
_elem290 = Partition()
_elem290.read(iprot)
self.partitions.append(_elem290)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.hasUnknownPartitions = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PartitionsByExprResult')
if self.partitions is not None:
oprot.writeFieldBegin('partitions', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.partitions))
for iter291 in self.partitions:
iter291.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.hasUnknownPartitions is not None:
oprot.writeFieldBegin('hasUnknownPartitions', TType.BOOL, 2)
oprot.writeBool(self.hasUnknownPartitions)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.partitions is None:
raise TProtocol.TProtocolException(message='Required field partitions is unset!')
if self.hasUnknownPartitions is None:
raise TProtocol.TProtocolException(message='Required field hasUnknownPartitions is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
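# hasUnknownPartitions presumably signals that the server could not evaluate
# the expression against every partition, so the returned list may be a
# superset that the client should re-filter; this is an inference from the
# field name, not documented in this file.
#
#   result = PartitionsByExprResult(partitions=[], hasUnknownPartitions=True)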
class PartitionsByExprRequest:
"""
Attributes:
- dbName
- tblName
- expr
- defaultPartitionName
- maxParts
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'dbName', None, None, ), # 1
(2, TType.STRING, 'tblName', None, None, ), # 2
(3, TType.STRING, 'expr', None, None, ), # 3
(4, TType.STRING, 'defaultPartitionName', None, None, ), # 4
(5, TType.I16, 'maxParts', None, -1, ), # 5
)
def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4],):
self.dbName = dbName
self.tblName = tblName
self.expr = expr
self.defaultPartitionName = defaultPartitionName
self.maxParts = maxParts
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.tblName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.expr = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.defaultPartitionName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I16:
self.maxParts = iprot.readI16();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PartitionsByExprRequest')
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 1)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tblName is not None:
oprot.writeFieldBegin('tblName', TType.STRING, 2)
oprot.writeString(self.tblName)
oprot.writeFieldEnd()
if self.expr is not None:
oprot.writeFieldBegin('expr', TType.STRING, 3)
oprot.writeString(self.expr)
oprot.writeFieldEnd()
if self.defaultPartitionName is not None:
oprot.writeFieldBegin('defaultPartitionName', TType.STRING, 4)
oprot.writeString(self.defaultPartitionName)
oprot.writeFieldEnd()
if self.maxParts is not None:
oprot.writeFieldBegin('maxParts', TType.I16, 5)
oprot.writeI16(self.maxParts)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.dbName is None:
raise TProtocol.TProtocolException(message='Required field dbName is unset!')
if self.tblName is None:
raise TProtocol.TProtocolException(message='Required field tblName is unset!')
if self.expr is None:
raise TProtocol.TProtocolException(message='Required field expr is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
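# 'expr' is an opaque serialized filter expression whose format is defined
# outside this file; maxParts defaults to -1 (see thrift_spec[5]), which by
# convention appears to mean "no limit". Sketch, where serialized_expr is a
# hypothetical placeholder for such a byte string:
#
#   req = PartitionsByExprRequest(dbName='default', tblName='t',
#                                 expr=serialized_expr)
#   req.validate()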
class TableStatsResult:
"""
Attributes:
- tableStats
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'tableStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1
)
def __init__(self, tableStats=None,):
self.tableStats = tableStats
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.tableStats = []
(_etype295, _size292) = iprot.readListBegin()
for _i296 in xrange(_size292):
_elem297 = ColumnStatisticsObj()
_elem297.read(iprot)
self.tableStats.append(_elem297)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TableStatsResult')
if self.tableStats is not None:
oprot.writeFieldBegin('tableStats', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.tableStats))
for iter298 in self.tableStats:
iter298.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.tableStats is None:
raise TProtocol.TProtocolException(message='Required field tableStats is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PartitionsStatsResult:
"""
Attributes:
- partStats
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'partStats', (TType.STRING,None,TType.LIST,(TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec))), None, ), # 1
)
def __init__(self, partStats=None,):
self.partStats = partStats
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.partStats = {}
(_ktype300, _vtype301, _size299 ) = iprot.readMapBegin()
for _i303 in xrange(_size299):
_key304 = iprot.readString();
_val305 = []
(_etype309, _size306) = iprot.readListBegin()
for _i310 in xrange(_size306):
_elem311 = ColumnStatisticsObj()
_elem311.read(iprot)
_val305.append(_elem311)
iprot.readListEnd()
self.partStats[_key304] = _val305
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PartitionsStatsResult')
if self.partStats is not None:
oprot.writeFieldBegin('partStats', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats))
for kiter312,viter313 in self.partStats.items():
oprot.writeString(kiter312)
oprot.writeListBegin(TType.STRUCT, len(viter313))
for iter314 in viter313:
iter314.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.partStats is None:
raise TProtocol.TProtocolException(message='Required field partStats is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TableStatsRequest:
"""
Attributes:
- dbName
- tblName
- colNames
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'dbName', None, None, ), # 1
(2, TType.STRING, 'tblName', None, None, ), # 2
(3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3
)
def __init__(self, dbName=None, tblName=None, colNames=None,):
self.dbName = dbName
self.tblName = tblName
self.colNames = colNames
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.tblName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.colNames = []
(_etype318, _size315) = iprot.readListBegin()
for _i319 in xrange(_size315):
_elem320 = iprot.readString();
self.colNames.append(_elem320)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TableStatsRequest')
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 1)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tblName is not None:
oprot.writeFieldBegin('tblName', TType.STRING, 2)
oprot.writeString(self.tblName)
oprot.writeFieldEnd()
if self.colNames is not None:
oprot.writeFieldBegin('colNames', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.colNames))
for iter321 in self.colNames:
oprot.writeString(iter321)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.dbName is None:
raise TProtocol.TProtocolException(message='Required field dbName is unset!')
if self.tblName is None:
raise TProtocol.TProtocolException(message='Required field tblName is unset!')
if self.colNames is None:
raise TProtocol.TProtocolException(message='Required field colNames is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
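# Requesting table-level stats for a subset of columns; names are illustrative:
#
#   req = TableStatsRequest(dbName='default', tblName='t',
#                           colNames=['id', 'name'])
#   req.validate()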
class PartitionsStatsRequest:
"""
Attributes:
- dbName
- tblName
- colNames
- partNames
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'dbName', None, None, ), # 1
(2, TType.STRING, 'tblName', None, None, ), # 2
(3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3
(4, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 4
)
def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None,):
self.dbName = dbName
self.tblName = tblName
self.colNames = colNames
self.partNames = partNames
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.tblName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.colNames = []
(_etype325, _size322) = iprot.readListBegin()
for _i326 in xrange(_size322):
_elem327 = iprot.readString();
self.colNames.append(_elem327)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.partNames = []
(_etype331, _size328) = iprot.readListBegin()
for _i332 in xrange(_size328):
_elem333 = iprot.readString();
self.partNames.append(_elem333)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PartitionsStatsRequest')
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 1)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tblName is not None:
oprot.writeFieldBegin('tblName', TType.STRING, 2)
oprot.writeString(self.tblName)
oprot.writeFieldEnd()
if self.colNames is not None:
oprot.writeFieldBegin('colNames', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.colNames))
for iter334 in self.colNames:
oprot.writeString(iter334)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.partNames is not None:
oprot.writeFieldBegin('partNames', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.partNames))
for iter335 in self.partNames:
oprot.writeString(iter335)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.dbName is None:
raise TProtocol.TProtocolException(message='Required field dbName is unset!')
if self.tblName is None:
raise TProtocol.TProtocolException(message='Required field tblName is unset!')
if self.colNames is None:
raise TProtocol.TProtocolException(message='Required field colNames is unset!')
if self.partNames is None:
raise TProtocol.TProtocolException(message='Required field partNames is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AddPartitionsResult:
"""
Attributes:
- partitions
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1
)
def __init__(self, partitions=None,):
self.partitions = partitions
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.partitions = []
(_etype339, _size336) = iprot.readListBegin()
for _i340 in xrange(_size336):
_elem341 = Partition()
_elem341.read(iprot)
self.partitions.append(_elem341)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AddPartitionsResult')
if self.partitions is not None:
oprot.writeFieldBegin('partitions', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.partitions))
for iter342 in self.partitions:
iter342.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AddPartitionsRequest:
"""
Attributes:
- dbName
- tblName
- parts
- ifNotExists
- needResult
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'dbName', None, None, ), # 1
(2, TType.STRING, 'tblName', None, None, ), # 2
(3, TType.LIST, 'parts', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3
(4, TType.BOOL, 'ifNotExists', None, None, ), # 4
(5, TType.BOOL, 'needResult', None, True, ), # 5
)
def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4],):
self.dbName = dbName
self.tblName = tblName
self.parts = parts
self.ifNotExists = ifNotExists
self.needResult = needResult
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.tblName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.parts = []
(_etype346, _size343) = iprot.readListBegin()
for _i347 in xrange(_size343):
_elem348 = Partition()
_elem348.read(iprot)
self.parts.append(_elem348)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.ifNotExists = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.needResult = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AddPartitionsRequest')
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 1)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tblName is not None:
oprot.writeFieldBegin('tblName', TType.STRING, 2)
oprot.writeString(self.tblName)
oprot.writeFieldEnd()
if self.parts is not None:
oprot.writeFieldBegin('parts', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.parts))
for iter349 in self.parts:
iter349.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ifNotExists is not None:
oprot.writeFieldBegin('ifNotExists', TType.BOOL, 4)
oprot.writeBool(self.ifNotExists)
oprot.writeFieldEnd()
if self.needResult is not None:
oprot.writeFieldBegin('needResult', TType.BOOL, 5)
oprot.writeBool(self.needResult)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.dbName is None:
raise TProtocol.TProtocolException(message='Required field dbName is unset!')
if self.tblName is None:
raise TProtocol.TProtocolException(message='Required field tblName is unset!')
if self.parts is None:
raise TProtocol.TProtocolException(message='Required field parts is unset!')
if self.ifNotExists is None:
raise TProtocol.TProtocolException(message='Required field ifNotExists is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
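# needResult defaults to True (thrift_spec[5]); setting it False presumably
# lets the service skip populating AddPartitionsResult.partitions, which that
# struct's validate() leaves optional. Sketch, where new_parts stands in for a
# list of Partition objects built elsewhere:
#
#   req = AddPartitionsRequest(dbName='default', tblName='t',
#                              parts=new_parts, ifNotExists=True)
#   req.validate()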
class DropPartitionsResult:
"""
Attributes:
- partitions
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1
)
def __init__(self, partitions=None,):
self.partitions = partitions
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.partitions = []
(_etype353, _size350) = iprot.readListBegin()
for _i354 in xrange(_size350):
_elem355 = Partition()
_elem355.read(iprot)
self.partitions.append(_elem355)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DropPartitionsResult')
if self.partitions is not None:
oprot.writeFieldBegin('partitions', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.partitions))
for iter356 in self.partitions:
iter356.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DropPartitionsExpr:
"""
Attributes:
- expr
- partArchiveLevel
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'expr', None, None, ), # 1
(2, TType.I32, 'partArchiveLevel', None, None, ), # 2
)
def __init__(self, expr=None, partArchiveLevel=None,):
self.expr = expr
self.partArchiveLevel = partArchiveLevel
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.expr = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.partArchiveLevel = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DropPartitionsExpr')
if self.expr is not None:
oprot.writeFieldBegin('expr', TType.STRING, 1)
oprot.writeString(self.expr)
oprot.writeFieldEnd()
if self.partArchiveLevel is not None:
oprot.writeFieldBegin('partArchiveLevel', TType.I32, 2)
oprot.writeI32(self.partArchiveLevel)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.expr is None:
raise TProtocol.TProtocolException(message='Required field expr is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RequestPartsSpec:
"""
Attributes:
- names
- exprs
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'names', (TType.STRING,None), None, ), # 1
(2, TType.LIST, 'exprs', (TType.STRUCT,(DropPartitionsExpr, DropPartitionsExpr.thrift_spec)), None, ), # 2
)
def __init__(self, names=None, exprs=None,):
self.names = names
self.exprs = exprs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.names = []
(_etype360, _size357) = iprot.readListBegin()
for _i361 in xrange(_size357):
_elem362 = iprot.readString();
self.names.append(_elem362)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.exprs = []
(_etype366, _size363) = iprot.readListBegin()
for _i367 in xrange(_size363):
_elem368 = DropPartitionsExpr()
_elem368.read(iprot)
self.exprs.append(_elem368)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RequestPartsSpec')
if self.names is not None:
oprot.writeFieldBegin('names', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.names))
for iter369 in self.names:
oprot.writeString(iter369)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.exprs is not None:
oprot.writeFieldBegin('exprs', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.exprs))
for iter370 in self.exprs:
iter370.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
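# RequestPartsSpec behaves like a union: callers typically set exactly one of
# `names` (partition names) or `exprs` (serialized expressions), although
# validate() does not enforce that. Illustrative values:
#
#   by_name = RequestPartsSpec(names=['ds=2014-01-01/hr=00'])
#   by_expr = RequestPartsSpec(exprs=[DropPartitionsExpr(expr='<bytes>')])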
class DropPartitionsRequest:
"""
Attributes:
   - dbName: database of the target table
   - tblName: table whose partitions are dropped
   - parts: partitions to drop, given by name or by serialized expression
   - deleteData: also delete the partition data, not just the metadata
   - ifExists: do not raise if a requested partition does not exist (default True)
   - ignoreProtection: drop even partitions that are marked as protected
   - environmentContext
   - needResult: return the dropped partitions in the response (default True)
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'dbName', None, None, ), # 1
(2, TType.STRING, 'tblName', None, None, ), # 2
(3, TType.STRUCT, 'parts', (RequestPartsSpec, RequestPartsSpec.thrift_spec), None, ), # 3
(4, TType.BOOL, 'deleteData', None, None, ), # 4
(5, TType.BOOL, 'ifExists', None, True, ), # 5
(6, TType.BOOL, 'ignoreProtection', None, None, ), # 6
(7, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 7
(8, TType.BOOL, 'needResult', None, True, ), # 8
)
def __init__(self, dbName=None, tblName=None, parts=None, deleteData=None, ifExists=thrift_spec[5][4], ignoreProtection=None, environmentContext=None, needResult=thrift_spec[8][4],):
self.dbName = dbName
self.tblName = tblName
self.parts = parts
self.deleteData = deleteData
self.ifExists = ifExists
self.ignoreProtection = ignoreProtection
self.environmentContext = environmentContext
self.needResult = needResult
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.tblName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.parts = RequestPartsSpec()
self.parts.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.deleteData = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.ifExists = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.ignoreProtection = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRUCT:
self.environmentContext = EnvironmentContext()
self.environmentContext.read(iprot)
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.needResult = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DropPartitionsRequest')
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 1)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tblName is not None:
oprot.writeFieldBegin('tblName', TType.STRING, 2)
oprot.writeString(self.tblName)
oprot.writeFieldEnd()
if self.parts is not None:
oprot.writeFieldBegin('parts', TType.STRUCT, 3)
self.parts.write(oprot)
oprot.writeFieldEnd()
if self.deleteData is not None:
oprot.writeFieldBegin('deleteData', TType.BOOL, 4)
oprot.writeBool(self.deleteData)
oprot.writeFieldEnd()
if self.ifExists is not None:
oprot.writeFieldBegin('ifExists', TType.BOOL, 5)
oprot.writeBool(self.ifExists)
oprot.writeFieldEnd()
if self.ignoreProtection is not None:
oprot.writeFieldBegin('ignoreProtection', TType.BOOL, 6)
oprot.writeBool(self.ignoreProtection)
oprot.writeFieldEnd()
if self.environmentContext is not None:
oprot.writeFieldBegin('environmentContext', TType.STRUCT, 7)
self.environmentContext.write(oprot)
oprot.writeFieldEnd()
if self.needResult is not None:
oprot.writeFieldBegin('needResult', TType.BOOL, 8)
oprot.writeBool(self.needResult)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.dbName is None:
raise TProtocol.TProtocolException(message='Required field dbName is unset!')
if self.tblName is None:
raise TProtocol.TProtocolException(message='Required field tblName is unset!')
if self.parts is None:
raise TProtocol.TProtocolException(message='Required field parts is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
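# Construction sketch: the two boolean defaults come straight from
# thrift_spec (ifExists at index 5, needResult at index 8, both True).
#
#   req = DropPartitionsRequest(dbName='default', tblName='events',
#                               parts=RequestPartsSpec(names=['ds=2014-01-01']))
#   assert req.ifExists is True and req.needResult is True  # spec defaults
#   req.validate()  # dbName, tblName and parts are the required fields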
class ResourceUri:
"""
Attributes:
- resourceType
- uri
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'resourceType', None, None, ), # 1
(2, TType.STRING, 'uri', None, None, ), # 2
)
def __init__(self, resourceType=None, uri=None,):
self.resourceType = resourceType
self.uri = uri
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.resourceType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.uri = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ResourceUri')
if self.resourceType is not None:
oprot.writeFieldBegin('resourceType', TType.I32, 1)
oprot.writeI32(self.resourceType)
oprot.writeFieldEnd()
if self.uri is not None:
oprot.writeFieldBegin('uri', TType.STRING, 2)
oprot.writeString(self.uri)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
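# Sketch (illustrative): `resourceType` is an i32 mirroring a Thrift enum
# defined elsewhere in this module (in Hive this is ResourceType, e.g.
# JAR/FILE/ARCHIVE -- an assumption, since the enum is not shown here).
#
#   uri = ResourceUri(resourceType=1,  # e.g. ResourceType.JAR, if defined
#                     uri='hdfs:///user/hive/udfs/my_udf.jar')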
class Function:
"""
Attributes:
- functionName
- dbName
- className
- ownerName
- ownerType
- createTime
- functionType
- resourceUris
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'functionName', None, None, ), # 1
(2, TType.STRING, 'dbName', None, None, ), # 2
(3, TType.STRING, 'className', None, None, ), # 3
(4, TType.STRING, 'ownerName', None, None, ), # 4
(5, TType.I32, 'ownerType', None, None, ), # 5
(6, TType.I32, 'createTime', None, None, ), # 6
(7, TType.I32, 'functionType', None, None, ), # 7
(8, TType.LIST, 'resourceUris', (TType.STRUCT,(ResourceUri, ResourceUri.thrift_spec)), None, ), # 8
)
def __init__(self, functionName=None, dbName=None, className=None, ownerName=None, ownerType=None, createTime=None, functionType=None, resourceUris=None,):
self.functionName = functionName
self.dbName = dbName
self.className = className
self.ownerName = ownerName
self.ownerType = ownerType
self.createTime = createTime
self.functionType = functionType
self.resourceUris = resourceUris
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.functionName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.className = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.ownerName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.ownerType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.createTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.functionType = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.resourceUris = []
(_etype374, _size371) = iprot.readListBegin()
for _i375 in xrange(_size371):
_elem376 = ResourceUri()
_elem376.read(iprot)
self.resourceUris.append(_elem376)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Function')
if self.functionName is not None:
oprot.writeFieldBegin('functionName', TType.STRING, 1)
oprot.writeString(self.functionName)
oprot.writeFieldEnd()
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 2)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.className is not None:
oprot.writeFieldBegin('className', TType.STRING, 3)
oprot.writeString(self.className)
oprot.writeFieldEnd()
if self.ownerName is not None:
oprot.writeFieldBegin('ownerName', TType.STRING, 4)
oprot.writeString(self.ownerName)
oprot.writeFieldEnd()
if self.ownerType is not None:
oprot.writeFieldBegin('ownerType', TType.I32, 5)
oprot.writeI32(self.ownerType)
oprot.writeFieldEnd()
if self.createTime is not None:
oprot.writeFieldBegin('createTime', TType.I32, 6)
oprot.writeI32(self.createTime)
oprot.writeFieldEnd()
if self.functionType is not None:
oprot.writeFieldBegin('functionType', TType.I32, 7)
oprot.writeI32(self.functionType)
oprot.writeFieldEnd()
if self.resourceUris is not None:
oprot.writeFieldBegin('resourceUris', TType.LIST, 8)
oprot.writeListBegin(TType.STRUCT, len(self.resourceUris))
for iter377 in self.resourceUris:
iter377.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
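# Function sketch (illustrative values; the enum-backed i32s ownerType and
# functionType mirror PrincipalType/FunctionType enums defined elsewhere,
# which is an assumption here). Note createTime is stored as an i32, i.e.
# epoch seconds, and validate() checks nothing for this struct.
#
#   fn = Function(functionName='my_udf', dbName='default',
#                 className='com.example.MyUDF', ownerName='hive',
#                 ownerType=1, createTime=1400000000, functionType=1,
#                 resourceUris=[ResourceUri(resourceType=1,
#                                           uri='hdfs:///u.jar')])
#   fn.validate()  # always passes: no required fields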
class TxnInfo:
"""
Attributes:
- id
- state
- user
- hostname
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'id', None, None, ), # 1
(2, TType.I32, 'state', None, None, ), # 2
(3, TType.STRING, 'user', None, None, ), # 3
(4, TType.STRING, 'hostname', None, None, ), # 4
)
def __init__(self, id=None, state=None, user=None, hostname=None,):
self.id = id
self.state = state
self.user = user
self.hostname = hostname
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.id = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.state = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.user = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.hostname = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TxnInfo')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I64, 1)
oprot.writeI64(self.id)
oprot.writeFieldEnd()
if self.state is not None:
oprot.writeFieldBegin('state', TType.I32, 2)
oprot.writeI32(self.state)
oprot.writeFieldEnd()
if self.user is not None:
oprot.writeFieldBegin('user', TType.STRING, 3)
oprot.writeString(self.user)
oprot.writeFieldEnd()
if self.hostname is not None:
oprot.writeFieldBegin('hostname', TType.STRING, 4)
oprot.writeString(self.hostname)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.id is None:
raise TProtocol.TProtocolException(message='Required field id is unset!')
if self.state is None:
raise TProtocol.TProtocolException(message='Required field state is unset!')
if self.user is None:
raise TProtocol.TProtocolException(message='Required field user is unset!')
if self.hostname is None:
raise TProtocol.TProtocolException(message='Required field hostname is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
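# TxnInfo sketch: all four fields are required by validate(). `state` is an
# i32 mirroring a transaction-state enum (e.g. OPEN/ABORTED/COMMITTED --
# assumed, not shown in this file); the value below is illustrative only.
#
#   info = TxnInfo(id=42, state=1, user='hive', hostname='worker-1')
#   info.validate()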
class GetOpenTxnsInfoResponse:
"""
Attributes:
- txn_high_water_mark
- open_txns
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'txn_high_water_mark', None, None, ), # 1
(2, TType.LIST, 'open_txns', (TType.STRUCT,(TxnInfo, TxnInfo.thrift_spec)), None, ), # 2
)
def __init__(self, txn_high_water_mark=None, open_txns=None,):
self.txn_high_water_mark = txn_high_water_mark
self.open_txns = open_txns
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.txn_high_water_mark = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.open_txns = []
(_etype381, _size378) = iprot.readListBegin()
for _i382 in xrange(_size378):
_elem383 = TxnInfo()
_elem383.read(iprot)
self.open_txns.append(_elem383)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetOpenTxnsInfoResponse')
if self.txn_high_water_mark is not None:
oprot.writeFieldBegin('txn_high_water_mark', TType.I64, 1)
oprot.writeI64(self.txn_high_water_mark)
oprot.writeFieldEnd()
if self.open_txns is not None:
oprot.writeFieldBegin('open_txns', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.open_txns))
for iter384 in self.open_txns:
iter384.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.txn_high_water_mark is None:
raise TProtocol.TProtocolException(message='Required field txn_high_water_mark is unset!')
if self.open_txns is None:
raise TProtocol.TProtocolException(message='Required field open_txns is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GetOpenTxnsResponse:
"""
Attributes:
- txn_high_water_mark
- open_txns
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'txn_high_water_mark', None, None, ), # 1
(2, TType.SET, 'open_txns', (TType.I64,None), None, ), # 2
)
def __init__(self, txn_high_water_mark=None, open_txns=None,):
self.txn_high_water_mark = txn_high_water_mark
self.open_txns = open_txns
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.txn_high_water_mark = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.SET:
self.open_txns = set()
(_etype388, _size385) = iprot.readSetBegin()
for _i389 in xrange(_size385):
_elem390 = iprot.readI64();
self.open_txns.add(_elem390)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetOpenTxnsResponse')
if self.txn_high_water_mark is not None:
oprot.writeFieldBegin('txn_high_water_mark', TType.I64, 1)
oprot.writeI64(self.txn_high_water_mark)
oprot.writeFieldEnd()
if self.open_txns is not None:
oprot.writeFieldBegin('open_txns', TType.SET, 2)
oprot.writeSetBegin(TType.I64, len(self.open_txns))
for iter391 in self.open_txns:
oprot.writeI64(iter391)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.txn_high_water_mark is None:
raise TProtocol.TProtocolException(message='Required field txn_high_water_mark is unset!')
if self.open_txns is None:
raise TProtocol.TProtocolException(message='Required field open_txns is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
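# The two "open txns" responses differ only in payload shape:
# GetOpenTxnsInfoResponse carries full TxnInfo structs, while
# GetOpenTxnsResponse carries just a set of i64 ids plus the high-water
# mark, which is enough to derive a read snapshot. Illustrative check:
#
#   resp = GetOpenTxnsResponse(txn_high_water_mark=100,
#                              open_txns=set([97, 99]))
#   visible = [t for t in range(1, resp.txn_high_water_mark + 1)
#              if t not in resp.open_txns]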
class OpenTxnRequest:
"""
Attributes:
- num_txns
- user
- hostname
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'num_txns', None, None, ), # 1
(2, TType.STRING, 'user', None, None, ), # 2
(3, TType.STRING, 'hostname', None, None, ), # 3
)
def __init__(self, num_txns=None, user=None, hostname=None,):
self.num_txns = num_txns
self.user = user
self.hostname = hostname
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.num_txns = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.user = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.hostname = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('OpenTxnRequest')
if self.num_txns is not None:
oprot.writeFieldBegin('num_txns', TType.I32, 1)
oprot.writeI32(self.num_txns)
oprot.writeFieldEnd()
if self.user is not None:
oprot.writeFieldBegin('user', TType.STRING, 2)
oprot.writeString(self.user)
oprot.writeFieldEnd()
if self.hostname is not None:
oprot.writeFieldBegin('hostname', TType.STRING, 3)
oprot.writeString(self.hostname)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.num_txns is None:
raise TProtocol.TProtocolException(message='Required field num_txns is unset!')
if self.user is None:
raise TProtocol.TProtocolException(message='Required field user is unset!')
if self.hostname is None:
raise TProtocol.TProtocolException(message='Required field hostname is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class OpenTxnsResponse:
"""
Attributes:
- txn_ids
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'txn_ids', (TType.I64,None), None, ), # 1
)
def __init__(self, txn_ids=None,):
self.txn_ids = txn_ids
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.txn_ids = []
(_etype395, _size392) = iprot.readListBegin()
for _i396 in xrange(_size392):
_elem397 = iprot.readI64();
self.txn_ids.append(_elem397)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('OpenTxnsResponse')
if self.txn_ids is not None:
oprot.writeFieldBegin('txn_ids', TType.LIST, 1)
oprot.writeListBegin(TType.I64, len(self.txn_ids))
for iter398 in self.txn_ids:
oprot.writeI64(iter398)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.txn_ids is None:
raise TProtocol.TProtocolException(message='Required field txn_ids is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AbortTxnRequest:
"""
Attributes:
- txnid
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'txnid', None, None, ), # 1
)
def __init__(self, txnid=None,):
self.txnid = txnid
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.txnid = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AbortTxnRequest')
if self.txnid is not None:
oprot.writeFieldBegin('txnid', TType.I64, 1)
oprot.writeI64(self.txnid)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.txnid is None:
raise TProtocol.TProtocolException(message='Required field txnid is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class CommitTxnRequest:
"""
Attributes:
- txnid
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'txnid', None, None, ), # 1
)
def __init__(self, txnid=None,):
self.txnid = txnid
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.txnid = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('CommitTxnRequest')
if self.txnid is not None:
oprot.writeFieldBegin('txnid', TType.I64, 1)
oprot.writeI64(self.txnid)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.txnid is None:
raise TProtocol.TProtocolException(message='Required field txnid is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
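# End-to-end transaction sketch (client-side, illustrative): open a batch of
# txns, then commit or abort each returned id. The round trip below uses the
# standard Thrift Python transport/protocol classes.
#
#   from thrift.transport import TTransport
#   from thrift.protocol import TBinaryProtocol
#   buf = TTransport.TMemoryBuffer()
#   proto = TBinaryProtocol.TBinaryProtocol(buf)
#   OpenTxnRequest(num_txns=2, user='hive', hostname='worker-1').write(proto)
#   # A server would read the struct back with OpenTxnRequest().read(proto)
#   # and answer with OpenTxnsResponse(txn_ids=[...]); each id is then closed
#   # with CommitTxnRequest(txnid=i) or AbortTxnRequest(txnid=i).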
class LockComponent:
"""
Attributes:
- type
- level
- dbname
- tablename
- partitionname
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'type', None, None, ), # 1
(2, TType.I32, 'level', None, None, ), # 2
(3, TType.STRING, 'dbname', None, None, ), # 3
(4, TType.STRING, 'tablename', None, None, ), # 4
(5, TType.STRING, 'partitionname', None, None, ), # 5
)
def __init__(self, type=None, level=None, dbname=None, tablename=None, partitionname=None,):
self.type = type
self.level = level
self.dbname = dbname
self.tablename = tablename
self.partitionname = partitionname
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.type = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.level = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.dbname = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.tablename = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.partitionname = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('LockComponent')
if self.type is not None:
oprot.writeFieldBegin('type', TType.I32, 1)
oprot.writeI32(self.type)
oprot.writeFieldEnd()
if self.level is not None:
oprot.writeFieldBegin('level', TType.I32, 2)
oprot.writeI32(self.level)
oprot.writeFieldEnd()
if self.dbname is not None:
oprot.writeFieldBegin('dbname', TType.STRING, 3)
oprot.writeString(self.dbname)
oprot.writeFieldEnd()
if self.tablename is not None:
oprot.writeFieldBegin('tablename', TType.STRING, 4)
oprot.writeString(self.tablename)
oprot.writeFieldEnd()
if self.partitionname is not None:
oprot.writeFieldBegin('partitionname', TType.STRING, 5)
oprot.writeString(self.partitionname)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.type is None:
raise TProtocol.TProtocolException(message='Required field type is unset!')
if self.level is None:
raise TProtocol.TProtocolException(message='Required field level is unset!')
if self.dbname is None:
raise TProtocol.TProtocolException(message='Required field dbname is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
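# A LockComponent names one object to lock. `type` and `level` are i32s that
# mirror lock-type/lock-level enums defined elsewhere (the values below are
# illustrative). tablename and partitionname are optional: validate() only
# requires type, level and dbname.
#
#   comp = LockComponent(type=1, level=2, dbname='default',
#                        tablename='events')
#   comp.validate()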
class LockRequest:
"""
Attributes:
- component
- txnid
- user
- hostname
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'component', (TType.STRUCT,(LockComponent, LockComponent.thrift_spec)), None, ), # 1
(2, TType.I64, 'txnid', None, None, ), # 2
(3, TType.STRING, 'user', None, None, ), # 3
(4, TType.STRING, 'hostname', None, None, ), # 4
)
def __init__(self, component=None, txnid=None, user=None, hostname=None,):
self.component = component
self.txnid = txnid
self.user = user
self.hostname = hostname
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.component = []
(_etype402, _size399) = iprot.readListBegin()
for _i403 in xrange(_size399):
_elem404 = LockComponent()
_elem404.read(iprot)
self.component.append(_elem404)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.txnid = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.user = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.hostname = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('LockRequest')
if self.component is not None:
oprot.writeFieldBegin('component', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.component))
for iter405 in self.component:
iter405.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.txnid is not None:
oprot.writeFieldBegin('txnid', TType.I64, 2)
oprot.writeI64(self.txnid)
oprot.writeFieldEnd()
if self.user is not None:
oprot.writeFieldBegin('user', TType.STRING, 3)
oprot.writeString(self.user)
oprot.writeFieldEnd()
if self.hostname is not None:
oprot.writeFieldBegin('hostname', TType.STRING, 4)
oprot.writeString(self.hostname)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.user is None:
raise TProtocol.TProtocolException(message='Required field user is unset!')
if self.hostname is None:
raise TProtocol.TProtocolException(message='Required field hostname is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class LockResponse:
"""
Attributes:
- lockid
- state
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'lockid', None, None, ), # 1
(2, TType.I32, 'state', None, None, ), # 2
)
def __init__(self, lockid=None, state=None,):
self.lockid = lockid
self.state = state
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.lockid = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.state = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('LockResponse')
if self.lockid is not None:
oprot.writeFieldBegin('lockid', TType.I64, 1)
oprot.writeI64(self.lockid)
oprot.writeFieldEnd()
if self.state is not None:
oprot.writeFieldBegin('state', TType.I32, 2)
oprot.writeI32(self.state)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.lockid is None:
raise TProtocol.TProtocolException(message='Required field lockid is unset!')
if self.state is None:
raise TProtocol.TProtocolException(message='Required field state is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
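# Lock acquisition sketch (illustrative): a LockRequest bundles one or more
# LockComponents; txnid is optional, so locks can exist outside a txn. The
# response's `state` is an i32 mirroring a lock-state enum (e.g. acquired vs
# waiting -- assumed, not shown in this file).
#
#   req = LockRequest(component=[LockComponent(type=1, level=2,
#                                              dbname='default')],
#                     user='hive', hostname='worker-1')
#   req.validate()  # component, user and hostname are required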
class CheckLockRequest:
"""
Attributes:
- lockid
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'lockid', None, None, ), # 1
)
def __init__(self, lockid=None,):
self.lockid = lockid
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.lockid = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('CheckLockRequest')
if self.lockid is not None:
oprot.writeFieldBegin('lockid', TType.I64, 1)
oprot.writeI64(self.lockid)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.lockid is None:
raise TProtocol.TProtocolException(message='Required field lockid is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class UnlockRequest:
"""
Attributes:
- lockid
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'lockid', None, None, ), # 1
)
def __init__(self, lockid=None,):
self.lockid = lockid
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.lockid = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('UnlockRequest')
if self.lockid is not None:
oprot.writeFieldBegin('lockid', TType.I64, 1)
oprot.writeI64(self.lockid)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.lockid is None:
raise TProtocol.TProtocolException(message='Required field lockid is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
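# CheckLockRequest and UnlockRequest are the remaining steps of the lock
# lifecycle; each carries only the i64 lockid issued in a LockResponse.
#
#   lockid = 7  # placeholder for a LockResponse.lockid value
#   CheckLockRequest(lockid=lockid).validate()
#   UnlockRequest(lockid=lockid).validate()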
class ShowLocksRequest:
  thrift_spec = ()
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ShowLocksRequest')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ShowLocksResponseElement:
"""
Attributes:
- lockid
- dbname
- tablename
- partname
- state
- type
- txnid
- lastheartbeat
- acquiredat
- user
- hostname
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'lockid', None, None, ), # 1
(2, TType.STRING, 'dbname', None, None, ), # 2
(3, TType.STRING, 'tablename', None, None, ), # 3
(4, TType.STRING, 'partname', None, None, ), # 4
(5, TType.I32, 'state', None, None, ), # 5
(6, TType.I32, 'type', None, None, ), # 6
(7, TType.I64, 'txnid', None, None, ), # 7
(8, TType.I64, 'lastheartbeat', None, None, ), # 8
(9, TType.I64, 'acquiredat', None, None, ), # 9
(10, TType.STRING, 'user', None, None, ), # 10
(11, TType.STRING, 'hostname', None, None, ), # 11
)
def __init__(self, lockid=None, dbname=None, tablename=None, partname=None, state=None, type=None, txnid=None, lastheartbeat=None, acquiredat=None, user=None, hostname=None,):
self.lockid = lockid
self.dbname = dbname
self.tablename = tablename
self.partname = partname
self.state = state
self.type = type
self.txnid = txnid
self.lastheartbeat = lastheartbeat
self.acquiredat = acquiredat
self.user = user
self.hostname = hostname
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.lockid = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.dbname = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.tablename = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.partname = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.state = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.type = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I64:
self.txnid = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I64:
self.lastheartbeat = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I64:
self.acquiredat = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.STRING:
self.user = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRING:
self.hostname = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ShowLocksResponseElement')
if self.lockid is not None:
oprot.writeFieldBegin('lockid', TType.I64, 1)
oprot.writeI64(self.lockid)
oprot.writeFieldEnd()
if self.dbname is not None:
oprot.writeFieldBegin('dbname', TType.STRING, 2)
oprot.writeString(self.dbname)
oprot.writeFieldEnd()
if self.tablename is not None:
oprot.writeFieldBegin('tablename', TType.STRING, 3)
oprot.writeString(self.tablename)
oprot.writeFieldEnd()
if self.partname is not None:
oprot.writeFieldBegin('partname', TType.STRING, 4)
oprot.writeString(self.partname)
oprot.writeFieldEnd()
if self.state is not None:
oprot.writeFieldBegin('state', TType.I32, 5)
oprot.writeI32(self.state)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin('type', TType.I32, 6)
oprot.writeI32(self.type)
oprot.writeFieldEnd()
if self.txnid is not None:
oprot.writeFieldBegin('txnid', TType.I64, 7)
oprot.writeI64(self.txnid)
oprot.writeFieldEnd()
if self.lastheartbeat is not None:
oprot.writeFieldBegin('lastheartbeat', TType.I64, 8)
oprot.writeI64(self.lastheartbeat)
oprot.writeFieldEnd()
if self.acquiredat is not None:
oprot.writeFieldBegin('acquiredat', TType.I64, 9)
oprot.writeI64(self.acquiredat)
oprot.writeFieldEnd()
if self.user is not None:
oprot.writeFieldBegin('user', TType.STRING, 10)
oprot.writeString(self.user)
oprot.writeFieldEnd()
if self.hostname is not None:
oprot.writeFieldBegin('hostname', TType.STRING, 11)
oprot.writeString(self.hostname)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.lockid is None:
raise TProtocol.TProtocolException(message='Required field lockid is unset!')
if self.dbname is None:
raise TProtocol.TProtocolException(message='Required field dbname is unset!')
if self.state is None:
raise TProtocol.TProtocolException(message='Required field state is unset!')
if self.type is None:
raise TProtocol.TProtocolException(message='Required field type is unset!')
if self.lastheartbeat is None:
raise TProtocol.TProtocolException(message='Required field lastheartbeat is unset!')
if self.user is None:
raise TProtocol.TProtocolException(message='Required field user is unset!')
if self.hostname is None:
raise TProtocol.TProtocolException(message='Required field hostname is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ShowLocksResponse:
"""
Attributes:
- locks
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'locks', (TType.STRUCT,(ShowLocksResponseElement, ShowLocksResponseElement.thrift_spec)), None, ), # 1
)
def __init__(self, locks=None,):
self.locks = locks
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.locks = []
(_etype409, _size406) = iprot.readListBegin()
for _i410 in xrange(_size406):
_elem411 = ShowLocksResponseElement()
_elem411.read(iprot)
self.locks.append(_elem411)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ShowLocksResponse')
if self.locks is not None:
oprot.writeFieldBegin('locks', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.locks))
for iter412 in self.locks:
iter412.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class HeartbeatRequest:
"""
Attributes:
- lockid
- txnid
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'lockid', None, None, ), # 1
(2, TType.I64, 'txnid', None, None, ), # 2
)
def __init__(self, lockid=None, txnid=None,):
self.lockid = lockid
self.txnid = txnid
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.lockid = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.txnid = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('HeartbeatRequest')
if self.lockid is not None:
oprot.writeFieldBegin('lockid', TType.I64, 1)
oprot.writeI64(self.lockid)
oprot.writeFieldEnd()
if self.txnid is not None:
oprot.writeFieldBegin('txnid', TType.I64, 2)
oprot.writeI64(self.txnid)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class HeartbeatTxnRangeRequest:
"""
Attributes:
   - min: lowest transaction id in the range (inclusive)
   - max: highest transaction id in the range (inclusive)
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'min', None, None, ), # 1
(2, TType.I64, 'max', None, None, ), # 2
)
def __init__(self, min=None, max=None,):
self.min = min
self.max = max
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.min = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.max = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('HeartbeatTxnRangeRequest')
if self.min is not None:
oprot.writeFieldBegin('min', TType.I64, 1)
oprot.writeI64(self.min)
oprot.writeFieldEnd()
if self.max is not None:
oprot.writeFieldBegin('max', TType.I64, 2)
oprot.writeI64(self.max)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.min is None:
raise TProtocol.TProtocolException(message='Required field min is unset!')
if self.max is None:
raise TProtocol.TProtocolException(message='Required field max is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class HeartbeatTxnRangeResponse:
"""
Attributes:
- aborted
- nosuch
"""
thrift_spec = (
None, # 0
(1, TType.SET, 'aborted', (TType.I64,None), None, ), # 1
(2, TType.SET, 'nosuch', (TType.I64,None), None, ), # 2
)
def __init__(self, aborted=None, nosuch=None,):
self.aborted = aborted
self.nosuch = nosuch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.SET:
self.aborted = set()
(_etype416, _size413) = iprot.readSetBegin()
for _i417 in xrange(_size413):
_elem418 = iprot.readI64();
self.aborted.add(_elem418)
iprot.readSetEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.SET:
self.nosuch = set()
(_etype422, _size419) = iprot.readSetBegin()
for _i423 in xrange(_size419):
_elem424 = iprot.readI64();
self.nosuch.add(_elem424)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('HeartbeatTxnRangeResponse')
if self.aborted is not None:
oprot.writeFieldBegin('aborted', TType.SET, 1)
oprot.writeSetBegin(TType.I64, len(self.aborted))
for iter425 in self.aborted:
oprot.writeI64(iter425)
oprot.writeSetEnd()
oprot.writeFieldEnd()
if self.nosuch is not None:
oprot.writeFieldBegin('nosuch', TType.SET, 2)
oprot.writeSetBegin(TType.I64, len(self.nosuch))
for iter426 in self.nosuch:
oprot.writeI64(iter426)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.aborted is None:
raise TProtocol.TProtocolException(message='Required field aborted is unset!')
if self.nosuch is None:
raise TProtocol.TProtocolException(message='Required field nosuch is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
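# Heartbeating a contiguous id range: the request names an inclusive
# [min, max] span, and the response reports the ids that could not be
# refreshed, split into already-aborted ids and ids unknown to the server.
#
#   hb = HeartbeatTxnRangeRequest(min=1, max=100)
#   # Given a server response `resp`, the successfully heartbeated ids are
#   #   set(range(hb.min, hb.max + 1)) - resp.aborted - resp.nosuch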
class CompactionRequest:
"""
Attributes:
- dbname
- tablename
- partitionname
- type
- runas
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'dbname', None, None, ), # 1
(2, TType.STRING, 'tablename', None, None, ), # 2
(3, TType.STRING, 'partitionname', None, None, ), # 3
(4, TType.I32, 'type', None, None, ), # 4
(5, TType.STRING, 'runas', None, None, ), # 5
)
def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, runas=None,):
self.dbname = dbname
self.tablename = tablename
self.partitionname = partitionname
self.type = type
self.runas = runas
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.dbname = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.tablename = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.partitionname = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.type = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.runas = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('CompactionRequest')
if self.dbname is not None:
oprot.writeFieldBegin('dbname', TType.STRING, 1)
oprot.writeString(self.dbname)
oprot.writeFieldEnd()
if self.tablename is not None:
oprot.writeFieldBegin('tablename', TType.STRING, 2)
oprot.writeString(self.tablename)
oprot.writeFieldEnd()
if self.partitionname is not None:
oprot.writeFieldBegin('partitionname', TType.STRING, 3)
oprot.writeString(self.partitionname)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin('type', TType.I32, 4)
oprot.writeI32(self.type)
oprot.writeFieldEnd()
if self.runas is not None:
oprot.writeFieldBegin('runas', TType.STRING, 5)
oprot.writeString(self.runas)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.dbname is None:
raise TProtocol.TProtocolException(message='Required field dbname is unset!')
if self.tablename is None:
raise TProtocol.TProtocolException(message='Required field tablename is unset!')
if self.type is None:
raise TProtocol.TProtocolException(message='Required field type is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
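# Requesting a compaction (illustrative): `type` is an i32 mirroring a
# compaction-type enum (minor vs major -- assumed, not shown in this file);
# partitionname and runas are optional, since validate() only checks
# dbname, tablename and type.
#
#   cr = CompactionRequest(dbname='default', tablename='events',
#                          partitionname='ds=2014-01-01', type=1)
#   cr.validate()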
class ShowCompactRequest:
  thrift_spec = ()
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ShowCompactRequest')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ShowCompactResponseElement:
"""
Attributes:
- dbname
- tablename
- partitionname
- type
- state
- workerid
- start
- runAs
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'dbname', None, None, ), # 1
(2, TType.STRING, 'tablename', None, None, ), # 2
(3, TType.STRING, 'partitionname', None, None, ), # 3
(4, TType.I32, 'type', None, None, ), # 4
(5, TType.STRING, 'state', None, None, ), # 5
(6, TType.STRING, 'workerid', None, None, ), # 6
(7, TType.I64, 'start', None, None, ), # 7
(8, TType.STRING, 'runAs', None, None, ), # 8
)
def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, state=None, workerid=None, start=None, runAs=None,):
self.dbname = dbname
self.tablename = tablename
self.partitionname = partitionname
self.type = type
self.state = state
self.workerid = workerid
self.start = start
self.runAs = runAs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.dbname = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.tablename = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.partitionname = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.type = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.state = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.workerid = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I64:
self.start = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.runAs = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ShowCompactResponseElement')
if self.dbname is not None:
oprot.writeFieldBegin('dbname', TType.STRING, 1)
oprot.writeString(self.dbname)
oprot.writeFieldEnd()
if self.tablename is not None:
oprot.writeFieldBegin('tablename', TType.STRING, 2)
oprot.writeString(self.tablename)
oprot.writeFieldEnd()
if self.partitionname is not None:
oprot.writeFieldBegin('partitionname', TType.STRING, 3)
oprot.writeString(self.partitionname)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin('type', TType.I32, 4)
oprot.writeI32(self.type)
oprot.writeFieldEnd()
if self.state is not None:
oprot.writeFieldBegin('state', TType.STRING, 5)
oprot.writeString(self.state)
oprot.writeFieldEnd()
if self.workerid is not None:
oprot.writeFieldBegin('workerid', TType.STRING, 6)
oprot.writeString(self.workerid)
oprot.writeFieldEnd()
if self.start is not None:
oprot.writeFieldBegin('start', TType.I64, 7)
oprot.writeI64(self.start)
oprot.writeFieldEnd()
if self.runAs is not None:
oprot.writeFieldBegin('runAs', TType.STRING, 8)
oprot.writeString(self.runAs)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.dbname is None:
raise TProtocol.TProtocolException(message='Required field dbname is unset!')
if self.tablename is None:
raise TProtocol.TProtocolException(message='Required field tablename is unset!')
if self.type is None:
raise TProtocol.TProtocolException(message='Required field type is unset!')
if self.state is None:
raise TProtocol.TProtocolException(message='Required field state is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ShowCompactResponse:
"""
Attributes:
- compacts
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'compacts', (TType.STRUCT,(ShowCompactResponseElement, ShowCompactResponseElement.thrift_spec)), None, ), # 1
)
def __init__(self, compacts=None,):
self.compacts = compacts
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.compacts = []
(_etype430, _size427) = iprot.readListBegin()
for _i431 in xrange(_size427):
_elem432 = ShowCompactResponseElement()
_elem432.read(iprot)
self.compacts.append(_elem432)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ShowCompactResponse')
if self.compacts is not None:
oprot.writeFieldBegin('compacts', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.compacts))
for iter433 in self.compacts:
iter433.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.compacts is None:
raise TProtocol.TProtocolException(message='Required field compacts is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
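# --- Illustrative sketch (added for clarity; not part of the generated Thrift code) ---
# Decoding a ShowCompactResponse received as raw binary-protocol bytes and
# summarizing its elements; assumes the thrift runtime modules imported above.
def _example_summarize_compactions(raw_bytes):
  resp = ShowCompactResponse()
  resp.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(raw_bytes)))
  resp.validate()
  # Each element carries dbname/tablename/type/state plus optional worker info.
  return ['%s.%s [%s]' % (e.dbname, e.tablename, e.state) for e in resp.compacts]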
class NotificationEventRequest:
"""
Attributes:
- lastEvent
- maxEvents
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'lastEvent', None, None, ), # 1
(2, TType.I32, 'maxEvents', None, None, ), # 2
)
def __init__(self, lastEvent=None, maxEvents=None,):
self.lastEvent = lastEvent
self.maxEvents = maxEvents
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.lastEvent = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.maxEvents = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NotificationEventRequest')
if self.lastEvent is not None:
oprot.writeFieldBegin('lastEvent', TType.I64, 1)
oprot.writeI64(self.lastEvent)
oprot.writeFieldEnd()
if self.maxEvents is not None:
oprot.writeFieldBegin('maxEvents', TType.I32, 2)
oprot.writeI32(self.maxEvents)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.lastEvent is None:
raise TProtocol.TProtocolException(message='Required field lastEvent is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
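# --- Illustrative sketch (added for clarity; not part of the generated Thrift code) ---
# lastEvent is the only required field; maxEvents optionally caps the batch
# size, which makes this struct a natural cursor for polling the event stream.
def _example_poll_request(last_seen_id, batch_size=100):
  req = NotificationEventRequest(lastEvent=last_seen_id, maxEvents=batch_size)
  req.validate()
  return req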
class NotificationEvent:
"""
Attributes:
- eventId
- eventTime
- eventType
- dbName
- tableName
- message
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'eventId', None, None, ), # 1
(2, TType.I32, 'eventTime', None, None, ), # 2
(3, TType.STRING, 'eventType', None, None, ), # 3
(4, TType.STRING, 'dbName', None, None, ), # 4
(5, TType.STRING, 'tableName', None, None, ), # 5
(6, TType.STRING, 'message', None, None, ), # 6
)
def __init__(self, eventId=None, eventTime=None, eventType=None, dbName=None, tableName=None, message=None,):
self.eventId = eventId
self.eventTime = eventTime
self.eventType = eventType
self.dbName = dbName
self.tableName = tableName
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.eventId = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.eventTime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.eventType = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NotificationEvent')
if self.eventId is not None:
oprot.writeFieldBegin('eventId', TType.I64, 1)
oprot.writeI64(self.eventId)
oprot.writeFieldEnd()
if self.eventTime is not None:
oprot.writeFieldBegin('eventTime', TType.I32, 2)
oprot.writeI32(self.eventTime)
oprot.writeFieldEnd()
if self.eventType is not None:
oprot.writeFieldBegin('eventType', TType.STRING, 3)
oprot.writeString(self.eventType)
oprot.writeFieldEnd()
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 4)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 5)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 6)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.eventId is None:
raise TProtocol.TProtocolException(message='Required field eventId is unset!')
if self.eventTime is None:
raise TProtocol.TProtocolException(message='Required field eventTime is unset!')
if self.eventType is None:
raise TProtocol.TProtocolException(message='Required field eventType is unset!')
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NotificationEventResponse:
"""
Attributes:
- events
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'events', (TType.STRUCT,(NotificationEvent, NotificationEvent.thrift_spec)), None, ), # 1
)
def __init__(self, events=None,):
self.events = events
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.events = []
(_etype437, _size434) = iprot.readListBegin()
for _i438 in xrange(_size434):
_elem439 = NotificationEvent()
_elem439.read(iprot)
self.events.append(_elem439)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NotificationEventResponse')
if self.events is not None:
oprot.writeFieldBegin('events', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.events))
for iter440 in self.events:
iter440.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.events is None:
raise TProtocol.TProtocolException(message='Required field events is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
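# --- Illustrative sketch (added for clarity; not part of the generated Thrift code) ---
# Round-trip check: a response built in memory should survive write()/read()
# unchanged, since __eq__ compares the full __dict__ of both structs.
def _example_roundtrip_notification_response():
  original = NotificationEventResponse(events=[
      NotificationEvent(eventId=1, eventTime=0, eventType='CREATE_TABLE', message='{}')])
  buf = TTransport.TMemoryBuffer()
  original.write(TBinaryProtocol.TBinaryProtocol(buf))
  decoded = NotificationEventResponse()
  decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
  return decoded == original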
class CurrentNotificationEventId:
"""
Attributes:
- eventId
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'eventId', None, None, ), # 1
)
def __init__(self, eventId=None,):
self.eventId = eventId
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.eventId = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('CurrentNotificationEventId')
if self.eventId is not None:
oprot.writeFieldBegin('eventId', TType.I64, 1)
oprot.writeI64(self.eventId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.eventId is None:
raise TProtocol.TProtocolException(message='Required field eventId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class InsertEventRequestData:
"""
Attributes:
- filesAdded
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'filesAdded', (TType.STRING,None), None, ), # 1
)
def __init__(self, filesAdded=None,):
self.filesAdded = filesAdded
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.filesAdded = []
(_etype444, _size441) = iprot.readListBegin()
for _i445 in xrange(_size441):
_elem446 = iprot.readString();
self.filesAdded.append(_elem446)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InsertEventRequestData')
if self.filesAdded is not None:
oprot.writeFieldBegin('filesAdded', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.filesAdded))
for iter447 in self.filesAdded:
oprot.writeString(iter447)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.filesAdded is None:
raise TProtocol.TProtocolException(message='Required field filesAdded is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class FireEventRequestData:
"""
Attributes:
- insertData
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'insertData', (InsertEventRequestData, InsertEventRequestData.thrift_spec), None, ), # 1
)
def __init__(self, insertData=None,):
self.insertData = insertData
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.insertData = InsertEventRequestData()
self.insertData.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('FireEventRequestData')
if self.insertData is not None:
oprot.writeFieldBegin('insertData', TType.STRUCT, 1)
self.insertData.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class FireEventRequest:
"""
Attributes:
- successful
- data
- dbName
- tableName
- partitionVals
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'successful', None, None, ), # 1
(2, TType.STRUCT, 'data', (FireEventRequestData, FireEventRequestData.thrift_spec), None, ), # 2
(3, TType.STRING, 'dbName', None, None, ), # 3
(4, TType.STRING, 'tableName', None, None, ), # 4
(5, TType.LIST, 'partitionVals', (TType.STRING,None), None, ), # 5
)
def __init__(self, successful=None, data=None, dbName=None, tableName=None, partitionVals=None,):
self.successful = successful
self.data = data
self.dbName = dbName
self.tableName = tableName
self.partitionVals = partitionVals
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.successful = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.data = FireEventRequestData()
self.data.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.dbName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.partitionVals = []
(_etype451, _size448) = iprot.readListBegin()
for _i452 in xrange(_size448):
_elem453 = iprot.readString();
self.partitionVals.append(_elem453)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('FireEventRequest')
if self.successful is not None:
oprot.writeFieldBegin('successful', TType.BOOL, 1)
oprot.writeBool(self.successful)
oprot.writeFieldEnd()
if self.data is not None:
oprot.writeFieldBegin('data', TType.STRUCT, 2)
self.data.write(oprot)
oprot.writeFieldEnd()
if self.dbName is not None:
oprot.writeFieldBegin('dbName', TType.STRING, 3)
oprot.writeString(self.dbName)
oprot.writeFieldEnd()
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 4)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.partitionVals is not None:
oprot.writeFieldBegin('partitionVals', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.partitionVals))
for iter454 in self.partitionVals:
oprot.writeString(iter454)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.successful is None:
raise TProtocol.TProtocolException(message='Required field successful is unset!')
if self.data is None:
raise TProtocol.TProtocolException(message='Required field data is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
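# --- Illustrative sketch (added for clarity; not part of the generated Thrift code) ---
# FireEventRequest nests FireEventRequestData, which in turn wraps the
# insert-specific payload; only `successful` and `data` are required. The
# file path below is a made-up example value.
def _example_build_fire_event_request():
  payload = InsertEventRequestData(filesAdded=['/warehouse/t1/part-00000'])
  req = FireEventRequest(successful=True,
                         data=FireEventRequestData(insertData=payload),
                         dbName='default', tableName='t1')
  req.validate()
  return req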
class FireEventResponse:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('FireEventResponse')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class MetaException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('MetaException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
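# --- Illustrative sketch (added for clarity; not part of the generated Thrift code) ---
# MetaException subclasses TException, so it can be raised and caught like any
# Python exception while still being serializable as a Thrift struct.
def _example_require(value, what):
  if value is None:
    raise MetaException(message='%s must not be None' % what)
  return value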
class UnknownTableException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('UnknownTableException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class UnknownDBException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('UnknownDBException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AlreadyExistsException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AlreadyExistsException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class InvalidPartitionException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InvalidPartitionException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class UnknownPartitionException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('UnknownPartitionException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class InvalidObjectException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InvalidObjectException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NoSuchObjectException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NoSuchObjectException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class IndexAlreadyExistsException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('IndexAlreadyExistsException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class InvalidOperationException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InvalidOperationException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ConfigValSecurityException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ConfigValSecurityException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class InvalidInputException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InvalidInputException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NoSuchTxnException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NoSuchTxnException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TxnAbortedException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TxnAbortedException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TxnOpenException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TxnOpenException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NoSuchLockException(TException):
"""
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NoSuchLockException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| apache-2.0 |
LPgenerator/django-robokassa | robokassa/south_migrations/0001_initial.py | 5 | 1420 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SuccessNotification'
db.create_table('robokassa_successnotification', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('InvId', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
('OutSum', self.gf('django.db.models.fields.CharField')(max_length=15)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('robokassa', ['SuccessNotification'])
def backwards(self, orm):
# Deleting model 'SuccessNotification'
db.delete_table('robokassa_successnotification')
models = {
'robokassa.successnotification': {
'InvId': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'Meta': {'object_name': 'SuccessNotification'},
'OutSum': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['robokassa'] | mit |
kgreav/cassandra | bin/cqlsh.py | 6 | 100531 | #!/bin/sh
# -*- mode: Python -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":"
# bash code here; finds a suitable python interpreter and execs this file.
# this implementation of cqlsh is compatible with both Python 3 and Python 2.7.
# prefer unqualified "python" if suitable:
python -c 'import sys; sys.exit(not (0x020700b0 < sys.hexversion))' 2>/dev/null \
&& exec python "$0" "$@"
for pyver in 3 2.7; do
which python$pyver > /dev/null 2>&1 && exec python$pyver "$0" "$@"
done
echo "No appropriate python interpreter found." >&2
exit 1
":"""
from __future__ import division, unicode_literals
import cmd
import codecs
import csv
import getpass
import optparse
import os
import platform
import sys
import traceback
import warnings
import webbrowser
from contextlib import contextmanager
from glob import glob
from uuid import UUID
if sys.version_info.major != 3 and (sys.version_info.major == 2 and sys.version_info.minor != 7):
sys.exit("\nCQL Shell supports only Python 3 or Python 2.7\n")
# see CASSANDRA-10428
if platform.python_implementation().startswith('Jython'):
sys.exit("\nCQL Shell does not run on Jython\n")
UTF8 = 'utf-8'
CP65001 = 'cp65001' # Win utf-8 variant
description = "CQL Shell for Apache Cassandra"
version = "5.0.1"
readline = None
try:
# check if tty first, cause readline doesn't check, and only cares
# about $TERM. we don't want the funky escape code stuff to be
# output if not a tty.
if sys.stdin.isatty():
import readline
except ImportError:
pass
CQL_LIB_PREFIX = 'cassandra-driver-internal-only-'
CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/cql3/CQL-3.2.html'
if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'):
# default location of local CQL.html
CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html'
elif os.path.exists('/usr/share/doc/cassandra/CQL.html'):
# fallback to package file
CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html'
else:
# fallback to online version
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
# On Linux, the Python webbrowser module uses the 'xdg-open' executable
# to open a file/URL. But that only works, if the current session has been
# opened from _within_ a desktop environment. I.e. 'xdg-open' will fail,
# if the session's been opened via ssh to a remote box.
#
# Use 'python' to get some information about the detected browsers.
# >>> import webbrowser
# >>> webbrowser._tryorder
# >>> webbrowser._browser
#
if len(webbrowser._tryorder) == 0:
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '':
# only on Linux (some OS with xdg-open)
webbrowser._tryorder.remove('xdg-open')
webbrowser._tryorder.append('xdg-open')
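    # (remove()+append() keeps 'xdg-open' registered but demotes it to the
    # lowest priority, so a real browser is preferred when one is available)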
# use bundled lib for python-cql if available. if there
# is a ../lib dir, use bundled libs there preferentially.
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]
myplatform = platform.system()
is_win = myplatform == 'Windows'
# Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216)
if is_win and sys.version_info < (3, 3):
codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)
if myplatform == 'Linux':
ZIPLIB_DIRS.append('/usr/share/cassandra/lib')
if os.environ.get('CQLSH_NO_BUNDLED', ''):
ZIPLIB_DIRS = ()
def find_zip(libprefix):
for ziplibdir in ZIPLIB_DIRS:
zips = glob(os.path.join(ziplibdir, libprefix + '*.zip'))
if zips:
return max(zips) # probably the highest version, if multiple
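# Note: max() compares the glob results lexicographically as strings, so a
# hypothetical '...-10.0.zip' would sort before '...-2.0.zip'; in practice a
# tree ships only one bundled driver zip, so this is a non-issue.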
cql_zip = find_zip(CQL_LIB_PREFIX)
if cql_zip:
ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
third_parties = ('futures-', 'six-', 'geomet-')
for lib in third_parties:
lib_zip = find_zip(lib)
if lib_zip:
sys.path.insert(0, lib_zip)
# We cannot import six until we add its location to sys.path so the Python
# interpreter can find it. Do not move this to the top.
import six
from six.moves import configparser, input
from six import StringIO, ensure_text, ensure_str
warnings.filterwarnings("ignore", r".*blist.*")
try:
import cassandra
except ImportError as e:
sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n"
'You might try "pip install cassandra-driver".\n\n'
'Python: %s\n'
'Module load path: %r\n\n'
'Error: %s\n' % (sys.executable, sys.path, e))
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.cqltypes import cql_typename
from cassandra.marshal import int64_unpack
from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
TableMetadata, protect_name, protect_names)
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
from cassandra.util import datetime_from_timestamp
# cqlsh should run correctly when run out of a Cassandra source tree,
# out of an unpacked Cassandra tarball, and after a proper package install.
cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib')
if os.path.isdir(cqlshlibdir):
sys.path.insert(0, cqlshlibdir)
from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling, cqlshhandling
from cqlshlib.copyutil import ExportTask, ImportTask
from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
RED, WHITE, FormattedValue, colorme)
from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
DEFAULT_TIMESTAMP_FORMAT, CqlType, DateTimeFormat,
format_by_type, formatter_for)
from cqlshlib.tracing import print_trace, print_trace_session
from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9042
DEFAULT_SSL = False
DEFAULT_CONNECT_TIMEOUT_SECONDS = 5
DEFAULT_REQUEST_TIMEOUT_SECONDS = 10
DEFAULT_FLOAT_PRECISION = 5
DEFAULT_DOUBLE_PRECISION = 5
DEFAULT_MAX_TRACE_WAIT = 10
if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__:
DEFAULT_COMPLETEKEY = '\t'
else:
DEFAULT_COMPLETEKEY = 'tab'
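# Note: libedit (the readline substitute shipped with macOS) expects the raw
# tab character as its binding string, whereas GNU readline expects the key
# name 'tab'; prepare_loop() below issues the matching parse_and_bind call.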
cqldocs = None
cqlruleset = None
epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These
defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. When a
host (and optional port number) are given on the command line, they take
precedence over any defaults.""" % globals()
parser = optparse.OptionParser(description=description, epilog=epilog,
usage="Usage: %prog [options] [host [port]]",
version='cqlsh ' + version)
parser.add_option("-C", "--color", action='store_true', dest='color',
help='Always use color output')
parser.add_option("--no-color", action='store_false', dest='color',
help='Never use color output')
parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be:
- one of the supported browsers in https://docs.python.org/2/library/webbrowser.html.
- browser path followed by %s, example: /usr/bin/google-chrome-stable %s""")
parser.add_option('--ssl', action='store_true', help='Use SSL', default=False)
parser.add_option("-u", "--username", help="Authenticate as user.")
parser.add_option("-p", "--password", help="Authenticate using password.")
parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.')
parser.add_option("-f", "--file", help="Execute commands from FILE, then exit")
parser.add_option('--debug', action='store_true',
help='Show additional debugging information')
parser.add_option('--coverage', action='store_true',
help='Collect coverage data')
parser.add_option("--encoding", help="Specify a non-default encoding for output."
+ " (Default: %s)" % (UTF8,))
parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
parser.add_option('--cqlversion', default=None,
help='Specify a particular CQL version, '
'by default the highest version supported by the server will be used.'
' Examples: "3.0.3", "3.1.0"')
parser.add_option("--protocol-version", type="int", default=None,
help='Specify a specific protocol version; otherwise the client will default and downgrade as necessary')
parser.add_option("-e", "--execute", help='Execute the statement and quit.')
parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout',
help='Specify the connection timeout in seconds (default: %default seconds).')
parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout',
help='Specify the default request timeout in seconds (default: %default seconds).')
parser.add_option("-t", "--tty", action='store_true', dest='tty',
help='Force tty mode (command prompt).')
optvalues = optparse.Values()
(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
# BEGIN history/config definition
HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))
if hasattr(options, 'cqlshrc'):
CONFIG_FILE = options.cqlshrc
if not os.path.exists(CONFIG_FILE):
print('\nWarning: Specified cqlshrc location `%s` does not exist. Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR))
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
else:
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
if not os.path.exists(HISTORY_DIR):
try:
os.mkdir(HISTORY_DIR)
except OSError:
print('\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR)
OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
if os.path.exists(OLD_CONFIG_FILE):
if os.path.exists(CONFIG_FILE):
print('\nWarning: cqlshrc config files were found at both the old location ({0})'
' and the new location ({1}); the old config file will not be migrated to the new'
' location, and the new location will be used for now. You should manually'
' consolidate the config files at the new location and remove the old file.'
.format(OLD_CONFIG_FILE, CONFIG_FILE))
else:
os.rename(OLD_CONFIG_FILE, CONFIG_FILE)
OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
if os.path.exists(OLD_HISTORY):
os.rename(OLD_HISTORY, HISTORY)
# END history/config definition
CQL_ERRORS = (
cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure,
cassandra.InvalidRequest, cassandra.Timeout, cassandra.Unauthorized, cassandra.OperationTimedOut,
cassandra.cluster.NoHostAvailable,
cassandra.connection.ConnectionBusy, cassandra.connection.ProtocolError, cassandra.connection.ConnectionException,
cassandra.protocol.ErrorMessage, cassandra.protocol.InternalError, cassandra.query.TraceUnavailable
)
debug_completion = os.environ.get('CQLSH_DEBUG_COMPLETION', '') == 'YES'
class NoKeyspaceError(Exception):
pass
class KeyspaceNotFound(Exception):
pass
class ColumnFamilyNotFound(Exception):
pass
class IndexNotFound(Exception):
pass
class MaterializedViewNotFound(Exception):
pass
class ObjectNotFound(Exception):
pass
class VersionNotSupported(Exception):
pass
class UserTypeNotFound(Exception):
pass
class FunctionNotFound(Exception):
pass
class AggregateNotFound(Exception):
pass
class DecodeError(Exception):
verb = 'decode'
def __init__(self, thebytes, err, colname=None):
self.thebytes = thebytes
self.err = err
self.colname = colname
def __str__(self):
return str(self.thebytes)
def message(self):
what = 'value %r' % (self.thebytes,)
if self.colname is not None:
what = 'value %r (for column %r)' % (self.thebytes, self.colname)
return 'Failed to %s %s : %s' \
% (self.verb, what, self.err)
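# Sketch of the resulting text (bytes and column name hypothetical):
# DecodeError(b'\xff', err, colname='name').message() renders roughly as
#   "Failed to decode value b'\xff' (for column 'name') : <err>"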
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.message())
class FormatError(DecodeError):
verb = 'format'
def full_cql_version(ver):
while ver.count('.') < 2:
ver += '.0'
ver_parts = ver.split('-', 1) + ['']
vertuple = tuple(list(map(int, ver_parts[0].split('.'))) + [ver_parts[1]])
return ver, vertuple
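# A quick sketch of the normalization above:
# >>> full_cql_version('3.4')
# ('3.4.0', (3, 4, 0, ''))
# >>> full_cql_version('3.4.5-beta1')
# ('3.4.5-beta1', (3, 4, 5, 'beta1'))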
def format_value(val, cqltype, encoding, addcolor=False, date_time_format=None,
float_precision=None, colormap=None, nullval=None):
if isinstance(val, DecodeError):
if addcolor:
return colorme(repr(val.thebytes), colormap, 'error')
else:
return FormattedValue(repr(val.thebytes))
return format_by_type(val, cqltype=cqltype, encoding=encoding, colormap=colormap,
addcolor=addcolor, nullval=nullval, date_time_format=date_time_format,
float_precision=float_precision)
def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None):
if file is None:
file = sys.stderr
try:
file.write(warnings.formatwarning(message, category, filename, lineno, line=''))
except IOError:
pass
warnings.showwarning = show_warning_without_quoting_line
warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure)
def insert_driver_hooks():
class DateOverFlowWarning(RuntimeWarning):
pass
# Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp
def deserialize_date_fallback_int(byts, protocol_version):
timestamp_ms = int64_unpack(byts)
try:
return datetime_from_timestamp(timestamp_ms / 1000.0)
except OverflowError:
warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. "
"Timestamps are displayed in milliseconds from epoch."))
return timestamp_ms
cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
if hasattr(cassandra, 'deserializers'):
del cassandra.deserializers.DesDateType
# Return cassandra.cqltypes.EMPTY instead of None for empty values
cassandra.cqltypes.CassandraType.support_empty_values = True
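# For illustration: Python's datetime tops out at year 9999 (datetime.MAXYEAR),
# so a value of 253402300800000 ms (midnight UTC, year 10000) overflows and the
# fallback above returns the raw millisecond count instead of a datetime.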
class Shell(cmd.Cmd):
custom_prompt = os.getenv('CQLSH_PROMPT', '')
if custom_prompt != '':
custom_prompt += "\n"
default_prompt = custom_prompt + "cqlsh> "
continue_prompt = " ... "
keyspace_prompt = custom_prompt + "cqlsh:{}> "
keyspace_continue_prompt = "{} ... "
show_line_nums = False
debug = False
coverage = False
coveragerc_path = None
stop = False
last_hist = None
shunted_query_out = None
use_paging = True
default_page_size = 100
def __init__(self, hostname, port, color=False,
username=None, password=None, encoding=None, stdin=None, tty=True,
completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
cqlver=None, keyspace=None,
tracing_enabled=False, expand_enabled=False,
display_nanotime_format=DEFAULT_NANOTIME_FORMAT,
display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT,
display_date_format=DEFAULT_DATE_FORMAT,
display_float_precision=DEFAULT_FLOAT_PRECISION,
display_double_precision=DEFAULT_DOUBLE_PRECISION,
display_timezone=None,
max_trace_wait=DEFAULT_MAX_TRACE_WAIT,
ssl=False,
single_statement=None,
request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
protocol_version=None,
connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS,
is_subshell=False):
cmd.Cmd.__init__(self, completekey=completekey)
self.hostname = hostname
self.port = port
self.auth_provider = None
if username:
if not password:
password = getpass.getpass()
self.auth_provider = PlainTextAuthProvider(username=username, password=password)
self.username = username
self.keyspace = keyspace
self.ssl = ssl
self.tracing_enabled = tracing_enabled
self.page_size = self.default_page_size
self.expand_enabled = expand_enabled
if use_conn:
self.conn = use_conn
else:
kwargs = {}
if protocol_version is not None:
kwargs['protocol_version'] = protocol_version
self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver,
auth_provider=self.auth_provider,
ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=connect_timeout,
connect_timeout=connect_timeout,
**kwargs)
self.owns_connection = not use_conn
if keyspace:
self.session = self.conn.connect(keyspace)
else:
self.session = self.conn.connect()
if browser == "":
browser = None
self.browser = browser
self.color = color
self.display_nanotime_format = display_nanotime_format
self.display_timestamp_format = display_timestamp_format
self.display_date_format = display_date_format
self.display_float_precision = display_float_precision
self.display_double_precision = display_double_precision
self.display_timezone = display_timezone
self.session.default_timeout = request_timeout
self.session.row_factory = ordered_dict_factory
self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE
self.get_connection_versions()
self.set_expanded_cql_version(self.connection_versions['cql'])
self.current_keyspace = keyspace
self.max_trace_wait = max_trace_wait
self.session.max_trace_wait = max_trace_wait
self.tty = tty
self.encoding = encoding
self.check_windows_encoding()
self.output_codec = codecs.lookup(encoding)
self.statement = StringIO()
self.lineno = 1
self.in_comment = False
self.prompt = ''
if stdin is None:
stdin = sys.stdin
if tty:
self.reset_prompt()
self.report_connection()
print('Use HELP for help.')
else:
self.show_line_nums = True
self.stdin = stdin
self.query_out = sys.stdout
self.consistency_level = cassandra.ConsistencyLevel.ONE
self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL
self.empty_lines = 0
self.statement_error = False
self.single_statement = single_statement
self.is_subshell = is_subshell
@property
def is_using_utf8(self):
# utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]
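# e.g. an encoding of 'UTF-8' normalizes to 'utf_8' and matches, while
# 'latin-1' normalizes to 'latin_1' and does not.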
def check_windows_encoding(self):
if is_win and os.name == 'nt' and self.tty and \
self.is_using_utf8 and sys.stdout.encoding != CP65001:
self.printerr("\nWARNING: console codepage must be set to cp65001 "
"to support {} encoding on Windows platforms.\n"
"If you experience encoding problems, change your console"
" codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
def set_expanded_cql_version(self, ver):
ver, vertuple = full_cql_version(ver)
self.cql_version = ver
self.cql_ver_tuple = vertuple
def cqlver_atleast(self, major, minor=0, patch=0):
return self.cql_ver_tuple[:3] >= (major, minor, patch)
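# e.g. with cql_ver_tuple == (3, 4, 5, ''), cqlver_atleast(3, 4) and
# cqlver_atleast(3, 4, 5) are True, but cqlver_atleast(3, 4, 6) is False.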
def myformat_value(self, val, cqltype=None, **kwargs):
if isinstance(val, DecodeError):
self.decoding_errors.append(val)
try:
dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format,
date_format=self.display_date_format, nanotime_format=self.display_nanotime_format,
timezone=self.display_timezone)
precision = self.display_double_precision if cqltype is not None and cqltype.type_name == 'double' \
else self.display_float_precision
return format_value(val, cqltype=cqltype, encoding=self.output_codec.name,
addcolor=self.color, date_time_format=dtformats,
float_precision=precision, **kwargs)
except Exception as e:
err = FormatError(val, e)
self.decoding_errors.append(err)
return format_value(err, cqltype=cqltype, encoding=self.output_codec.name, addcolor=self.color)
def myformat_colname(self, name, table_meta=None):
column_colors = COLUMN_NAME_COLORS.copy()
# check column role and color appropriately
if table_meta:
if name in [col.name for col in table_meta.partition_key]:
column_colors.default_factory = lambda: RED
elif name in [col.name for col in table_meta.clustering_key]:
column_colors.default_factory = lambda: CYAN
elif name in table_meta.columns and table_meta.columns[name].is_static:
column_colors.default_factory = lambda: WHITE
return self.myformat_value(name, colormap=column_colors)
def report_connection(self):
self.show_host()
self.show_version()
def show_host(self):
print("Connected to {0} at {1}:{2}."
.format(self.applycolor(self.get_cluster_name(), BLUE),
self.hostname,
self.port))
def show_version(self):
vers = self.connection_versions.copy()
vers['shver'] = version
# system.Versions['cql'] apparently does not reflect changes with
# set_cql_version.
vers['cql'] = self.cql_version
print("[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers)
def show_session(self, sessionid, partial_session=False):
print_trace_session(self, self.session, sessionid, partial_session)
def get_connection_versions(self):
result, = self.session.execute("select * from system.local where key = 'local'")
vers = {
'build': result['release_version'],
'protocol': self.conn.protocol_version,
'cql': result['cql_version'],
}
self.connection_versions = vers
def get_keyspace_names(self):
return list(map(str, list(self.conn.metadata.keyspaces.keys())))
def get_columnfamily_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(map(str, list(self.get_keyspace_meta(ksname).tables.keys())))
def get_materialized_view_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(map(str, list(self.get_keyspace_meta(ksname).views.keys())))
def get_index_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(map(str, list(self.get_keyspace_meta(ksname).indexes.keys())))
def get_column_names(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
layout = self.get_table_meta(ksname, cfname)
return [str(col) for col in layout.columns]
def get_usertype_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(self.get_keyspace_meta(ksname).user_types.keys())
def get_usertype_layout(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
ks_meta = self.get_keyspace_meta(ksname)
try:
user_type = ks_meta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type {!r} not found".format(typename))
return list(zip(user_type.field_names, user_type.field_types))
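# Sketch (type and field names hypothetical): for a type created with
#   CREATE TYPE ks.address (street text, zipcode int);
# this returns [('street', 'text'), ('zipcode', 'int')], pairing each field
# name with the type reported by the driver's UserType metadata.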
def get_userfunction_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [f.name for f in list(self.get_keyspace_meta(ksname).functions.values())]
def get_useraggregate_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [f.name for f in list(self.get_keyspace_meta(ksname).aggregates.values())]
def get_cluster_name(self):
return self.conn.metadata.cluster_name
def get_partitioner(self):
return self.conn.metadata.partitioner
def get_keyspace_meta(self, ksname):
if ksname in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ksname]
raise KeyspaceNotFound('Keyspace %r not found.' % ksname)
def get_keyspaces(self):
return list(self.conn.metadata.keyspaces.values())
def get_ring(self, ks):
self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True)
return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks]
def get_table_meta(self, ksname, tablename):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if tablename not in ksmeta.tables:
if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']:
return self.get_fake_auth_table_meta(ksname, tablename)
else:
raise ColumnFamilyNotFound("Column family {} not found".format(tablename))
else:
return ksmeta.tables[tablename]
def get_fake_auth_table_meta(self, ksname, tablename):
# An external auth implementation may be in use, in which case the
# internal auth tables aren't actually defined in the schema; fake
# them up here.
if tablename == 'roles':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'roles')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType)
table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType)
elif tablename == 'role_permissions':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'role_permissions')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type)
table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type)
else:
raise ColumnFamilyNotFound("Column family {} not found".format(tablename))
return table_meta
def get_index_meta(self, ksname, idxname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if idxname not in ksmeta.indexes:
raise IndexNotFound("Index {} not found".format(idxname))
return ksmeta.indexes[idxname]
def get_view_meta(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if viewname not in ksmeta.views:
raise MaterializedViewNotFound("Materialized view '{}' not found".format(viewname))
return ksmeta.views[viewname]
def get_object_meta(self, ks, name):
if name is None:
if ks and ks in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ks]
elif self.current_keyspace is None:
raise ObjectNotFound("'{}' not found in keyspaces".format(ks))
else:
name = ks
ks = self.current_keyspace
if ks is None:
ks = self.current_keyspace
ksmeta = self.get_keyspace_meta(ks)
if name in ksmeta.tables:
return ksmeta.tables[name]
elif name in ksmeta.indexes:
return ksmeta.indexes[name]
elif name in ksmeta.views:
return ksmeta.views[name]
raise ObjectNotFound("'{}' not found in keyspace '{}'".format(name, ks))
def get_usertypes_meta(self):
data = self.session.execute("select * from system.schema_usertypes")
if not data:
return cql3handling.UserTypesMeta({})
return cql3handling.UserTypesMeta.from_layout(data)
def get_trigger_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [trigger.name
for table in list(self.get_keyspace_meta(ksname).tables.values())
for trigger in list(table.triggers.values())]
def reset_statement(self):
self.reset_prompt()
self.statement.truncate(0)
self.statement.seek(0)
self.empty_lines = 0
def reset_prompt(self):
if self.current_keyspace is None:
self.set_prompt(self.default_prompt, True)
else:
self.set_prompt(self.keyspace_prompt.format(self.current_keyspace), True)
def set_continue_prompt(self):
if self.empty_lines >= 3:
self.set_prompt("Statements are terminated with a ';'. You can press CTRL-C to cancel an incomplete statement.")
self.empty_lines = 0
return
if self.current_keyspace is None:
self.set_prompt(self.continue_prompt)
else:
spaces = ' ' * len(str(self.current_keyspace))
self.set_prompt(self.keyspace_continue_prompt.format(spaces))
self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0
@contextmanager
def prepare_loop(self):
readline = None
if self.tty and self.completekey:
try:
import readline
except ImportError:
if is_win:
print("WARNING: pyreadline dependency missing. Install to enable tab completion.")
pass
else:
old_completer = readline.get_completer()
readline.set_completer(self.complete)
if readline.__doc__ is not None and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind -e")
readline.parse_and_bind("bind '" + self.completekey + "' rl_complete")
readline.parse_and_bind("bind ^R em-inc-search-prev")
else:
readline.parse_and_bind(self.completekey + ": complete")
# start coverage collection if requested, unless in subshell
if self.coverage and not self.is_subshell:
# check for coveragerc file, write it if missing
if os.path.exists(HISTORY_DIR):
self.coveragerc_path = os.path.join(HISTORY_DIR, '.coveragerc')
covdata_path = os.path.join(HISTORY_DIR, '.coverage')
if not os.path.isfile(self.coveragerc_path):
with open(self.coveragerc_path, 'w') as f:
f.writelines(["[run]\n",
"concurrency = multiprocessing\n",
"data_file = {}\n".format(covdata_path),
"parallel = true\n"]
)
# start coverage
import coverage
self.cov = coverage.Coverage(config_file=self.coveragerc_path)
self.cov.start()
try:
yield
finally:
if readline is not None:
readline.set_completer(old_completer)
if self.coverage and not self.is_subshell:
self.stop_coverage()
def get_input_line(self, prompt=''):
if self.tty:
self.lastcmd = input(prompt)
line = self.lastcmd + '\n'
else:
self.lastcmd = self.stdin.readline()
line = self.lastcmd
if not len(line):
raise EOFError
self.lineno += 1
line = ensure_text(line)
return line
def use_stdin_reader(self, until='', prompt=''):
until += '\n'
while True:
try:
newline = self.get_input_line(prompt=prompt)
except EOFError:
return
if newline == until:
return
yield newline
def cmdloop(self):
"""
Adapted from cmd.Cmd's version, because there is literally no way with
cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in
input and an actual EOF.
"""
with self.prepare_loop():
while not self.stop:
try:
if self.single_statement:
line = self.single_statement
self.stop = True
else:
line = self.get_input_line(self.prompt)
self.statement.write(line)
if self.onecmd(self.statement.getvalue()):
self.reset_statement()
except EOFError:
self.handle_eof()
except CQL_ERRORS as cqlerr:
self.printerr(cqlerr.message if hasattr(cqlerr, 'message') else str(cqlerr))
except KeyboardInterrupt:
self.reset_statement()
print('')
def onecmd(self, statementtext):
"""
Returns true if the statement is complete and was handled (meaning it
can be reset).
"""
try:
statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
except pylexotron.LexingError as e:
if self.show_line_nums:
self.printerr('Invalid syntax at line {0}, char {1}'
.format(e.linenum, e.charnum))
else:
self.printerr('Invalid syntax at char {0}'.format(e.charnum))
statementline = statementtext.split('\n')[e.linenum - 1]
self.printerr(' {0}'.format(statementline))
self.printerr(' {0}^'.format(' ' * e.charnum))
return True
while statements and not statements[-1]:
statements = statements[:-1]
if not statements:
return True
if endtoken_escaped or statements[-1][-1][0] != 'endtoken':
self.set_continue_prompt()
return
for st in statements:
try:
self.handle_statement(st, statementtext)
except Exception as e:
if self.debug:
traceback.print_exc()
else:
self.printerr(e)
return True
def handle_eof(self):
if self.tty:
print('')
statement = self.statement.getvalue()
if statement.strip():
if not self.onecmd(statement):
self.printerr('Incomplete statement at end of file')
self.do_exit()
def handle_statement(self, tokens, srcstr):
# Concat multi-line statements and insert into history
if readline is not None:
nl_count = srcstr.count("\n")
new_hist = srcstr.replace("\n", " ").rstrip()
if nl_count > 1 and self.last_hist != new_hist:
readline.add_history(new_hist)
self.last_hist = new_hist
cmdword = tokens[0][1]
if cmdword == '?':
cmdword = 'help'
custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
if custom_handler:
parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr,
startsymbol='cqlshCommand')
if parsed and not parsed.remainder:
# successful complete parse
return custom_handler(parsed)
else:
return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate',
'create', 'drop', 'alter', 'grant', 'revoke',
'batch', 'list'):
# hey, maybe they know about some new syntax we don't. type
# assumptions won't work, but maybe the query will.
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
if parsed:
self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
else:
self.printerr('Improper %s command.' % cmdword)
def do_use(self, parsed):
ksname = parsed.get_binding('ksname')
success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig()))
if success:
if ksname[0] == '"' and ksname[-1] == '"':
self.current_keyspace = self.cql_unprotect_name(ksname)
else:
self.current_keyspace = ksname.lower()
def do_select(self, parsed):
tracing_was_enabled = self.tracing_enabled
ksname = parsed.get_binding('ksname')
stop_tracing = ksname == 'system_traces' or (ksname is None and self.current_keyspace == 'system_traces')
self.tracing_enabled = self.tracing_enabled and not stop_tracing
statement = parsed.extract_orig()
self.perform_statement(statement)
self.tracing_enabled = tracing_was_enabled
def perform_statement(self, statement):
statement = ensure_str(statement)
stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
success, future = self.perform_simple_statement(stmt)
if future:
if future.warnings:
self.print_warnings(future.warnings)
if self.tracing_enabled:
try:
for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level):
print_trace(self, trace)
except TraceUnavailable:
msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." % (self.session.max_trace_wait,)
self.writeresult(msg, color=RED)
for trace_id in future.get_query_trace_ids():
self.show_session(trace_id, partial_session=True)
except Exception as err:
self.printerr("Unable to fetch query trace: %s" % (str(err),))
return success
def parse_for_select_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname', None))
try:
return self.get_table_meta(ks, name)
except ColumnFamilyNotFound:
try:
return self.get_view_meta(ks, name)
except MaterializedViewNotFound:
raise ObjectNotFound("'{}' not found in keyspace '{}'".format(name, ks))
def parse_for_update_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
return self.get_table_meta(ks, cf)
def perform_simple_statement(self, statement):
if not statement:
return False, None
future = self.session.execute_async(statement, trace=self.tracing_enabled)
result = None
try:
result = future.result()
except CQL_ERRORS as err:
err_msg = ensure_text(err.message if hasattr(err, 'message') else str(err))
self.printerr(str(err.__class__.__name__) + ": " + err_msg)
except Exception:
self.printerr(traceback.format_exc())
# Even if statement failed we try to refresh schema if not agreed (see CASSANDRA-9689)
if not future.is_schema_agreed:
try:
self.conn.refresh_schema_metadata(5) # will throw exception if there is a schema mismatch
except Exception:
self.printerr("Warning: schema version mismatch detected; check the schema versions of your "
"nodes in system.local and system.peers.")
self.conn.refresh_schema_metadata(-1)
if result is None:
return False, None
if statement.query_string[:6].lower() == 'select':
self.print_result(result, self.parse_for_select_meta(statement.query_string))
elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"):
self.print_result(result, self.get_table_meta('system_auth', 'roles'))
elif statement.query_string.lower().startswith("list"):
self.print_result(result, self.get_table_meta('system_auth', 'role_permissions'))
elif result:
# CAS INSERT/UPDATE
self.writeresult("")
self.print_static_result(result, self.parse_for_update_meta(statement.query_string))
self.flush_output()
return True, future
def print_result(self, result, table_meta):
self.decoding_errors = []
self.writeresult("")
if result.has_more_pages and self.tty:
num_rows = 0
while True:
if result.current_rows:
num_rows += len(result.current_rows)
self.print_static_result(result, table_meta)
if result.has_more_pages:
if self.shunted_query_out is None:
# Only pause when not capturing.
input("---MORE---")
result.fetch_next_page()
else:
break
else:
num_rows = len(result.current_rows)
self.print_static_result(result, table_meta)
self.writeresult("(%d rows)" % num_rows)
if self.decoding_errors:
for err in self.decoding_errors[:2]:
self.writeresult(err.message(), color=RED)
if len(self.decoding_errors) > 2:
self.writeresult('%d more decoding errors suppressed.'
% (len(self.decoding_errors) - 2), color=RED)
def print_static_result(self, result, table_meta):
if not result.column_names and not table_meta:
return
column_names = result.column_names or list(table_meta.columns.keys())
formatted_names = [self.myformat_colname(name, table_meta) for name in column_names]
if not result.current_rows:
# print header only
self.print_formatted_result(formatted_names, None)
return
cql_types = []
if result.column_types:
ks_name = table_meta.keyspace_name if table_meta else self.current_keyspace
ks_meta = self.conn.metadata.keyspaces.get(ks_name, None)
cql_types = [CqlType(cql_typename(t), ks_meta) for t in result.column_types]
formatted_values = [list(map(self.myformat_value, [row[c] for c in column_names], cql_types)) for row in result.current_rows]
if self.expand_enabled:
self.print_formatted_result_vertically(formatted_names, formatted_values)
else:
self.print_formatted_result(formatted_names, formatted_values)
def print_formatted_result(self, formatted_names, formatted_values):
# determine column widths
widths = [n.displaywidth for n in formatted_names]
if formatted_values is not None:
for fmtrow in formatted_values:
for num, col in enumerate(fmtrow):
widths[num] = max(widths[num], col.displaywidth)
# print header
header = ' | '.join(hdr.ljust(w, color=self.color) for (hdr, w) in zip(formatted_names, widths))
self.writeresult(' ' + header.rstrip())
self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths))
# stop if there are no rows
if formatted_values is None:
self.writeresult("")
return
# print row data
for row in formatted_values:
line = ' | '.join(col.rjust(w, color=self.color) for (col, w) in zip(row, widths))
self.writeresult(' ' + line)
self.writeresult("")
def print_formatted_result_vertically(self, formatted_names, formatted_values):
max_col_width = max([n.displaywidth for n in formatted_names])
max_val_width = max([n.displaywidth for row in formatted_values for n in row])
# for each row returned, list all the column-value pairs
for row_id, row in enumerate(formatted_values):
self.writeresult("@ Row %d" % (row_id + 1))
self.writeresult('-%s-' % '-+-'.join(['-' * max_col_width, '-' * max_val_width]))
for field_id, field in enumerate(row):
column = formatted_names[field_id].ljust(max_col_width, color=self.color)
value = field.ljust(field.displaywidth, color=self.color)
self.writeresult(' ' + " | ".join([column, value]))
self.writeresult('')
def print_warnings(self, warnings):
if warnings is None or len(warnings) == 0:
return
self.writeresult('')
self.writeresult('Warnings :')
for warning in warnings:
self.writeresult(warning)
self.writeresult('')
def emptyline(self):
pass
def parseline(self, line):
# not needed: statement dispatch happens via onecmd()/handle_statement()
raise NotImplementedError
def complete(self, text, state):
if readline is None:
return
if state == 0:
try:
self.completion_matches = self.find_completions(text)
except Exception:
if debug_completion:
traceback.print_exc()
else:
raise
try:
return self.completion_matches[state]
except IndexError:
return None
def find_completions(self, text):
curline = readline.get_line_buffer()
prevlines = self.statement.getvalue()
wholestmt = prevlines + curline
begidx = readline.get_begidx() + len(prevlines)
stuff_to_complete = wholestmt[:begidx]
return cqlruleset.cql_complete(stuff_to_complete, text, cassandra_conn=self,
debug=debug_completion, startsymbol='cqlshCommand')
def set_prompt(self, prompt, prepend_user=False):
if prepend_user and self.username:
self.prompt = "{0}@{1}".format(self.username, prompt)
return
self.prompt = prompt
def cql_unprotect_name(self, namestr):
if namestr is None:
return
return cqlruleset.dequote_name(namestr)
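# e.g. dequoting turns '"MyKeyspace"' into 'MyKeyspace', while an unquoted
# name such as 'mykeyspace' passes through unchanged.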
def cql_unprotect_value(self, valstr):
if valstr is not None:
return cqlruleset.dequote_value(valstr)
def print_recreate_keyspace(self, ksdef, out):
out.write(ksdef.export_as_string())
out.write("\n")
def print_recreate_columnfamily(self, ksname, cfname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given table.
Writes output to the given out stream.
"""
out.write(self.get_table_meta(ksname, cfname).export_as_string())
out.write("\n")
def print_recreate_index(self, ksname, idxname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given index.
Writes output to the given out stream.
"""
out.write(self.get_index_meta(ksname, idxname).export_as_string())
out.write("\n")
def print_recreate_materialized_view(self, ksname, viewname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given materialized view.
Writes output to the given out stream.
"""
out.write(self.get_view_meta(ksname, viewname).export_as_string())
out.write("\n")
def print_recreate_object(self, ks, name, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given object (ks, table or index).
Writes output to the given out stream.
"""
out.write(self.get_object_meta(ks, name).export_as_string())
out.write("\n")
def describe_keyspaces(self):
print('')
cmd.Cmd.columnize(self, protect_names(self.get_keyspace_names()))
print('')
def describe_keyspace(self, ksname):
print('')
self.print_recreate_keyspace(self.get_keyspace_meta(ksname), sys.stdout)
print('')
def describe_columnfamily(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
self.print_recreate_columnfamily(ksname, cfname, sys.stdout)
print('')
def describe_index(self, ksname, idxname):
print('')
self.print_recreate_index(ksname, idxname, sys.stdout)
print('')
def describe_materialized_view(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
self.print_recreate_materialized_view(ksname, viewname, sys.stdout)
print('')
def describe_object(self, ks, name):
print('')
self.print_recreate_object(ks, name, sys.stdout)
print('')
def describe_columnfamilies(self, ksname):
print('')
if ksname is None:
for k in self.get_keyspaces():
name = protect_name(k.name)
print('Keyspace %s' % (name,))
print('---------%s' % ('-' * len(name)))
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(k.name)))
print('')
else:
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(ksname)))
print('')
def describe_functions(self, ksname):
print('')
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print('Keyspace %s' % (name,))
print('---------%s' % ('-' * len(name)))
self._columnize_unicode(list(ksmeta.functions.keys()))
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(list(ksmeta.functions.keys()))
def describe_function(self, ksname, functionname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
ksmeta = self.get_keyspace_meta(ksname)
functions = [f for f in list(ksmeta.functions.values()) if f.name == functionname]
if len(functions) == 0:
raise FunctionNotFound("User defined function {} not found".format(functionname))
print("\n\n".join(func.export_as_string() for func in functions))
print('')
def describe_aggregates(self, ksname):
print('')
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print('Keyspace %s' % (name,))
print('---------%s' % ('-' * len(name)))
self._columnize_unicode(list(ksmeta.aggregates.keys()))
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(list(ksmeta.aggregates.keys()))
def describe_aggregate(self, ksname, aggregatename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
ksmeta = self.get_keyspace_meta(ksname)
aggregates = [f for f in list(ksmeta.aggregates.values()) if f.name == aggregatename]
if len(aggregates) == 0:
raise FunctionNotFound("User defined aggregate {} not found".format(aggregatename))
print("\n\n".join(aggr.export_as_string() for aggr in aggregates))
print('')
def describe_usertypes(self, ksname):
print('')
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print('Keyspace %s' % (name,))
print('---------%s' % ('-' * len(name)))
self._columnize_unicode(list(ksmeta.user_types.keys()), quote=True)
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(list(ksmeta.user_types.keys()), quote=True)
def describe_usertype(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
ksmeta = self.get_keyspace_meta(ksname)
try:
usertype = ksmeta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type {} not found".format(typename))
print(usertype.export_as_string())
def _columnize_unicode(self, name_list, quote=False):
"""
Used when columnizing identifiers that may contain unicode
"""
names = [n for n in name_list]
if quote:
names = protect_names(names)
cmd.Cmd.columnize(self, names)
print('')
def describe_cluster(self):
print('\nCluster: %s' % self.get_cluster_name())
p = trim_if_present(self.get_partitioner(), 'org.apache.cassandra.dht.')
print('Partitioner: %s\n' % p)
# TODO: snitch?
# snitch = trim_if_present(self.get_snitch(), 'org.apache.cassandra.locator.')
# print 'Snitch: %s\n' % snitch
if self.current_keyspace is not None and self.current_keyspace != 'system':
print("Range ownership:")
ring = self.get_ring(self.current_keyspace)
for entry in list(ring.items()):
print(' %39s [%s]' % (str(entry[0].value), ', '.join([host.address for host in entry[1]])))
print('')
def describe_schema(self, include_system=False):
print('')
for k in self.get_keyspaces():
if include_system or k.name not in cql3handling.SYSTEM_KEYSPACES:
self.print_recreate_keyspace(k, sys.stdout)
print('')
def do_describe(self, parsed):
"""
DESCRIBE [cqlsh only]
(DESC may be used as a shorthand.)
Outputs information about the connected Cassandra cluster, or about
the data objects stored in the cluster. Use in one of the following ways:
DESCRIBE KEYSPACES
Output the names of all keyspaces.
DESCRIBE KEYSPACE [<keyspacename>]
Output CQL commands that could be used to recreate the given keyspace,
and the objects in it (such as tables, types, functions, etc.).
In some cases, as the CQL interface matures, there will be some metadata
about a keyspace that is not representable with CQL. That metadata will not be shown.
The '<keyspacename>' argument may be omitted, in which case the current
keyspace will be described.
DESCRIBE TABLES
Output the names of all tables in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TABLE [<keyspace>.]<tablename>
Output CQL commands that could be used to recreate the given table.
In some cases, as above, there may be table metadata which is not
representable and which will not be shown.
DESCRIBE INDEX <indexname>
Output the CQL command that could be used to recreate the given index.
In some cases, there may be index metadata which is not representable
and which will not be shown.
DESCRIBE MATERIALIZED VIEW <viewname>
Output the CQL command that could be used to recreate the given materialized view.
In some cases, there may be materialized view metadata which is not representable
and which will not be shown.
DESCRIBE CLUSTER
Output information about the connected Cassandra cluster, such as the
cluster name, and the partitioner and snitch in use. When you are
connected to a non-system keyspace, also shows endpoint-range
ownership information for the Cassandra ring.
DESCRIBE [FULL] SCHEMA
Output CQL commands that could be used to recreate the entire (non-system) schema.
Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace
k. Use DESCRIBE FULL SCHEMA to include the system keyspaces.
DESCRIBE TYPES
Output the names of all user-defined-types in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TYPE [<keyspace>.]<type>
Output the CQL command that could be used to recreate the given user-defined-type.
DESCRIBE FUNCTIONS
Output the names of all user-defined-functions in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE FUNCTION [<keyspace>.]<function>
Output the CQL command that could be used to recreate the given user-defined-function.
DESCRIBE AGGREGATES
Output the names of all user-defined-aggregates in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE AGGREGATE [<keyspace>.]<aggregate>
Output the CQL command that could be used to recreate the given user-defined-aggregate.
DESCRIBE <objname>
Output CQL commands that could be used to recreate the entire object schema,
where object can be either a keyspace or a table or an index or a materialized
view (in this order).
"""
what = parsed.matched[1][1].lower()
if what == 'functions':
self.describe_functions(self.current_keyspace)
elif what == 'function':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
functionname = self.cql_unprotect_name(parsed.get_binding('udfname'))
self.describe_function(ksname, functionname)
elif what == 'aggregates':
self.describe_aggregates(self.current_keyspace)
elif what == 'aggregate':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
aggregatename = self.cql_unprotect_name(parsed.get_binding('udaname'))
self.describe_aggregate(ksname, aggregatename)
elif what == 'keyspaces':
self.describe_keyspaces()
elif what == 'keyspace':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', ''))
if not ksname:
ksname = self.current_keyspace
if ksname is None:
self.printerr('Not in any keyspace.')
return
self.describe_keyspace(ksname)
elif what in ('columnfamily', 'table'):
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
self.describe_columnfamily(ks, cf)
elif what == 'index':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
idx = self.cql_unprotect_name(parsed.get_binding('idxname', None))
self.describe_index(ks, idx)
elif what == 'materialized' and parsed.matched[2][1].lower() == 'view':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
mv = self.cql_unprotect_name(parsed.get_binding('mvname'))
self.describe_materialized_view(ks, mv)
elif what in ('columnfamilies', 'tables'):
self.describe_columnfamilies(self.current_keyspace)
elif what == 'types':
self.describe_usertypes(self.current_keyspace)
elif what == 'type':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
ut = self.cql_unprotect_name(parsed.get_binding('utname'))
self.describe_usertype(ks, ut)
elif what == 'cluster':
self.describe_cluster()
elif what == 'schema':
self.describe_schema(False)
elif what == 'full' and parsed.matched[2][1].lower() == 'schema':
self.describe_schema(True)
elif what:
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname'))
if not name:
name = self.cql_unprotect_name(parsed.get_binding('idxname', None))
if not name:
name = self.cql_unprotect_name(parsed.get_binding('mvname', None))
self.describe_object(ks, name)
do_desc = do_describe
def do_copy(self, parsed):
r"""
COPY [cqlsh only]
COPY x FROM: Imports CSV data into a Cassandra table.
COPY x TO: Exports data from a Cassandra table in CSV format.
COPY <table_name> [ ( column [, ...] ) ]
FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN )
[ WITH <option>='value' [AND ...] ];
File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv.
COPY <table_name> [ ( column [, ...] ) ]
TO ( '<filename>' | STDOUT )
[ WITH <option>='value' [AND ...] ];
Available common COPY options and defaults:
DELIMITER=',' - character that appears between records
QUOTE='"' - quoting character to be used to quote fields
ESCAPE='\' - character to appear before the QUOTE char when quoted
HEADER=false - whether to ignore the first line
NULL='' - string that represents a null value
DATETIMEFORMAT= - timestamp strftime format
'%Y-%m-%d %H:%M:%S%z' defaults to time_format value in cqlshrc
MAXATTEMPTS=5 - the maximum number of attempts per batch or range
REPORTFREQUENCY=0.25 - the frequency with which we display status updates in seconds
DECIMALSEP='.' - the separator for decimal values
THOUSANDSSEP='' - the separator for thousands digit groups
BOOLSTYLE='True,False' - the representation for booleans, case insensitive, specify true followed by false,
for example yes,no or 1,0
NUMPROCESSES=n - the number of worker processes, by default the number of cores minus one
capped at 16
CONFIGFILE='' - a configuration file with the same format as .cqlshrc (see the Python ConfigParser
documentation) where you can specify WITH options under the following optional
sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table],
[copy-from:ks.table], where <ks> is your keyspace name and <table> is your table
name. Options are read from these sections, in the order specified
above, and command line options always override options in configuration files.
Depending on the COPY direction, only the relevant copy-from or copy-to sections
are used. If no configfile is specified then .cqlshrc is searched instead.
RATEFILE='' - an optional file where to print the output statistics
Available COPY FROM options and defaults:
CHUNKSIZE=5000 - the size of chunks passed to worker processes
INGESTRATE=100000 - an approximate ingest rate in rows per second
MINBATCHSIZE=10 - the minimum size of an import batch
MAXBATCHSIZE=20 - the maximum size of an import batch
MAXROWS=-1 - the maximum number of rows, -1 means no maximum
SKIPROWS=0 - the number of rows to skip
SKIPCOLS='' - a comma separated list of column names to skip
MAXPARSEERRORS=-1 - the maximum global number of parsing errors, -1 means no maximum
MAXINSERTERRORS=1000 - the maximum global number of insert errors, -1 means no maximum
ERRFILE='' - a file where to store all rows that could not be imported, by default this is
import_ks_table.err where <ks> is your keyspace and <table> is your table name.
PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to
False if you don't mind shifting data parsing to the cluster. The cluster will also
have to compile every batch statement. For large and oversized clusters
this will result in a faster import but for smaller clusters it may generate
timeouts.
TTL=3600 - the time to live in seconds, by default data will not expire
Available COPY TO options and defaults:
ENCODING='utf8' - encoding for CSV output
PAGESIZE='1000' - the page size for fetching results
PAGETIMEOUT=10 - the page timeout in seconds for fetching results
BEGINTOKEN='' - the minimum token string to consider when exporting data
ENDTOKEN='' - the maximum token string to consider when exporting data
MAXREQUESTS=6 - the maximum number of requests each worker process can work on in parallel
MAXOUTPUTSIZE='-1' - the maximum size of the output file measured in number of lines,
beyond this maximum the output file will be split into segments,
-1 means unlimited.
FLOATPRECISION=5 - the number of digits displayed after the decimal point for cql float values
DOUBLEPRECISION=12 - the number of digits displayed after the decimal point for cql double values
When entering CSV data on STDIN, you can use the sequence "\."
on a line by itself to end the data input.
"""
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
if ks is None:
ks = self.current_keyspace
if ks is None:
raise NoKeyspaceError("Not in any keyspace.")
table = self.cql_unprotect_name(parsed.get_binding('cfname'))
columns = parsed.get_binding('colnames', None)
if columns is not None:
columns = list(map(self.cql_unprotect_name, columns))
else:
# default to all known columns
columns = self.get_column_names(ks, table)
fname = parsed.get_binding('fname', None)
if fname is not None:
fname = self.cql_unprotect_value(fname)
copyoptnames = list(map(six.text_type.lower, parsed.get_binding('optnames', ())))
copyoptvals = list(map(self.cql_unprotect_value, parsed.get_binding('optvals', ())))
opts = dict(list(zip(copyoptnames, copyoptvals)))
direction = parsed.get_binding('dir').upper()
if direction == 'FROM':
task = ImportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
elif direction == 'TO':
task = ExportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
else:
raise SyntaxError("Unknown direction %s" % direction)
task.run()
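# Example invocations (keyspace/table/file names hypothetical):
#   COPY ks.users (id, name) TO 'users.csv' WITH HEADER=true;
#   COPY ks.users FROM 'users*.csv' WITH CHUNKSIZE=5000 AND HEADER=true;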
def do_show(self, parsed):
"""
SHOW [cqlsh only]
Displays information about the current cqlsh session. Can be called in
the following ways:
SHOW VERSION
Shows the version and build of the connected Cassandra instance, as
well as the version of the CQL spec that the connected Cassandra
instance understands.
SHOW HOST
Shows where cqlsh is currently connected.
SHOW SESSION <sessionid>
Pretty-prints the requested tracing session.
"""
showwhat = parsed.get_binding('what').lower()
if showwhat == 'version':
self.get_connection_versions()
self.show_version()
elif showwhat == 'host':
self.show_host()
elif showwhat.startswith('session'):
session_id = parsed.get_binding('sessionid').lower()
self.show_session(UUID(session_id))
else:
self.printerr('Wait, how do I show %r?' % (showwhat,))
def do_source(self, parsed):
"""
SOURCE [cqlsh only]
Executes a file containing CQL statements. Gives the output for each
statement in turn, if any, or any errors that occur along the way.
Errors do NOT abort execution of the CQL source file.
Usage:
SOURCE '<file>';
That is, the path to the file to be executed must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
See also the --file option to cqlsh.
"""
fname = parsed.get_binding('fname')
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
encoding, bom_size = get_file_encoding_bomsize(fname)
f = codecs.open(fname, 'r', encoding)
f.seek(bom_size)
except IOError as e:
self.printerr('Could not open %r: %s' % (fname, e))
return
username = self.auth_provider.username if self.auth_provider else None
password = self.auth_provider.password if self.auth_provider else None
subshell = Shell(self.hostname, self.port, color=self.color,
username=username, password=password,
encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
cqlver=self.cql_version, keyspace=self.current_keyspace,
tracing_enabled=self.tracing_enabled,
display_nanotime_format=self.display_nanotime_format,
display_timestamp_format=self.display_timestamp_format,
display_date_format=self.display_date_format,
display_float_precision=self.display_float_precision,
display_double_precision=self.display_double_precision,
display_timezone=self.display_timezone,
max_trace_wait=self.max_trace_wait, ssl=self.ssl,
request_timeout=self.session.default_timeout,
connect_timeout=self.conn.connect_timeout,
is_subshell=True)
# duplicate coverage related settings in subshell
if self.coverage:
subshell.coverage = True
subshell.coveragerc_path = self.coveragerc_path
subshell.cmdloop()
f.close()
def do_capture(self, parsed):
"""
CAPTURE [cqlsh only]
Begins capturing command output and appending it to a specified file.
Output will not be shown at the console while it is captured.
Usage:
CAPTURE '<file>';
CAPTURE OFF;
CAPTURE;
That is, the path to the file to be appended to must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
Only query result output is captured. Errors and output from cqlsh-only
commands will still be shown in the cqlsh session.
To stop capturing output and show it in the cqlsh session again, use
CAPTURE OFF.
To inspect the current capture configuration, use CAPTURE with no
arguments.
"""
fname = parsed.get_binding('fname')
if fname is None:
if self.shunted_query_out is not None:
print("Currently capturing query output to %r." % (self.query_out.name,))
else:
print("Currently not capturing query output.")
return
if fname.upper() == 'OFF':
if self.shunted_query_out is None:
self.printerr('Not currently capturing output.')
return
self.query_out.close()
self.query_out = self.shunted_query_out
self.color = self.shunted_color
self.shunted_query_out = None
del self.shunted_color
return
if self.shunted_query_out is not None:
self.printerr('Already capturing output to %s. Use CAPTURE OFF'
' to disable.' % (self.query_out.name,))
return
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
f = open(fname, 'a')
except IOError as e:
self.printerr('Could not open %r for append: %s' % (fname, e))
return
self.shunted_query_out = self.query_out
self.shunted_color = self.color
self.query_out = f
self.color = False
print('Now capturing query output to %r.' % (fname,))
def do_tracing(self, parsed):
"""
TRACING [cqlsh]
Enables or disables request tracing.
TRACING ON
Enables tracing for all further requests.
TRACING OFF
Disables tracing.
TRACING
TRACING with no arguments shows the current tracing status.
"""
self.tracing_enabled = SwitchCommand("TRACING", "Tracing").execute(self.tracing_enabled, parsed, self.printerr)
def do_expand(self, parsed):
"""
EXPAND [cqlsh]
Enables or disables expanded (vertical) output.
EXPAND ON
Enables expanded (vertical) output.
EXPAND OFF
Disables expanded (vertical) output.
EXPAND
        EXPAND with no arguments shows the current value of the expand setting.
"""
self.expand_enabled = SwitchCommand("EXPAND", "Expanded output").execute(self.expand_enabled, parsed, self.printerr)
def do_consistency(self, parsed):
"""
CONSISTENCY [cqlsh only]
Overrides default consistency level (default level is ONE).
CONSISTENCY <level>
Sets consistency level for future requests.
Valid consistency levels:
ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL.
        SERIAL and LOCAL_SERIAL may be used only for SELECTs; they will be rejected for updates.
CONSISTENCY
CONSISTENCY with no arguments shows the current consistency level.
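        For example:
          CONSISTENCY LOCAL_QUORUM;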
"""
level = parsed.get_binding('level')
if level is None:
print('Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level]))
return
self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print('Consistency level set to %s.' % (level.upper(),))
def do_serial(self, parsed):
"""
SERIAL CONSISTENCY [cqlsh only]
Overrides serial consistency level (default level is SERIAL).
SERIAL CONSISTENCY <level>
Sets consistency level for future conditional updates.
Valid consistency levels:
SERIAL, LOCAL_SERIAL.
SERIAL CONSISTENCY
SERIAL CONSISTENCY with no arguments shows the current consistency level.
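        For example:
          SERIAL CONSISTENCY LOCAL_SERIAL;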
"""
level = parsed.get_binding('level')
if level is None:
print('Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level]))
return
self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print('Serial consistency level set to %s.' % (level.upper(),))
def do_login(self, parsed):
"""
LOGIN [cqlsh only]
        Changes login information without requiring a restart.
LOGIN <username> (<password>)
        Login using the specified username. If a password is given, it will
        be used; otherwise, you will be prompted to enter one.
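        For example (the credentials here are hypothetical):
          LOGIN jdoe 'mypassword'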
"""
username = parsed.get_binding('username')
password = parsed.get_binding('password')
if password is None:
password = getpass.getpass()
else:
password = password[1:-1]
auth_provider = PlainTextAuthProvider(username=username, password=password)
conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version,
protocol_version=self.conn.protocol_version,
auth_provider=auth_provider,
ssl_options=self.conn.ssl_options,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=self.conn.connect_timeout,
connect_timeout=self.conn.connect_timeout)
if self.current_keyspace:
session = conn.connect(self.current_keyspace)
else:
session = conn.connect()
# Copy session properties
session.default_timeout = self.session.default_timeout
session.row_factory = self.session.row_factory
session.default_consistency_level = self.session.default_consistency_level
session.max_trace_wait = self.session.max_trace_wait
# Update after we've connected in case we fail to authenticate
self.conn = conn
self.auth_provider = auth_provider
self.username = username
self.session = session
def do_exit(self, parsed=None):
"""
EXIT/QUIT [cqlsh only]
Exits cqlsh.
"""
self.stop = True
if self.owns_connection:
self.conn.shutdown()
do_quit = do_exit
def do_clear(self, parsed):
"""
CLEAR/CLS [cqlsh only]
Clears the console.
"""
import subprocess
subprocess.call(['clear', 'cls'][is_win], shell=True)
do_cls = do_clear
def do_debug(self, parsed):
import pdb
pdb.set_trace()
def get_help_topics(self):
topics = [t[3:] for t in dir(self) if t.startswith('do_') and getattr(self, t, None).__doc__]
for hide_from_help in ('quit',):
topics.remove(hide_from_help)
return topics
def columnize(self, slist, *a, **kw):
return cmd.Cmd.columnize(self, sorted([u.upper() for u in slist]), *a, **kw)
def do_help(self, parsed):
"""
HELP [cqlsh only]
Gives information about cqlsh commands. To see available topics,
enter "HELP" without any arguments. To see help on a topic,
use "HELP <topic>".
"""
topics = parsed.get_binding('topic', ())
if not topics:
shell_topics = [t.upper() for t in self.get_help_topics()]
self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80)
cql_topics = [t.upper() for t in cqldocs.get_help_topics()]
self.print_topics("CQL help topics:", cql_topics, 15, 80)
return
for t in topics:
if t.lower() in self.get_help_topics():
doc = getattr(self, 'do_' + t.lower()).__doc__
self.stdout.write(doc + "\n")
elif t.lower() in cqldocs.get_help_topics():
urlpart = cqldocs.get_help_topic(t)
if urlpart is not None:
url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart)
if len(webbrowser._tryorder) == 0:
self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url))
elif self.browser is not None:
webbrowser.get(self.browser).open_new_tab(url)
else:
webbrowser.open_new_tab(url)
else:
self.printerr("*** No help on %s" % (t,))
def do_unicode(self, parsed):
"""
Textual input/output
        When control characters, or other characters which can't be encoded
        in your current locale, are found in values of 'text' or 'ascii'
        types, they will be shown as backslash escapes. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
def do_paging(self, parsed):
"""
PAGING [cqlsh]
Enables or disables query paging.
PAGING ON
Enables query paging for all further queries.
PAGING OFF
Disables paging.
PAGING
PAGING with no arguments shows the current query paging status.
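        A page size may also be given in place of ON to enable paging with
        that many rows per page, for example:
          PAGING 50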
"""
(self.use_paging, requested_page_size) = SwitchCommandWithValue(
"PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr)
if self.use_paging and requested_page_size is not None:
self.page_size = requested_page_size
if self.use_paging:
print(("Page size: {}".format(self.page_size)))
else:
self.page_size = self.default_page_size
def applycolor(self, text, color=None):
if not color or not self.color:
return text
return color + text + ANSI_RESET
def writeresult(self, text, color=None, newline=True, out=None):
if out is None:
out = self.query_out
# convert Exceptions, etc to text
if not isinstance(text, six.text_type):
text = "{}".format(text)
to_write = self.applycolor(text, color) + ('\n' if newline else '')
to_write = ensure_str(to_write)
out.write(to_write)
def flush_output(self):
self.query_out.flush()
def printerr(self, text, color=RED, newline=True, shownum=None):
self.statement_error = True
if shownum is None:
shownum = self.show_line_nums
if shownum:
text = '%s:%d:%s' % (self.stdin.name, self.lineno, text)
self.writeresult(text, color, newline=newline, out=sys.stderr)
def stop_coverage(self):
if self.coverage and self.cov is not None:
self.cov.stop()
self.cov.save()
self.cov = None
class SwitchCommand(object):
command = None
description = None
def __init__(self, command, desc):
self.command = command
self.description = desc
def execute(self, state, parsed, printerr):
switch = parsed.get_binding('switch')
if switch is None:
if state:
print("%s is currently enabled. Use %s OFF to disable"
% (self.description, self.command))
else:
print("%s is currently disabled. Use %s ON to enable."
% (self.description, self.command))
return state
if switch.upper() == 'ON':
if state:
printerr('%s is already enabled. Use %s OFF to disable.'
% (self.description, self.command))
return state
            print('Now %s is enabled.' % (self.description,))
return True
if switch.upper() == 'OFF':
if not state:
printerr('%s is not enabled.' % (self.description,))
return state
print('Disabled %s.' % (self.description,))
return False
class SwitchCommandWithValue(SwitchCommand):
"""The same as SwitchCommand except it also accepts a value in place of ON.
This returns a tuple of the form: (SWITCH_VALUE, PASSED_VALUE)
eg: PAGING 50 returns (True, 50)
PAGING OFF returns (False, None)
PAGING ON returns (True, None)
    The value must be convertible with value_type; otherwise PASSED_VALUE is
    returned as None.
"""
def __init__(self, command, desc, value_type=int):
SwitchCommand.__init__(self, command, desc)
self.value_type = value_type
def execute(self, state, parsed, printerr):
binary_switch_value = SwitchCommand.execute(self, state, parsed, printerr)
switch = parsed.get_binding('switch')
try:
value = self.value_type(switch)
binary_switch_value = True
except (ValueError, TypeError):
value = None
return (binary_switch_value, value)
def option_with_default(cparser_getter, section, option, default=None):
try:
return cparser_getter(section, option)
except configparser.Error:
return default
def raw_option_with_default(configs, section, option, default=None):
"""
    Almost the same as option_with_default(), but performs no string
    interpolation. Useful for config values that include the '%' symbol,
    e.g. time format strings.
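    For instance, a (hypothetical) cqlshrc entry such as
        time_format = %Y-%m-%d %H:%M:%S%z
    would raise configparser.InterpolationSyntaxError if read through an
    interpolating getter, but is returned verbatim here thanks to raw=True.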
"""
try:
return configs.get(section, option, raw=True)
except configparser.Error:
return default
def should_use_color():
if not sys.stdout.isatty():
return False
if os.environ.get('TERM', '') in ('dumb', ''):
return False
try:
import subprocess
p = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
stdout, _ = p.communicate()
if int(stdout.strip()) < 8:
return False
except (OSError, ImportError, ValueError):
# oh well, we tried. at least we know there's a $TERM and it's
# not "dumb".
pass
return True
def read_options(cmdlineargs, environment):
configs = configparser.SafeConfigParser()
configs.read(CONFIG_FILE)
rawconfigs = configparser.RawConfigParser()
rawconfigs.read(CONFIG_FILE)
optvalues = optparse.Values()
optvalues.username = option_with_default(configs.get, 'authentication', 'username')
optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password')
optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
DEFAULT_COMPLETEKEY)
optvalues.color = option_with_default(configs.getboolean, 'ui', 'color')
optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format',
DEFAULT_TIMESTAMP_FORMAT)
optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format',
DEFAULT_NANOTIME_FORMAT)
optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format',
DEFAULT_DATE_FORMAT)
optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision',
DEFAULT_FLOAT_PRECISION)
optvalues.double_precision = option_with_default(configs.getint, 'ui', 'double_precision',
DEFAULT_DOUBLE_PRECISION)
optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit())
optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait',
DEFAULT_MAX_TRACE_WAIT)
optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None)
optvalues.debug = False
optvalues.coverage = False
if 'CQLSH_COVERAGE' in environment.keys():
optvalues.coverage = True
optvalues.file = None
optvalues.ssl = option_with_default(configs.getboolean, 'connection', 'ssl', DEFAULT_SSL)
optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8)
optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty())
optvalues.protocol_version = option_with_default(configs.getint, 'protocol', 'version', None)
optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', None)
optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
optvalues.execute = None
(options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)
hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST)
port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT)
try:
options.connect_timeout = int(options.connect_timeout)
except ValueError:
parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,))
options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS
try:
options.request_timeout = int(options.request_timeout)
except ValueError:
parser.error('"%s" is not a valid request timeout.' % (options.request_timeout,))
options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS
hostname = environment.get('CQLSH_HOST', hostname)
port = environment.get('CQLSH_PORT', port)
if len(arguments) > 0:
hostname = arguments[0]
if len(arguments) > 1:
port = arguments[1]
if options.file or options.execute:
options.tty = False
if options.execute and not options.execute.endswith(';'):
options.execute += ';'
if optvalues.color in (True, False):
options.color = optvalues.color
else:
if options.file is not None:
options.color = False
else:
options.color = should_use_color()
if options.cqlversion is not None:
options.cqlversion, cqlvertup = full_cql_version(options.cqlversion)
if cqlvertup[0] < 3:
parser.error('%r is not a supported CQL version.' % options.cqlversion)
options.cqlmodule = cql3handling
try:
port = int(port)
except ValueError:
parser.error('%r is not a valid port number.' % port)
return options, hostname, port
def setup_cqlruleset(cqlmodule):
global cqlruleset
cqlruleset = cqlmodule.CqlRuleSet
cqlruleset.append_rules(cqlshhandling.cqlsh_extra_syntax_rules)
for rulename, termname, func in cqlshhandling.cqlsh_syntax_completers:
cqlruleset.completer_for(rulename, termname)(func)
cqlruleset.commands_end_with_newline.update(cqlshhandling.my_commands_ending_with_newline)
def setup_cqldocs(cqlmodule):
global cqldocs
cqldocs = cqlmodule.cqldocs
def init_history():
if readline is not None:
try:
readline.read_history_file(HISTORY)
except IOError:
pass
        # str.replace() returns a new string (strings are immutable), so the
        # result must be assigned back for the removal to take effect.
        delims = readline.get_completer_delims()
        delims = delims.replace("'", "")
        delims += '.'
        readline.set_completer_delims(delims)
def save_history():
if readline is not None:
try:
readline.write_history_file(HISTORY)
except IOError:
pass
def main(options, hostname, port):
setup_cqlruleset(options.cqlmodule)
setup_cqldocs(options.cqlmodule)
init_history()
csv.field_size_limit(options.field_size_limit)
if options.file is None:
stdin = None
else:
try:
encoding, bom_size = get_file_encoding_bomsize(options.file)
stdin = codecs.open(options.file, 'r', encoding)
stdin.seek(bom_size)
except IOError as e:
sys.exit("Can't open %r: %s" % (options.file, e))
if options.debug:
sys.stderr.write("Using CQL driver: %s\n" % (cassandra,))
sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,))
sys.stderr.write("Using '%s' encoding\n" % (options.encoding,))
sys.stderr.write("Using ssl: %s\n" % (options.ssl,))
# create timezone based on settings, environment or auto-detection
timezone = None
if options.timezone or 'TZ' in os.environ:
try:
import pytz
if options.timezone:
try:
timezone = pytz.timezone(options.timezone)
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone))
if 'TZ' in os.environ:
try:
timezone = pytz.timezone(os.environ['TZ'])
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ']))
except ImportError:
sys.stderr.write("Warning: Timezone defined and 'pytz' module for timezone conversion not installed. Timestamps will be displayed in UTC timezone.\n\n")
# try auto-detect timezone if tzlocal is installed
if not timezone:
try:
from tzlocal import get_localzone
timezone = get_localzone()
except ImportError:
            # We silently ignore this and fall back to UTC, unless a custom
            # timestamp format (which likely contains a TZ part) was specified.
if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, "
+ "but local timezone could not be detected.\n"
+ "Either install Python 'tzlocal' module for auto-detection "
+ "or specify client timezone in your cqlshrc.\n\n")
try:
shell = Shell(hostname,
port,
color=options.color,
username=options.username,
password=options.password,
stdin=stdin,
tty=options.tty,
completekey=options.completekey,
browser=options.browser,
protocol_version=options.protocol_version,
cqlver=options.cqlversion,
keyspace=options.keyspace,
display_timestamp_format=options.time_format,
display_nanotime_format=options.nanotime_format,
display_date_format=options.date_format,
display_float_precision=options.float_precision,
display_double_precision=options.double_precision,
display_timezone=timezone,
max_trace_wait=options.max_trace_wait,
ssl=options.ssl,
single_statement=options.execute,
request_timeout=options.request_timeout,
connect_timeout=options.connect_timeout,
encoding=options.encoding)
except KeyboardInterrupt:
sys.exit('Connection aborted.')
except CQL_ERRORS as e:
sys.exit('Connection error: %s' % (e,))
except VersionNotSupported as e:
sys.exit('Unsupported CQL version: %s' % (e,))
if options.debug:
shell.debug = True
if options.coverage:
shell.coverage = True
import signal
def handle_sighup():
shell.stop_coverage()
shell.do_exit()
signal.signal(signal.SIGHUP, handle_sighup)
shell.cmdloop()
save_history()
batch_mode = options.file or options.execute
if batch_mode and shell.statement_error:
sys.exit(2)
# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()
if __name__ == '__main__':
main(*read_options(sys.argv[1:], os.environ))
# vim: set ft=python et ts=4 sw=4 :
| apache-2.0 |
deepsrijit1105/edx-platform | lms/djangoapps/course_wiki/tests/test_access.py | 17 | 8442 | """
Tests for wiki permissions
"""
from django.contrib.auth.models import Group
from nose.plugins.attrib import attr
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.factories import InstructorFactory, StaffFactory
from wiki.models import URLPath
from course_wiki.views import get_or_create_root
from course_wiki.utils import user_is_article_course_staff, course_wiki_slug
from course_wiki import settings
class TestWikiAccessBase(ModuleStoreTestCase):
"""Base class for testing wiki access."""
def setUp(self):
super(TestWikiAccessBase, self).setUp()
self.wiki = get_or_create_root()
self.course_math101 = CourseFactory.create(org='org', number='math101', display_name='Course', metadata={'use_unique_wiki_id': 'false'})
self.course_math101_staff = self.create_staff_for_course(self.course_math101)
wiki_math101 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101))
wiki_math101_page = self.create_urlpath(wiki_math101, 'Child')
wiki_math101_page_page = self.create_urlpath(wiki_math101_page, 'Grandchild')
self.wiki_math101_pages = [wiki_math101, wiki_math101_page, wiki_math101_page_page]
self.course_math101b = CourseFactory.create(org='org', number='math101b', display_name='Course', metadata={'use_unique_wiki_id': 'true'})
self.course_math101b_staff = self.create_staff_for_course(self.course_math101b)
wiki_math101b = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101b))
wiki_math101b_page = self.create_urlpath(wiki_math101b, 'Child')
wiki_math101b_page_page = self.create_urlpath(wiki_math101b_page, 'Grandchild')
self.wiki_math101b_pages = [wiki_math101b, wiki_math101b_page, wiki_math101b_page_page]
def create_urlpath(self, parent, slug):
"""Creates an article at /parent/slug and returns its URLPath"""
return URLPath.create_article(parent, slug, title=slug)
def create_staff_for_course(self, course):
"""Creates and returns users with instructor and staff access to course."""
return [
InstructorFactory(course_key=course.id), # Creates instructor_org/number/run role name
StaffFactory(course_key=course.id), # Creates staff_org/number/run role name
]
@attr(shard=1)
class TestWikiAccess(TestWikiAccessBase):
"""Test wiki access for course staff."""
def setUp(self):
super(TestWikiAccess, self).setUp()
self.course_310b = CourseFactory.create(org='org', number='310b', display_name='Course')
self.course_310b_staff = self.create_staff_for_course(self.course_310b)
self.course_310b2 = CourseFactory.create(org='org', number='310b_', display_name='Course')
self.course_310b2_staff = self.create_staff_for_course(self.course_310b2)
self.wiki_310b = self.create_urlpath(self.wiki, course_wiki_slug(self.course_310b))
self.wiki_310b2 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_310b2))
def test_no_one_is_root_wiki_staff(self):
all_course_staff = self.course_math101_staff + self.course_310b_staff + self.course_310b2_staff
for course_staff in all_course_staff:
self.assertFalse(user_is_article_course_staff(course_staff, self.wiki.article))
def test_course_staff_is_course_wiki_staff(self):
for page in self.wiki_math101_pages:
for course_staff in self.course_math101_staff:
self.assertTrue(user_is_article_course_staff(course_staff, page.article))
for page in self.wiki_math101b_pages:
for course_staff in self.course_math101b_staff:
self.assertTrue(user_is_article_course_staff(course_staff, page.article))
def test_settings(self):
for page in self.wiki_math101_pages:
for course_staff in self.course_math101_staff:
self.assertTrue(settings.CAN_DELETE(page.article, course_staff))
self.assertTrue(settings.CAN_MODERATE(page.article, course_staff))
self.assertTrue(settings.CAN_CHANGE_PERMISSIONS(page.article, course_staff))
self.assertTrue(settings.CAN_ASSIGN(page.article, course_staff))
self.assertTrue(settings.CAN_ASSIGN_OWNER(page.article, course_staff))
for page in self.wiki_math101b_pages:
for course_staff in self.course_math101b_staff:
self.assertTrue(settings.CAN_DELETE(page.article, course_staff))
self.assertTrue(settings.CAN_MODERATE(page.article, course_staff))
self.assertTrue(settings.CAN_CHANGE_PERMISSIONS(page.article, course_staff))
self.assertTrue(settings.CAN_ASSIGN(page.article, course_staff))
self.assertTrue(settings.CAN_ASSIGN_OWNER(page.article, course_staff))
def test_other_course_staff_is_not_course_wiki_staff(self):
for page in self.wiki_math101_pages:
for course_staff in self.course_math101b_staff:
self.assertFalse(user_is_article_course_staff(course_staff, page.article))
for page in self.wiki_math101_pages:
for course_staff in self.course_310b_staff:
self.assertFalse(user_is_article_course_staff(course_staff, page.article))
for course_staff in self.course_310b_staff:
self.assertFalse(user_is_article_course_staff(course_staff, self.wiki_310b2.article))
for course_staff in self.course_310b2_staff:
self.assertFalse(user_is_article_course_staff(course_staff, self.wiki_310b.article))
@attr(shard=1)
class TestWikiAccessForStudent(TestWikiAccessBase):
"""Test access for students."""
def setUp(self):
super(TestWikiAccessForStudent, self).setUp()
self.student = UserFactory.create()
def test_student_is_not_root_wiki_staff(self):
self.assertFalse(user_is_article_course_staff(self.student, self.wiki.article))
def test_student_is_not_course_wiki_staff(self):
for page in self.wiki_math101_pages:
self.assertFalse(user_is_article_course_staff(self.student, page.article))
@attr(shard=1)
class TestWikiAccessForNumericalCourseNumber(TestWikiAccessBase):
"""Test staff has access if course number is numerical and wiki slug has an underscore appended."""
def setUp(self):
super(TestWikiAccessForNumericalCourseNumber, self).setUp()
self.course_200 = CourseFactory.create(org='org', number='200', display_name='Course')
self.course_200_staff = self.create_staff_for_course(self.course_200)
wiki_200 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_200))
wiki_200_page = self.create_urlpath(wiki_200, 'Child')
wiki_200_page_page = self.create_urlpath(wiki_200_page, 'Grandchild')
self.wiki_200_pages = [wiki_200, wiki_200_page, wiki_200_page_page]
def test_course_staff_is_course_wiki_staff_for_numerical_course_number(self):
for page in self.wiki_200_pages:
for course_staff in self.course_200_staff:
self.assertTrue(user_is_article_course_staff(course_staff, page.article))
@attr(shard=1)
class TestWikiAccessForOldFormatCourseStaffGroups(TestWikiAccessBase):
"""Test staff has access if course group has old format."""
def setUp(self):
super(TestWikiAccessForOldFormatCourseStaffGroups, self).setUp()
self.course_math101c = CourseFactory.create(org='org', number='math101c', display_name='Course')
Group.objects.get_or_create(name='instructor_math101c')
self.course_math101c_staff = self.create_staff_for_course(self.course_math101c)
wiki_math101c = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101c))
wiki_math101c_page = self.create_urlpath(wiki_math101c, 'Child')
wiki_math101c_page_page = self.create_urlpath(wiki_math101c_page, 'Grandchild')
self.wiki_math101c_pages = [wiki_math101c, wiki_math101c_page, wiki_math101c_page_page]
def test_course_staff_is_course_wiki_staff(self):
for page in self.wiki_math101c_pages:
for course_staff in self.course_math101c_staff:
self.assertTrue(user_is_article_course_staff(course_staff, page.article))
| agpl-3.0 |
kawamon/hue | desktop/core/ext-py/PyYAML-5.4.1/tests/lib/test_multi_constructor.py | 8 | 1519 | import yaml
import pprint
import sys
def _load_code(expression):
return eval(expression)
def myconstructor1(constructor, tag, node):
seq = constructor.construct_sequence(node)
return {tag: seq }
def myconstructor2(constructor, tag, node):
    seq = constructor.construct_sequence(node)
    # Trim the tag down to everything after the first '!', or failing that,
    # after the last ':'; leave it unchanged if neither separator is present.
    i = -1
    try:
        i = tag.index('!') + 1
    except ValueError:
        try:
            i = tag.rindex(':') + 1
        except ValueError:
            pass
    if i >= 0:
        tag = tag[i:]
    return {tag: seq}
class Multi1(yaml.FullLoader):
pass
class Multi2(yaml.FullLoader):
pass
def test_multi_constructor(input_filename, code_filename, verbose=False):
input = open(input_filename, 'rb').read().decode('utf-8')
native = _load_code(open(code_filename, 'rb').read())
# default multi constructor for ! and !! tags
Multi1.add_multi_constructor('!', myconstructor1)
Multi1.add_multi_constructor('tag:yaml.org,2002:', myconstructor1)
data = yaml.load(input, Loader=Multi1)
if verbose:
print('Multi1:')
print(data)
print(native)
assert(data == native)
# default multi constructor for all tags
Multi2.add_multi_constructor(None, myconstructor2)
data = yaml.load(input, Loader=Multi2)
if verbose:
print('Multi2:')
print(data)
print(native)
assert(data == native)
test_multi_constructor.unittest = ['.multi', '.code']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals())
| apache-2.0 |
fake-name/ReadableWebProxy | WebMirror/processor/ProcessorUtils/jsLiteralParse.py | 1 | 11155 |
import pyparsing as pp
def jsParse(inStr):
# This disaster is a context-free grammar parser for parsing javascript object literals.
# It needs to be able to handle a lot of the definitional messes you find in in-the-wild
# javascript object literals.
	# Unfortunately, Javascript is /way/ more tolerant than JSON when it comes to object literals
# so we can't just parse objects using python's `json` library.
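	# As a rough illustration, inputs like the following (taken from the
	# test() function at the bottom of this file) parse here but would all
	# be rejected by json.loads(), due to unquoted keys, trailing commas
	# and elided array elements:
	#
	#   jsParse("[{'id': 'thing', }, 'wat']")
	#   jsParse("{wat: [1, 2, ,], lol: [],}")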
TRUE = pp.Keyword("true").setParseAction( pp.replaceWith(True) )
FALSE = pp.Keyword("false").setParseAction( pp.replaceWith(False) )
NULL = pp.Keyword("null").setParseAction( pp.replaceWith(None) )
jsonString = pp.quotedString.setParseAction( pp.removeQuotes )
jsonNumber = pp.Combine( pp.Optional('-') + ( '0' | pp.Word('123456789',pp.nums) ) +
pp.Optional( '.' + pp.Word(pp.nums) ) +
pp.Optional( pp.Word('eE',exact=1) + pp.Word(pp.nums+'+-',pp.nums) ) )
jsonObject = pp.Forward()
jsonValue = pp.Forward()
jsonDict = pp.Forward()
jsonArray = pp.Forward()
jsonElements = pp.Forward()
rawText = pp.Regex('[a-zA-Z_$][0-9a-zA-Z_$]*')
commaToNull = pp.Word(',,', exact=1).setParseAction(pp.replaceWith(None))
jsonElements << pp.ZeroOrMore(commaToNull) + pp.Optional(jsonObject) + pp.ZeroOrMore((pp.Suppress(',') + jsonObject) | commaToNull)
jsonValue << ( jsonString | jsonNumber | TRUE | FALSE | NULL )
dictMembers = pp.delimitedList( pp.Group( (rawText | jsonString) + pp.Suppress(':') + (jsonValue | jsonDict | jsonArray)))
jsonDict << ( pp.Dict( pp.Suppress('{') + pp.Optional(dictMembers) + pp.ZeroOrMore(pp.Suppress(',')) + pp.Suppress('}') ) )
jsonArray << ( pp.Group(pp.Suppress('[') + pp.Optional(jsonElements) + pp.Suppress(']') ) )
jsonObject << (jsonValue | jsonDict | jsonArray)
jsonComment = pp.cppStyleComment
jsonObject.ignore( jsonComment )
def convertDict(s, l, toks):
return dict(toks.asList())
def convertNumbers(s,l,toks):
n = toks[0]
try:
return int(n)
except ValueError:
return float(n)
jsonNumber.setParseAction(convertNumbers)
jsonDict.setParseAction(convertDict)
# jsonObject.setDebug()
jsonObject.parseString('"inStr"').pop()
return jsonObject.parseString(inStr).pop()
# Stolen from http://stackoverflow.com/a/12017573/268006
import re
import urllib.parse
import os
# content-disposition = "Content-Disposition" ":"
# disposition-type *( ";" disposition-parm )
# disposition-type = "inline" | "attachment" | disp-ext-type
# ; case-insensitive
# disp-ext-type = token
# disposition-parm = filename-parm | disp-ext-parm
# filename-parm = "filename" "=" value
# | "filename*" "=" ext-value
# disp-ext-parm = token "=" value
# | ext-token "=" ext-value
# ext-token = <the characters in token, followed by "*">
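# A rough sketch of the intended behavior (the header values and URL below
# are hypothetical, for illustration only):
#
#   parseContentDispositon('attachment; filename="report.pdf"', url)
#       -> 'report.pdf'
#   parseContentDispositon("attachment; filename*=UTF-8''na%C3%AFve.txt", url)
#       -> 'naïve.txt' (the 'filename*' form wins when both are present)
#   parseContentDispositon('', 'http://example.com/dl/fallback.bin')
#       -> 'fallback.bin' (falls back to the basename of the source URL)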
def parseContentDispositon(cDispHdr, srcUrl):
token = '[-!#-\'*+.\dA-Z^-z|~]+'
qdtext = '[]-~\t !#-[]'
mimeCharset = '[-!#-&+\dA-Z^-z]+'
language = '(?:[A-Za-z]{2,3}(?:-[A-Za-z]{3}(?:-[A-Za-z]{3}){,2})?|[A-Za-z]{4,8})(?:-[A-Za-z]{4})?(?:-(?:[A-Za-z]{2}|\d{3}))(?:-(?:[\dA-Za-z]{5,8}|\d[\dA-Za-z]{3}))*(?:-[\dA-WY-Za-wy-z](?:-[\dA-Za-z]{2,8})+)*(?:-[Xx](?:-[\dA-Za-z]{1,8})+)?|[Xx](?:-[\dA-Za-z]{1,8})+|[Ee][Nn]-[Gg][Bb]-[Oo][Ee][Dd]|[Ii]-[Aa][Mm][Ii]|[Ii]-[Bb][Nn][Nn]|[Ii]-[Dd][Ee][Ff][Aa][Uu][Ll][Tt]|[Ii]-[Ee][Nn][Oo][Cc][Hh][Ii][Aa][Nn]|[Ii]-[Hh][Aa][Kk]|[Ii]-[Kk][Ll][Ii][Nn][Gg][Oo][Nn]|[Ii]-[Ll][Uu][Xx]|[Ii]-[Mm][Ii][Nn][Gg][Oo]|[Ii]-[Nn][Aa][Vv][Aa][Jj][Oo]|[Ii]-[Pp][Ww][Nn]|[Ii]-[Tt][Aa][Oo]|[Ii]-[Tt][Aa][Yy]|[Ii]-[Tt][Ss][Uu]|[Ss][Gg][Nn]-[Bb][Ee]-[Ff][Rr]|[Ss][Gg][Nn]-[Bb][Ee]-[Nn][Ll]|[Ss][Gg][Nn]-[Cc][Hh]-[Dd][Ee]'
valueChars = '(?:%[\dA-F][\dA-F]|[-!#$&+.\dA-Z^-z|~])*'
dispositionParm = '[Ff][Ii][Ll][Ee][Nn][Aa][Mm][Ee]\s*=\s*(?:({token})|"((?:{qdtext}|\\\\[\t !-~])*)")|[Ff][Ii][Ll][Ee][Nn][Aa][Mm][Ee]\*\s*=\s*({mimeCharset})\'(?:{language})?\'({valueChars})|{token}\s*=\s*(?:{token}|"(?:{qdtext}|\\\\[\t !-~])*")|{token}\*\s*=\s*{mimeCharset}\'(?:{language})?\'{valueChars}'.format(**locals())
# Wat?
formatArgs = {
'token' : token,
'qdtext' : qdtext,
'mimeCharset' : mimeCharset,
'language' : language,
'valueChars' : valueChars,
'dispositionParm' : dispositionParm
}
try:
m = re.match('(?:{token}\s*;\s*)?(?:{dispositionParm})(?:\s*;\s*(?:{dispositionParm}))*|{token}'.format(**formatArgs), cDispHdr)
except KeyError:
name = os.path.basename(urllib.parse.unquote(urllib.parse.urlparse(srcUrl).path))
else:
if not m:
name = os.path.basename(urllib.parse.unquote(urllib.parse.urlparse(srcUrl).path))
# Many user agent implementations predating this specification do not
# understand the "filename*" parameter. Therefore, when both "filename"
# and "filename*" are present in a single header field value, recipients
# SHOULD pick "filename*" and ignore "filename"
elif m.group(8) is not None:
name = urllib.parse.unquote(m.group(8))
# name = urllib.parse.unquote(m.group(8)).decode(m.group(7)) # Urllib is decoding the headers before I get them, because annoying, apparentlty.
elif m.group(4) is not None:
name = urllib.parse.unquote(m.group(4))
# name = urllib.parse.unquote(m.group(4)).decode(m.group(3))
elif m.group(6) is not None:
			# The replacement must be a raw string; a bare '\1' here would insert
			# the control character chr(1) rather than the captured group.
			name = re.sub('\\\\(.)', r'\1', m.group(6))
elif m.group(5) is not None:
name = m.group(5)
elif m.group(2) is not None:
			name = re.sub('\\\\(.)', r'\1', m.group(2))
else:
name = m.group(1)
# Recipients MUST NOT be able to write into any location other than one to
# which they are specifically entitled
if name:
name = os.path.basename(name)
else:
name = os.path.basename(urllib.parse.unquote(urllib.parse.urlparse(srcUrl).path))
return name
def test():
tests = (
'''[{'id': 'thing', }, 'wat']''', # ok
'''{wat: [1, 2, ,], lol: [],}''',
'''[{wat: [1, 2, ,], lol: [],}]''',
'''[{'id': 'thing', }, 'wat',]''', # ok
'''"wat", "wat"''', # ok
'''
[{'id': '0B8UYgI2TD_nmNUMzNWJpZnJkRkU', 'title': 'story1-2.txt','enableStandaloneSharing': true,'enableEmbedDialog': true,'projectorFeedbackId': '99950', 'projectorFeedbackBucket': 'viewer-web',},["",1,,1,1,1,1,,,1,1,[0,,0,"AIzaSyDVQw45DwoYh632gvsP5vPDqEKvb-Ywnb8",0,0,1,0,,,0,"/drive/v2internal",0,0,0,[0,0,0]
]
,1,5,1,"https://docs.google.com",0,1,"https://docs.google.com",0,1,1,1,1,,1,20,1,0,0,1,1,[[,"0"]
,6,1,1]
,1,1,1,,[0,,,,"https://accounts.google.com/ServiceLogin?service\u003dwise\u0026passive\u003d1209600\u0026continue\u003dhttps://docs.google.com/file/d/0B8UYgI2TD_nmNUMzNWJpZnJkRkU/edit?pli%3D1\u0026hl\u003den\u0026followup\u003dhttps://docs.google.com/file/d/0B8UYgI2TD_nmNUMzNWJpZnJkRkU/edit?pli%3D1"]
,0,1,1,600000,[1]
,,0,0,[0,0,0]
,["https://youtube.googleapis.com",1]
,0,0,,0,1,0]
,[,"story1-2.txt","https://lh5.googleusercontent.com/0JHRa3LjGQrV7UOhZMcuCj5I81mXlTOnrvtm4HPjQruxNP0SMuGJF-K7HsjDP8b1rM_e\u003ds1600",,,,"0B8UYgI2TD_nmNUMzNWJpZnJkRkU",,,"https://docs.google.com/st/viewurls?id\u003d0B8UYgI2TD_nmNUMzNWJpZnJkRkU\u0026m\u003d1440",,"text/plain",,,6,,"https://docs.google.com/file/d/0B8UYgI2TD_nmNUMzNWJpZnJkRkU/view?pli\u003d1",1,"https://docs.google.com/uc?id\u003d0B8UYgI2TD_nmNUMzNWJpZnJkRkU\u0026export\u003ddownload",,5,,,,,,,,,,,0]]
''',
'''{folderModel: [
[,"1X8oBqzsQcOe42evH7Fiw2LlPqczDJh5GISzwr6kPH5M",,,"https://docs.google.com/spreadsheets/d/1X8oBqzsQcOe42evH7Fiw2LlPqczDJh5GISzwr6kPH5M/edit?usp\u003ddrive_web"],
[,"1ZdweQdjIBqNsJW6opMhkkRcSlrbgUN5WHCcYrMY7oqI",,,"https://docs.google.com/document/d/1ZdweQdjIBqNsJW6opMhkkRcSlrbgUN5WHCcYrMY7oqI/edit?usp\u003ddrive_web"],
[,"1aqTds7Pl1VOkmSnxKrP4TdylM_tWr_0DlTdUJ3DjRLE",,,"https://docs.google.com/document/d/1aqTds7Pl1VOkmSnxKrP4TdylM_tWr_0DlTdUJ3DjRLE/edit?usp\u003ddrive_web"],
[,"1lw1IiIly8-9BcrKOVfxt7IcA_aYr-AibqTQXu3WR2zU",,,"https://docs.google.com/document/d/1lw1IiIly8-9BcrKOVfxt7IcA_aYr-AibqTQXu3WR2zU/edit?usp\u003ddrive_web"],
[,"1zmxymHw5mpCEr8P-rXAvbVUoJkHj-8T02g7TUMKqcME",,,"https://docs.google.com/document/d/1zmxymHw5mpCEr8P-rXAvbVUoJkHj-8T02g7TUMKqcME/edit?usp\u003ddrive_web"],
[,"1yQm3FCJySVSPXNScsjAivWOKkcDk1lQWEf1MFRoQ0eI",,,"https://docs.google.com/document/d/1yQm3FCJySVSPXNScsjAivWOKkcDk1lQWEf1MFRoQ0eI/edit?usp\u003ddrive_web"],
[,"17TWHTW4ucfyz52fRu4csDyu2GLR2fQHL3TGa3awMnyA",,,"https://docs.google.com/document/d/17TWHTW4ucfyz52fRu4csDyu2GLR2fQHL3TGa3awMnyA/edit?usp\u003ddrive_web"],
[,"1-r8cQXe-Eq0JRLlu_KHblMsyfJa1K0x0eRdAJHGOq5M",,,"https://docs.google.com/document/d/1-r8cQXe-Eq0JRLlu_KHblMsyfJa1K0x0eRdAJHGOq5M/edit?usp\u003ddrive_web"],
[,"13LA8daW3YYqAcBwZtjEd-Y_Y3BuPib7yyPKhsGZclw4",,,"https://docs.google.com/document/d/13LA8daW3YYqAcBwZtjEd-Y_Y3BuPib7yyPKhsGZclw4/edit?usp\u003ddrive_web"],
[,"1KIV74EoKr9nJWirCV4YX0aCuB66M065sjOCFcqMaAkE",,,"https://docs.google.com/document/d/1KIV74EoKr9nJWirCV4YX0aCuB66M065sjOCFcqMaAkE/edit?usp\u003ddrive_web"],
[,"1wiAkjP0iKyH_dcduJEO9kuQPmOxcg8NrRVTBrtbNX80",,,"https://docs.google.com/document/d/1wiAkjP0iKyH_dcduJEO9kuQPmOxcg8NrRVTBrtbNX80/edit?usp\u003ddrive_web"],
[,"14E-mqfTx4AMxQp16lkFER4KxDY3IEbPFngDG2b7U8_A",,,"https://docs.google.com/document/d/14E-mqfTx4AMxQp16lkFER4KxDY3IEbPFngDG2b7U8_A/edit?usp\u003ddrive_web"],
[,"1gs_lPbBDUIU5CU9Ifzfb5iU2sw6NSJ99pUz_SQRAiek",,,"https://docs.google.com/document/d/1gs_lPbBDUIU5CU9Ifzfb5iU2sw6NSJ99pUz_SQRAiek/edit?usp\u003ddrive_web"],
[,"1gohzQIVuU1t4Xe7Ptgta9z2rzqPttudipcBP93LYNOg",,,"https://docs.google.com/document/d/1gohzQIVuU1t4Xe7Ptgta9z2rzqPttudipcBP93LYNOg/edit?usp\u003ddrive_web"],
[,"1WBHJYHScs1T_A8cx4vl7G4CsVGZW1N7fmUPpJktRInM",,,"https://docs.google.com/document/d/1WBHJYHScs1T_A8cx4vl7G4CsVGZW1N7fmUPpJktRInM/edit?usp\u003ddrive_web"],
[,"1QIWDTGzuPUSu4zt9DK7UXCULC8XbiGYSFJ6xQO6KuEA",,,"https://docs.google.com/document/d/1QIWDTGzuPUSu4zt9DK7UXCULC8XbiGYSFJ6xQO6KuEA/edit?usp\u003ddrive_web"],
[,"1TdFei-WeOoNqyziCopOYtdULMTspy81247PWKICL40U",,,"https://docs.google.com/document/d/1TdFei-WeOoNqyziCopOYtdULMTspy81247PWKICL40U/edit?usp\u003ddrive_web"],
[,"12VtmcLrm99guIYu0VejKAbfCSlJiORs3erUEoGxyRh8",,,"https://docs.google.com/document/d/12VtmcLrm99guIYu0VejKAbfCSlJiORs3erUEoGxyRh8/edit?usp\u003ddrive_web"],
[,"10H9JbkmBK6qblquM7eUfh0MMazrx96p-ISSeO4WaN-w",,,"https://docs.google.com/document/d/10H9JbkmBK6qblquM7eUfh0MMazrx96p-ISSeO4WaN-w/edit?usp\u003ddrive_web"],
[,"1fmZVeM43nb08qZIQ2881ah3xy-w7UZGlRFgyiNICddw",,,"https://docs.google.com/document/d/1fmZVeM43nb08qZIQ2881ah3xy-w7UZGlRFgyiNICddw/edit?usp\u003ddrive_web"],
[,"1c6SmpWQZWNM0mldRZ5Dg9eNUNnkqJhY3SGHR5YpV0Nw",,,"https://docs.google.com/document/d/1c6SmpWQZWNM0mldRZ5Dg9eNUNnkqJhY3SGHR5YpV0Nw/edit?usp\u003ddrive_web"],
[,"1rhoCQcOUW1eccV4Fsh6K76u0Vup7DMQhBfDcknEbFHI",,,"https://docs.google.com/document/d/1rhoCQcOUW1eccV4Fsh6K76u0Vup7DMQhBfDcknEbFHI/edit?usp\u003ddrive_web"]
]
}''',
)
for test in tests:
print("Parsing '%s'" % test)
results = jsParse(test)
print(results)
# print(results[-1])
if __name__ == "__main__":
test()
| bsd-3-clause |
karllessard/tensorflow | tensorflow/python/kernel_tests/control_flow_ops_py_test.py | 1 | 190717 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import re
import sys
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_v2 # pylint: disable=unused-import
# pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def all_fetchables():
tensor_names = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.outputs:
if graph.is_fetchable(t):
tensor_names.append(t.name)
return tensor_names
def all_feedables():
feedable_tensors = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_feedable(t):
feedable_tensors.append(t)
return feedable_tensors
def opt_cfg(do_constant_folding=True):
return config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True,
do_constant_folding=do_constant_folding)))
def isum(s, maximum_iterations=None):
i = constant_op.constant(0, name="i")
c = lambda i, s: math_ops.less(i, 10)
b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
_, r_s = control_flow_ops.while_loop(
c, b, [i, s], maximum_iterations=maximum_iterations)
return r_s
def enqueue_print_op(s):
"""Enqueues an op that prints a message to be captured in the test."""
return logging_ops.print_v2("ControlFlowOpsTest: " + s)
def filter_test_messages(s):
"""Returns a list of messages printed by enqueue_print_op."""
prefix = "ControlFlowOpsTest: "
return [l[len(prefix):] for l in s.split("\n") if l.startswith(prefix)]
def tf_function_in_tf2(f):
if tf2.enabled():
# In TF1 do not wrap with tf.function so that we can test the v1 control
# flow code path.
return def_function.function(f)
return f
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase, parameterized.TestCase):
@test_util.run_v1_only("b/120545219")
def testRefIdentity(self):
with self.cached_session():
v = variables.VariableV1(7)
v = control_flow_ops._Identity(v)
op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(isinstance(v2, ops.Tensor))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v2))
@test_util.run_v1_only("b/120545219")
def testRefEnter(self):
with self.cached_session():
v = variables.VariableV1(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = gen_control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v3))
@test_util.run_v1_only("b/120545219")
def testRefSwitch(self):
with self.cached_session():
v = variables.VariableV1(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v2))
def testEnterMulExit(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = gen_control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = gen_control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = self.evaluate(exit_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_deprecated_v1
def testEnterShapePropagation(self):
with self.cached_session():
v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
# If is_constant=True, the shape information should be propagated.
enter_v_constant = gen_control_flow_ops.enter(
v, "frame1", is_constant=True)
self.assertEqual(enter_v_constant.shape, [2])
# Otherwise, the shape should be unknown.
enter_v_non_constant = gen_control_flow_ops.enter(
v, "frame2", is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values
ind = merge_op.indices
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
@test_util.run_v1_only("b/120545219")
def testSwitchDeadBranch(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
self.evaluate(dead_branch)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeLess(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.arange(1, 7), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddIdentity(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddMul(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testLoop_false(self):
with self.cached_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = gen_control_flow_ops.enter(false, "foo_1", False)
enter_n = gen_control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = self.evaluate(exit_n)
self.assertAllEqual(10, result)
@test_util.run_deprecated_v1
def testLoop_1(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testLoop_2(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testDifferentFrame(self):
with self.cached_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = gen_control_flow_ops.enter(data, "foo_1", False)
enter_2 = gen_control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
@test_util.run_deprecated_v1
def testCondBool(self):
values = constant_op.constant(10)
fn1 = lambda: math_ops.add(values, 1)
fn2 = lambda: math_ops.subtract(values, 1)
with self.assertRaisesRegex(TypeError, "must not be a Python bool"):
_ = control_flow_ops.cond(False, fn1, fn2)
@test_util.run_deprecated_v1
def testCondInt(self):
p = array_ops.placeholder(dtypes.bool, shape=[])
v = constant_op.constant(10)
fn1 = lambda: math_ops.add(v, 1)
fn2 = lambda: math_ops.subtract(v, 1)
y = control_flow_ops.cond(p, fn1, fn2)
grad = gradients_impl.gradients(y, [v])
self.assertAllEqual([None], grad)
def testCondOutputShape(self):
x = constant_op.constant(1.0)
b = control_flow_ops.cond(
constant_op.constant(True), lambda: math_ops.square(x),
lambda: math_ops.subtract(x, 1.))
self.assertEqual(b.shape, tensor_shape.TensorShape([]))
@test_util.run_v1_only("b/120545219")
def testFetchable(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_fetchable(t.op):
sess.run(t, feed_dict={x: 3})
else:
with self.assertRaisesRegex(ValueError,
"has been marked as not fetchable"):
sess.run(t, feed_dict={x: 3})
@test_util.disable_control_flow_v2("Not relevant")
@test_util.run_v1_only("b/120545219")
def testFeedable(self):
with self.cached_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegex(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([10])
indices = constant_op.constant([0])
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values
ind = r.indices
self.assertAllEqual([11], val)
self.assertAllEqual([0], ind)
def testCondMismatchedIndexedSlices(self):
@def_function.function
def foo():
values = constant_op.constant([10])
indices = constant_op.constant([0])
x = ops.IndexedSlices(values, indices)
with self.assertRaisesRegex(TypeError,
"Cannot reconcile tf.cond 0-th outputs"):
control_flow_ops.cond(
constant_op.constant(True),
lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices),
            lambda: math_ops.add(x.values, 1))
foo()
def testCondSparseTensor(self):
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values)
self.assertAllEqual([[1], [4]], r.indices)
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondRaggedTensor(self):
rt = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.concat([rt + 2, [[100]]], axis=0)
fn2 = lambda: rt[:2] - 2
result = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3, 4, 5, 6, 7, 8, 100], result.values)
self.assertAllEqual([0, 2, 3, 6, 7], result.row_splits)
@test_util.run_v1_only("b/120545219")
def testCondResource(self):
with self.cached_session():
rv = resource_variable_ops.ResourceVariable(True)
self.evaluate(variables.global_variables_initializer())
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(
1.0, self.evaluate(control_flow_ops.cond(rv, case, lambda: t)))
@test_util.run_deprecated_v1
def testCondResourceGradShape(self):
rv1 = resource_variable_ops.ResourceVariable([1.0, 2.0])
rv2 = resource_variable_ops.ResourceVariable([3.0, 4.0])
pred = constant_op.constant(True)
result = control_flow_ops.cond(pred, lambda: rv1, lambda: rv2)
grads = gradients_impl.gradients(result, [rv1, rv2])
self.assertAllEqual(grads[0].shape.as_list(), [2])
self.assertAllEqual(grads[1].shape.as_list(), [2])
@test_util.run_v1_only("b/120545219")
def testCondWithTensorArrayGrad(self):
with self.cached_session() as sess:
with ops.device(test.gpu_device_name()):
pred = array_ops.placeholder(dtypes.bool, [])
x = constant_op.constant([1.0, 2.0, 3.0])
y = control_flow_ops.cond(
pred, lambda: map_fn.map_fn(lambda z: z * 2.0, x),
lambda: constant_op.constant([1.0, 1.0, 1.0]))
g = gradients_impl.gradients(y, x)[0]
self.assertAllEqual(sess.run(g, {pred: True}), [2.0, 2.0, 2.0])
self.assertAllEqual(sess.run(g, {pred: False}), [0.0, 0.0, 0.0])
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlicesDifferentTypes(self):
with self.cached_session():
values = constant_op.constant([10])
i_32 = ops.convert_to_tensor([0], name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor([0], name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values
ind = r.indices
self.assertAllEqual([11], val)
self.assertAllEqual([0], ind)
      self.assertEqual(ind.dtype, np.int64)
@test_util.run_v1_only("b/120545219")
def testCondColocation(self):
with self.session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = self.evaluate(r)
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
# TODO(b/116526896): Enable GPU tests.
# self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.cached_session():
x = constant_op.constant(10)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = self.evaluate(r)
self.assertAllEqual(9, result)
def testCond_3(self):
with self.cached_session():
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = self.evaluate(r)
self.assertAllEqual(12, result)
@test_util.run_in_graph_and_eager_modes
def testCondPruning(self):
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
def f():
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertEqual(len(r), 2)
return r[1]
f_defun = eager_function.defun(f)
if not context.executing_eagerly():
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(f())
self.assertEqual(True, result)
# Only second cond result was fetched, so v1 assign shouldn't run.
self.assertEqual(7, self.evaluate(v1))
self.assertEqual(2, self.evaluate(v2))
self.assertEqual(7, self.evaluate(v3))
result = f_defun()
self.assertEqual(True, self.evaluate(result))
# Both v1 and v2 branch assignments should be run in defun.
self.assertEqual(1, self.evaluate(v1))
self.assertEqual(2, self.evaluate(v2))
self.assertEqual(7, self.evaluate(v3))
def testCond_5(self):
with self.cached_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, self.evaluate(count))
@test_util.run_v1_only("b/120545219")
def testCond_6(self):
with self.cached_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(r)
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], self.evaluate(r))
@parameterized.parameters(dtypes.float32, dtypes.float64)
@test_util.run_v1_only("Uses tf.gradients")
def testCondResourceGrad(self, dtype):
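    # Takes a gradient through a cond whose taken (false) branch returns a
    # resource variable.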
init = constant_op.constant([7.], dtype=dtype)
v1 = variables.Variable(init)
age = constant_op.constant(3., dtype=dtype)
pred = math_ops.greater(age, 4.)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, v1)[0]
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(grad, [1.])
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCond_Device(self):
x = constant_op.constant(-10.)
# True branch function defined outside of device scope
def true_fn():
return math_ops.exp(x)
with ops.device("CPU:0"):
r = control_flow_ops.cond(
constant_op.constant(True), true_fn, lambda: 0.)
self.assertIn("cpu", r.device.lower())
with session.Session() as sess:
options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(r, options=options, run_metadata=run_metadata)
# We expect that everything runs on CPU, even if GPU is available.
self.assertEqual(len(run_metadata.partition_graphs), 1)
def _count_matching_switch_nodes_on_device(self, run_metadata, device_str,
dtype):
    """Returns the number of Switch nodes with type `dtype` on `device_str`."""
device_graphs = [
g for g in run_metadata.partition_graphs
if device_str in g.node[0].device
]
self.assertLen(device_graphs, 1)
switch_nodes = [
n for n in device_graphs[0].node
if n.op == "Switch" and n.attr["T"].type == dtype.as_datatype_enum
]
return len(switch_nodes)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCondSwitchColocatedWithInputWhenInputExplicitlyPlacedOnCPU(self):
x = array_ops.placeholder(dtypes.float32)
# `arg` is used in the cond then branch so a Switch node is created for it.
# We test that the Switch node gets placed on the same device as `arg`.
# We force `arg` to be on CPU here.
with ops.device("CPU:0"):
arg = x + 10.
def true_fn():
with ops.device("CPU:0"):
return arg + 1
r = control_flow_ops.cond(constant_op.constant(True), true_fn, lambda: 0.)
with session.Session() as sess:
run_metadata = config_pb2.RunMetadata()
options = config_pb2.RunOptions(output_partition_graphs=True)
sess.run(
r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
self.assertLen(run_metadata.partition_graphs, 2)
# Check that the Switch for `arg` gets placed on CPU.
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "CPU",
dtypes.float32), 1)
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "GPU",
dtypes.float32), 0)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCondSwitchColocatedWithInputWhenInputPlacedOnCPU(self):
x = array_ops.placeholder(dtypes.float32)
# `arg` is used in the cond then branch so a Switch node is created for it.
# We test that the Switch node gets placed on the same device as `arg`.
    # Since `arg` is a dataset (and datasets only have a CPU kernel), it gets
    # placed on CPU by the placer.
arg = dataset_ops.Dataset.range(8)
def true_fn():
return cardinality.cardinality(arg)
r = control_flow_ops.cond(
constant_op.constant(True), true_fn,
lambda: constant_op.constant(0, dtypes.int64))
with session.Session() as sess:
run_metadata = config_pb2.RunMetadata()
options = config_pb2.RunOptions(output_partition_graphs=True)
sess.run(
r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
self.assertLen(run_metadata.partition_graphs, 2)
# Check that the Switch for `arg` gets placed on CPU.
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "CPU",
dtypes.variant), 1)
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "GPU",
dtypes.variant), 0)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCondSwitchColocatedWithInputWhenInputOnGPU(self):
x = array_ops.placeholder(dtypes.float32)
# `arg` is used in the cond then branch so a Switch node is created for it.
# We test that the Switch node gets placed on the same device as `arg`.
# Note: `arg` gets placed on GPU by default by the placer.
arg = x + 10.
def true_fn():
with ops.device("CPU:0"):
return arg + 1
r = control_flow_ops.cond(constant_op.constant(True), true_fn, lambda: 0.)
with session.Session() as sess:
run_metadata = config_pb2.RunMetadata()
options = config_pb2.RunOptions(output_partition_graphs=True)
sess.run(
r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 2)
# Check that the Switch for `arg` gets placed on GPU.
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "CPU",
dtypes.float32), 0)
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "GPU",
dtypes.float32), 1)
def testCondAccessTrueBranchTensorInFalseBranchRaises(self):
@def_function.function
def f():
c = constant_op.constant(1.)
inputs = {"c": c}
def true_fn(inputs):
inputs["c"] = array_ops.identity(inputs["c"], name="true_branch")
return inputs["c"]
def false_fn(inputs):
return array_ops.identity(inputs["c"])
pred = constant_op.constant(True)
return control_flow_ops.cond(
pred, lambda: true_fn(inputs), lambda: false_fn(inputs))
# This was needed for backwards compatibility with TF2 Estimators which
# rely on variable names.
prefix = "cond/" if context.executing_eagerly() else ""
with self.assertRaisesRegex(
ValueError,
"Tensor %strue_branch:0 in true_fn is accessed from false_fn." %
prefix):
f()
def testSwitchCaseAccessBranch1TensorInBranch4Raises(self):
@def_function.function
def f():
c = constant_op.constant(1.)
inputs = {"c": c}
def br1_fn(inputs):
inputs["c"] = array_ops.identity(inputs["c"], name="br1_identity")
return inputs["c"]
def br4_fn(inputs):
return array_ops.identity(inputs["c"])
def other_fn():
return array_ops.identity(c)
return control_flow_ops.switch_case(
constant_op.constant(2),
[other_fn, lambda: br1_fn(inputs), other_fn, other_fn,
lambda: br4_fn(inputs)])
# This was needed for backwards compatibility with TF2 Estimators which
# rely on variable names.
prefix = "switch_case/indexed_case/" if context.executing_eagerly() else ""
with self.assertRaisesRegex(
ValueError, "Tensor %sbr1_identity:0 in branch 1 is "
"accessed from branch 4." % prefix):
f()
def testCondListOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, y), math_ops.add(x, y)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertListEqual([210, 210], test_result)
def testTupleOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: (math_ops.add(x, y), math_ops.add(x, y))
fn2 = lambda: (y, y)
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertTupleEqual((210, 210), test_result)
def testDictOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
fn2 = lambda: {"a": y, "b": y}
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertDictEqual({"a": 210, "b": 210}, test_result)
def testEmbeddedListOutput(self):
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [[math_ops.add(x, y), math_ops.add(x, y)]]
fn2 = lambda: [[y, y]]
    # Pass strict=True so the singleton nested output structure is preserved
    # (in non-strict mode, cond unpacks singleton outputs).
r = control_flow_ops.cond(pred, fn1, fn2, strict=True)
test_result = self.evaluate(r)
self.assertListEqual([[210, 210]], test_result)
def testEmbeddedTupleOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: ((math_ops.add(x, y), math_ops.add(x, y)))
fn2 = lambda: ((y, y))
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertTupleEqual(((210, 210)), test_result)
def testEmbeddedDictOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": {"c": math_ops.add(x, y)},
"b": {"d": math_ops.add(x, y)}}
fn2 = lambda: {"a": {"c": y},
"b": {"d": y}}
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertDictEqual({"a": {"c": 210}, "b": {"d": 210}}, test_result)
@test_util.run_v1_only("b/120545219")
def testCheckNestedOutputStruct(self):
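    # Branch outputs whose dict keys differ must be rejected; v1 and v2
    # control flow raise different error types and messages.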
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
fn2 = lambda: {"c": y, "d": y}
v1_msg = "The two structures don't have the same nested structure"
v2_msg = ("true_fn and false_fn arguments to tf.cond must have the same "
"number, type, and overall structure of return values.")
with self.assertRaisesRegex(
TypeError if control_flow_util.ENABLE_CONTROL_FLOW_V2 else ValueError,
v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
control_flow_ops.cond(pred, fn1, fn2)
@test_util.run_deprecated_v1
def testCondRef(self):
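    # cond should handle ref-typed tensors (legacy variables) as branch
    # outputs.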
with self.cached_session():
x = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="x",
container="",
shared_name="")
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testCondWithControl(self):
with self.cached_session() as sess:
control_holder = array_ops.placeholder(dtypes.float32, shape=())
a = constant_op.constant(3)
def true_branch():
with ops.control_dependencies([control_holder]):
_ = a + 1
return a + 2
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
result = sess.run(r, feed_dict={control_holder: 5.})
self.assertEqual(5, result)
@test_util.run_v1_only("b/120545219")
def testUninitializedRefIdentity(self):
with self.cached_session() as sess:
v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v",
container="",
shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
# of the reference in the 'true' branch in the 'tf.identity' op will
# not 'fire' when v is uninitialized, so this is a valid construction.
      # This test checks that ref_identity accepts an uninitialized ref as
      # input, so that this construction is allowed.
v_f_op = gen_array_ops.ref_identity(v_f)
v_t_op = gen_array_ops.ref_identity(v_t)
with ops.control_dependencies([v_f_op]):
assign_v = state_ops.assign(v, [1.0])
with ops.control_dependencies([v_t_op]):
orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], self.evaluate(merged_op.output))
def testCondSwitchIdentity(self):
    # Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(r)
def testCondRecvIdentity(self):
    # Make sure the recv identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
with ops.device("/cpu:0"):
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(r)
@test_util.run_deprecated_v1
@test_util.enable_control_flow_v2
def testDisableLoweringSwitchMerge(self):
if test_util.is_gpu_available():
self.skipTest(
"Single threaded executor doesn't support partitioned graphs. "
"Skipping GPU test.")
    run_opts = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata_no_lowering = config_pb2.RunMetadata()
    run_metadata_with_lowering = config_pb2.RunMetadata()
    config = opt_cfg(do_constant_folding=False)
    # Make pred feedable to ensure we don't constant-fold it out.
    pred = array_ops.placeholder_with_default(
        constant_op.constant(True), shape=())
r = control_flow_ops.cond(pred, lambda: True, lambda: False)
with session.Session(config=config) as sess:
r_value = sess.run(
r, options=run_opts, run_metadata=run_metadata_with_lowering)
self.assertEqual(r_value, True)
# Use the single threaded executor, which disables control flow lowering.
config.experimental.executor_type = "SINGLE_THREADED_EXECUTOR"
with session.Session(config=config) as sess:
r_value = sess.run(
r, options=run_opts, run_metadata=run_metadata_no_lowering)
self.assertEqual(r_value, True)
self.assertTrue( # pylint: disable=g-complex-comprehension
any("switch" in ns.node_name
for dev_stat in run_metadata_with_lowering.step_stats.dev_stats
for ns in dev_stat.node_stats))
self.assertTrue( # pylint: disable=g-complex-comprehension
all("switch" not in ns.node_name
for dev_stat in run_metadata_no_lowering.step_stats.dev_stats
for ns in dev_stat.node_stats))
@test_util.run_v1_only("b/120545219")
def testCondGrad_1(self):
with self.cached_session():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(1.0, self.evaluate(grad))
@test_util.run_deprecated_v1
@test_util.enable_control_flow_v2
def testCondComputeGradAfterSessRunFails(self):
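    # In v2 control flow, building gradients after the forward cond has
    # already been evaluated fails unless output_all_intermediates is enabled
    # (see the next test).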
with self.cached_session():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
def true_fn():
a = x * x
return a * a
def false_fn():
return x * x
r = control_flow_ops.cond(pred, true_fn, false_fn)
self.assertAllEqual(r, 10000.)
grad = gradients_impl.gradients(r, [x])[0]
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Connecting to invalid output 1 of source node cond which has 1 "
r"outputs. Try using "
"tf.compat.v1.experimental.output_all_intermediates\(True\)."):
self.evaluate(grad)
@test_util.run_deprecated_v1
@test_util.enable_output_all_intermediates
def testCondComputeGradAfterSessRun(self):
with self.cached_session():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
def true_fn():
a = x * x
return a * a
def false_fn():
return x * x
r = control_flow_ops.cond(pred, true_fn, false_fn)
self.assertAllEqual(r, 10000.)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(grad, 4000.)
@test_util.run_deprecated_v1
@test_util.enable_output_all_intermediates
def testNestedCondComputeGradAfterSessRun(self):
with self.cached_session():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
def true_fn():
def inner_true_fn():
a = x * x
return a * a
def inner_false_fn():
return x * x
return control_flow_ops.cond(
constant_op.constant(True), inner_true_fn, inner_false_fn)
def false_fn():
return x * x
r = control_flow_ops.cond(pred, true_fn, false_fn)
self.assertAllEqual(r, 10000.)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(grad, 4000.)
@test_util.run_deprecated_v1
def testCondGrad_2(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
@test_util.disable_control_flow_v2(
"b/110550782 (gradient w.r.t external variable)")
@test_util.run_deprecated_v1
def testCondGrad_3(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
ox = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
def fn1(x):
m = x * x
return gradients_impl.gradients(m, [ox])[0]
fn2 = lambda: math_ops.multiply(ox, 3.0)
y = math_ops.multiply(7.0, ox)
r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)
self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))
@test_util.run_deprecated_v1
def testCondGradMultiDevice(self):
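    # Computes first and second derivatives of a cond across explicit device
    # placements.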
config = config_pb2.ConfigProto(device_count={"CPU": 2},
allow_soft_placement=True)
with self.cached_session(use_gpu=True, config=config) as sess:
pred = array_ops.placeholder(dtypes.bool, [])
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
with ops.device("/cpu:0"):
z = control_flow_ops.cond(pred, lambda: x * y * 2.0, lambda: 2.0)
with ops.device("/cpu:1"):
grad = gradients_impl.gradients(z, x)[0]
with ops.device("/cpu:0"):
grad_grad = gradients_impl.gradients(grad, x)[0]
self.assertEqual(sess.run(grad, {pred: True, x: 1.0, y: 2.0}), 4.0)
self.assertEqual(sess.run(grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
# v1 control flow gets None second derivative for some reason.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertIsNone(grad_grad)
return
self.assertEqual(sess.run(grad_grad, {pred: True, x: 1.0, y: 2.0}), 0.0)
self.assertEqual(sess.run(grad_grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
@test_util.run_v1_only("b/120545219")
def testNestedCond_Simple(self):
with self.cached_session():
x = constant_op.constant(0., name="X")
y = control_flow_ops.cond(
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, self.evaluate(result))
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testCondGrad_Gather(self):
with self.cached_session() as sess:
v1 = variables.Variable([1.0, 42.0])
c = array_ops.placeholder(dtypes.int32, shape=[])
pred = math_ops.less(c, 2)
fn1 = lambda: array_ops.identity(v1)
fn2 = lambda: array_ops.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
# The following `grad` is a Tensor since it is the aggregation of an
      # IndexedSlices and a Tensor. It is an `IndexedSlices` with control flow
# v2.
grad = gradients_impl.gradients(r, [v1])[0]
self.evaluate(variables.global_variables_initializer())
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertIsInstance(grad, ops.IndexedSlices)
grad_value = sess.run(grad, feed_dict={c: 1})
self.assertAllEqual(gradient_checker_v2._to_numpy(grad_value), [1.0, 1.0])
grad_value = sess.run(grad, feed_dict={c: 3})
self.assertAllEqual(gradient_checker_v2._to_numpy(grad_value), [0.0, 2.0])
@test_util.run_deprecated_v1
def testCondGrad_ResourceVarSparseRead(self):
# NOTE(skyewm): this test is interesting because the
# ResourceVariable.sparse_read gradient function returns IndexedSlices.
var = resource_variable_ops.ResourceVariable(
np.ones((4, 2), dtype=np.float32))
x = constant_op.constant(1.0)
r = control_flow_ops.cond(
constant_op.constant(True),
lambda: x * math_ops.reduce_sum(var.sparse_read([1, 2])),
lambda: constant_op.constant(np.zeros((2, 3)),
dtype=dtypes.float32))
grad = gradients_impl.gradients(r, var)[0]
self.evaluate(variables.global_variables_initializer())
grad_val = self.evaluate(grad)
self.assertIsInstance(grad_val, ops.IndexedSlicesValue)
self.assertAllEqual(gradient_checker_v2._to_numpy(grad_val), [[0., 0.],
[1., 1.],
[1., 1.],
[0., 0.]])
def testCondGrad_MultiGather(self):
# NOTE(skyewm): this test is interesting because the array_ops.gather and
    # ResourceVariable.sparse_read gradient functions return IndexedSlices.
var = resource_variable_ops.ResourceVariable(
np.ones((4, 2), dtype=np.float32))
x1 = constant_op.constant(np.ones((3, 3), dtype=np.float32))
x2 = constant_op.constant(2.0)
def true_fn():
y1 = var.sparse_read([1, 2])
y2 = array_ops.gather(x1, [2]) * x2
y3 = x2 * [1., 1., 1.]
return y1, y2, y3
def false_fn():
y1 = np.zeros((2, 2), dtype=np.float32)
y2 = array_ops.gather(x1, [2]) * x2
y3 = array_ops.gather(x1, [2])
return y1, y2, y3
@def_function.function
def foo():
r = control_flow_ops.cond(constant_op.constant(True), true_fn, false_fn)
return gradients_impl.gradients(r, [var, x1, x2])
grad = foo()
self.evaluate(variables.global_variables_initializer())
var_grad, x1_grad, x2_grad = self.evaluate(grad)
self.assertIsInstance(var_grad, ops.IndexedSlicesValue)
self.assertAllEqual(gradient_checker_v2._to_numpy(var_grad), [[0., 0.],
[1., 1.],
[1., 1.],
                                                                   [0., 0.]])
self.assertIsInstance(x1_grad, ops.IndexedSlicesValue)
self.assertAllEqual(gradient_checker_v2._to_numpy(x1_grad), [[0., 0., 0.],
[0., 0., 0.],
[2., 2., 2.]])
self.assertEqual(gradient_checker_v2._to_numpy(x2_grad), 6.)
@test_util.run_v1_only("b/120545219")
def testCondPredicateTensor(self):
"""Regression test for lowering predicate from non-first output of an op."""
@eager_function.defun
def foo():
return constant_op.constant("foo"), constant_op.constant(True)
r = control_flow_ops.cond(foo()[1], lambda: 1.0, lambda: 2.0)
self.assertEqual(self.evaluate(r), 1.0)
@test_util.run_v1_only("Tests Session.run() pruning logic.")
def testCondFeedConstantPredicate(self):
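    # Feeding the constant predicate overrides its value, selecting either
    # branch at run time.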
with self.cached_session() as sess:
value = constant_op.constant(37.0)
predicate = constant_op.constant(True)
cond_output = control_flow_ops.cond(
predicate, lambda: constant_op.constant(0.0), lambda: value)
result = array_ops.identity(cond_output)
self.assertEqual(37.0, sess.run(result, feed_dict={predicate: False}))
self.assertEqual(0.0, sess.run(result, feed_dict={predicate: True}))
self.assertEqual(0.0, sess.run(result))
@test_util.run_v1_only("Tests Session.run() pruning logic.")
def testCondFeedPlaceholderWithDefaultPredicate(self):
with self.cached_session() as sess:
value = constant_op.constant(37.0)
predicate = array_ops.placeholder_with_default(
constant_op.constant(True), [])
cond_output = control_flow_ops.cond(
predicate, lambda: constant_op.constant(0.0), lambda: value)
result = array_ops.identity(cond_output)
self.assertAllEqual(37.0, sess.run(result, feed_dict={predicate: False}))
self.assertAllEqual(0.0, sess.run(result, feed_dict={predicate: True}))
self.assertAllEqual(0.0, sess.run(result))
@test_util.run_in_graph_and_eager_modes
def testCondAutoControlDeps(self):
if test_util.is_gpu_available():
self.skipTest("b/128676188 causes OOM on opensource gpu tests")
def branch_fn():
enqueue_print_op("A")
enqueue_print_op("B")
with ops.control_dependencies([enqueue_print_op("C")]):
return constant_op.constant(10)
def build_cond():
return control_flow_ops.cond(
constant_op.constant(True), branch_fn, lambda: 0)
def build_nested_cond():
return control_flow_ops.cond(
constant_op.constant(True), build_cond, lambda: 0)
# In v1 graph mode, pruning should make only "C" print.
if not context.executing_eagerly():
with self.cached_session():
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_cond()), 10)
self.assertEqual(["C"], filter_test_messages(printed.contents()))
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_nested_cond()), 10)
self.assertEqual(["C"], filter_test_messages(printed.contents()))
# In defuns, all prints should execute in program order.
# This doesn't work with legacy control flow.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
@eager_function.defun
def cond():
return build_cond()
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(cond()), 10)
self.assertEqual(["A", "B", "C"],
filter_test_messages(printed.contents()))
@eager_function.defun
def nested_cond():
return build_nested_cond()
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(nested_cond()), 10)
self.assertEqual(["A", "B", "C"],
filter_test_messages(printed.contents()))
# wrap_function should prune.
def pruned_cond():
return build_cond()
pruned_cond = wrap_function.wrap_function(pruned_cond, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_cond()), 10)
self.assertEqual(["C"], filter_test_messages(printed.contents()))
def pruned_nested_cond():
return build_nested_cond()
pruned_nested_cond = wrap_function.wrap_function(pruned_nested_cond, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_nested_cond()), 10)
self.assertEqual(["C"], filter_test_messages(printed.contents()))
@test_util.run_in_graph_and_eager_modes
def testWhileAutoControlDeps(self):
# Legacy while_loop fails this test because it produces deprecation notices
# in stderr.
    if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
      return
def cond(i, unused_x):
enqueue_print_op("A")
return i < 2
def body(i, x):
enqueue_print_op("B")
with ops.control_dependencies([enqueue_print_op("C")]):
x = array_ops.identity(x)
with ops.control_dependencies([enqueue_print_op("D")]):
return i + 1, x
def build_while():
return control_flow_ops.while_loop(
cond, body, [constant_op.constant(0), constant_op.constant(0)])
def build_nested_while():
return control_flow_ops.cond(
constant_op.constant(True), build_while, lambda: [0, 0])
# In v1 graph mode, pruning should make only "D" print.
if not context.executing_eagerly():
with self.cached_session():
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_while()[0]), 2)
self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_nested_while()[0]), 2)
self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
# In defuns, all prints should execute in program order.
@eager_function.defun
def while_loop():
return build_while()[0]
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(while_loop()), 2)
self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
filter_test_messages(printed.contents()))
@eager_function.defun
def nested_while_loop():
return build_nested_while()[0]
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(nested_while_loop()), 2)
self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
filter_test_messages(printed.contents()))
# wrap_function should prune.
def pruned_while():
return build_while()[0]
pruned_while = wrap_function.wrap_function(pruned_while, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_while()), 2)
self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
def pruned_nested_while():
return build_nested_while()[0]
pruned_nested_while = wrap_function.wrap_function(pruned_nested_while, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_nested_while()), 2)
self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
with self.cached_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependencies(self):
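    # An op captured from outside the loop body via control_dependencies must
    # still execute when the loop runs.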
with self.cached_session():
v = variables.Variable(0.0)
self.evaluate(v.initializer)
increment = v.assign_add(1.0).read_value()
def body_fn(i):
with ops.control_dependencies([increment]):
return i + 1
result = control_flow_ops.while_loop(cond=lambda i: i < 2,
body=body_fn, loop_vars=[1])
self.assertAllEqual(result, 2)
self.assertAllEqual(v.read_value(), 1.0)
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependenciesNoInput(self):
with self.cached_session():
v = variables.Variable(0.0)
self.evaluate(v.initializer)
# TODO(apassos): figure out why the reading is necessary here.
increment = v.assign_add(1.0).read_value()
def body_fn(unused_i):
with ops.control_dependencies([increment]):
return constant_op.constant(5, name="five")
result = control_flow_ops.while_loop(cond=lambda i: i < 5,
body=body_fn, loop_vars=[0])
self.evaluate(result)
self.assertAllEqual(self.evaluate(v), 1.0)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefs_1(self):
with self.cached_session() as sess:
x = variables.VariableV1(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return (i + 1, gen_array_ops.ref_identity(x))
r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = self.evaluate(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.cached_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, self.evaluate(r))
def testWhileWithMaximumIterations(self):
with self.cached_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
self.assertEqual(1, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testXLAGradInLoop(self):
    # We have an optimization that moves certain reduction ops; this test makes
    # sure we don't do that for XLA ops.
    # Use dynamic inputs, which triggers the creation of "BroadcastGradientArgs"
    # and "Shape" ops.
input1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
input2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
def cond(i1, i2):
return False
def body(i1, i2):
return math_ops.add(i1, i2), math_ops.add(i1, i2)
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
out1, _ = control_flow_ops.while_loop(
cond, body, (input1, input2), maximum_iterations=2)
g = gradients_impl.gradients(out1, [input1])
for op in out1.graph.get_operations():
# Test that the "Shape" is directly passed to BroadcastGradientArgs
# instead of being pushed to the stack.
if op.type == "BroadcastGradientArgs":
self.assertEqual(op.inputs[0].op.type, "Shape")
self.assertEqual(op.inputs[1].op.type, "Shape")
xla_context.Exit()
@test_util.disable_control_flow_v2("b/115776323 (max_iters)")
@test_util.run_v1_only("b/120545219")
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def training_loop_with_gradient(i):
out = control_flow_ops.while_loop(
lambda i_, _: i_ < 3,
lambda i_, j: [i_ + 1, j * v], [0, 1.0],
maximum_iterations=i)
g = gradients_impl.gradients(out, v)
with ops.control_dependencies(g):
return i + 1
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
# Create training loop, ensure we can call gradient() of
# while_loop inside the training loop.
loop = control_flow_ops.while_loop(lambda i: i < 3,
training_loop_with_gradient, [0])
xla_context.Exit()
loop_execute = array_ops.identity(loop) # Because loop is not fetchable.
# Should execute without issue.
self.assertEqual(3, self.evaluate(loop_execute))
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.skipTest("WhileV2 does lazy evaluation of maximum_iterations")
v = constant_op.constant(1.0)
def inner_body(i, x):
out = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, j: [i + 1, j * v], [0, x],
maximum_iterations=i)
return out
def create_while_loop(maximum_iterations=None):
return control_flow_ops.while_loop(
lambda i, _: i < 3,
inner_body, [0, 1.0],
maximum_iterations=maximum_iterations)
loop_no_xla = create_while_loop(maximum_iterations=5)
# maximum_iterations is fine outside of an XLA scope
gs = gradients_impl.gradients(loop_no_xla, v)
self.evaluate(gs) # This should execute without error.
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop_no_maxiter = create_while_loop()
loop_with_maxiter = create_while_loop(maximum_iterations=2)
xla_context.Exit()
with self.assertRaisesRegex(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside "
r"XLA while_loop because maximum_iterations was not passed to "
r"the tf.while_loop call \('.+'\)."):
_ = gradients_impl.gradients(loop_no_maxiter, v)
with self.assertRaisesRegex(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.+' for while_loop context "
r"'.+' must be statically known \(e.g. a constant value or known "
r"shape dimension\), or be defined at or outside the while loop "
r"context '.*' \(currently defined in '.*'\)"):
_ = gradients_impl.gradients(loop_with_maxiter, v)
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
v = constant_op.constant(1.0)
def create_while_loop():
max_iter_holder = []
def create_mi():
max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
return 1.0
_ = control_flow_ops.cond(
constant_op.constant(True), create_mi, create_mi)
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=max_iter_holder[0])
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
with self.assertRaisesRegex(ValueError, r"must be from the same graph.*"):
loop = create_while_loop()
xla_context.Exit()
else:
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop = create_while_loop()
xla_context.Exit()
with self.assertRaisesRegex(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
r"while_loop context '.+' must be statically known \(e.g. a constant "
r"value or known shape dimension\), or be defined at or outside the "
r"while loop context '' \(currently defined in 'cond/.+'\)"):
_ = gradients_impl.gradients(loop, v)
@test_util.run_v1_only("b/120545219")
def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
if test_util.is_gpu_available():
self.skipTest("b/128646372, b/128645947 fails in opensource build")
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
with ops.device("/cpu:0"):
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
final_with_xla_context = create_while_loop()
xla_context.Exit()
final_without_xla_context = create_while_loop()
with self.session(use_gpu=False) as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata_without_xla_context = config_pb2.RunMetadata()
run_metadata = config_pb2.RunMetadata()
final_value_without_xla_context = sess.run(
final_without_xla_context,
feed_dict={p: [0, 0, 0]},
options=opts,
run_metadata=run_metadata_without_xla_context)
final_value_with_xla_context = sess.run(
final_with_xla_context,
feed_dict={p: [0, 0, 0]},
options=opts,
run_metadata=run_metadata)
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
      # With while_v2 on XLA, run_metadata only contains the unlowered While
      # op, so node_stats has no statistics for the pushes. As a loose check,
      # we count the pushes in the lowered version instead.
for dev in run_metadata_without_xla_context.step_stats.dev_stats:
if "/device:CPU" in dev.device:
node_stats = dev.node_stats
stack_push_count = len([
x for x in node_stats
if re.match(r".*TensorListPushBack_?\d*", x.node_name)
])
else:
for dev in run_metadata.step_stats.dev_stats:
if "/device:CPU" in dev.device:
node_stats = dev.node_stats
stack_push_op = "StackPushV2"
stack_push_count = len(
              [x for x in node_stats if x.node_name.endswith(stack_push_op)])
# Pushes to the stack = product of maximum_iterations values;
    # the last two "3"s come from size(p), when p == [0, 0, 0].
self.assertEqual(stack_push_count, 5 * 3 * 3, str(node_stats))
self.assertAllClose(final_value_with_xla_context,
final_value_without_xla_context)
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
@test_util.run_deprecated_v1
def testWhile_3(self):
with self.cached_session():
def compute(i, m, c, o):
m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
d = ops.convert_to_tensor(100)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
compute, [i, m, c, o])
result = r[3]
self.assertAllEqual(10100, result)
@test_util.run_deprecated_v1
def testWhile_4(self):
with self.cached_session():
def compute(i, m, c, o):
m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
compute, [i, m, c, o])
result = r[3]
self.assertAllEqual(42, result)
@test_util.run_v1_only("b/120545219")
def testWhile_5(self):
with self.cached_session():
def compute(i, c, o):
c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
i = ops.convert_to_tensor(0)
c = ops.convert_to_tensor([0])
o = ops.convert_to_tensor([0])
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
compute, [i, c, o], [
i.get_shape(),
tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()
])
result = r[2]
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testWhile_Device(self):
# Body function defined outside of device scope
def body(x):
return math_ops.exp(x)
with ops.device("CPU:0"):
r = control_flow_ops.while_loop(
lambda x: x < 10, body, [constant_op.constant(-10.)])
self.assertIn("cpu", r.device.lower())
with session.Session() as sess:
options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(r, options=options, run_metadata=run_metadata)
# We expect that everything runs on CPU, even if GPU is available.
self.assertEqual(len(run_metadata.partition_graphs), 1)
@test_util.disable_control_flow_v2("b/116338794 (buffer_reuse)")
@test_util.run_v1_only("b/120545219")
def testBufferForwarding(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.cached_session() as sess:
with ops.device("/cpu:0"):
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
self.assertEqual(1000, r_val)
self.assertTrue(run_metadata.HasField("step_stats"))
unique_allocs = set()
for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
for output in node_stat.output:
unique_allocs.add(
output.tensor_description.allocation_description.ptr)
# Prior to cl/147536680, the number of unique allocations was about 1005.
self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, self.evaluate(r))
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, self.evaluate(r))
def testWhile_Gpu_2(self):
self._testWhile_Gpu_2(use_gpu=False)
self._testWhile_Gpu_2(use_gpu=True)
def testWhileShape(self):
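    # With an unknown shape invariant, the loop variable's shape may change
    # across iterations (here each dimension doubles every step).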
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def _b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, _b, [i, m],
[i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), self.evaluate(r))
@test_util.disable_control_flow_v2("b/131265085")
@test_util.run_v1_only("b/131265085")
def testWhileBadShape(self):
x = constant_op.constant([2.0, 4.0], name="values")
i = constant_op.constant(0)
c = lambda i, _: math_ops.less(i, 10)
b = lambda i, x: [i + 1, x + 1]
with self.assertRaisesRegex(ValueError, "is not compatible with"):
# Shape of x is [2], but we specify a shape of [5].
control_flow_ops.while_loop(
c, b, [i, x], [i.shape, tensor_shape.TensorShape([5])])
@test_util.run_in_graph_and_eager_modes
def testWhileBadBodyReturn(self):
x = constant_op.constant([2.0, 4.0], name="values")
i = constant_op.constant(0)
c = lambda i, *x: math_ops.less(i, 10)
# body accepts N values and returns N+1 values.
b = lambda i, *x: (i, i) + x
with self.assertRaisesRegex(
ValueError, "The two structures don't have the same nested structure."):
control_flow_ops.while_loop(c, b, [i, x])
@test_util.run_deprecated_v1
def testWhileWithNonTensorInput_Scalar(self):
with self.cached_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, self.evaluate(r))
def testWhileWithNonTensorInput_Vector(self):
with self.cached_session():
      n = np.array([0])  # Note: a plain Python list [0] would not work here.
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], self.evaluate(r))
def testWhileShapeInference(self):
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, b, [i, m],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertTrue(r[1].shape.is_compatible_with([8, 2]))
@test_util.run_v1_only("b/120545219")
def testWhileShapeInferenceBadShape(self):
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
b = lambda i, j: [i + 1, array_ops.concat([j, j], 0)]
with self.assertRaisesRegex(
ValueError,
r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
r"shape \(4, 2\) after one iteration. To allow the shape to vary "
r"across iterations, use the `shape_invariants` argument of "
r"tf.while_loop to specify a less-specific shape."):
control_flow_ops.while_loop(c, b, [i, m])
def testWhileShapeInferenceSparseTensor(self):
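    # Exercises shape inference for SparseTensor loop variables under the
    # default and explicit shape invariants; see b1/b2/b3 below.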
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b1(i, x): # modifies values. (shape of components is not changed.)
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
def b2(i, x): # adds new values. (shape of components is changed.)
return [
i + 1,
sparse_ops.sparse_add(
x,
sparse_tensor.SparseTensor(
indices=math_ops.cast(
array_ops.fill([1, 1], i), dtypes.int64),
values=array_ops.fill([1], 1.0),
dense_shape=x.dense_shape))
]
def b3(i, x): # modifies rank. (shape of all components is changed.)
return [
i + 1,
sparse_tensor.SparseTensor(
array_ops.concat([x.indices, [[i], [i]]], axis=1), x.values * 2.0,
array_ops.concat([x.dense_shape, [10]], axis=0))
]
def check_shapes(r, indices, values, dense_shape):
self.assertTrue(r.indices.shape.is_compatible_with(indices))
self.assertTrue(r.values.shape.is_compatible_with(values))
self.assertTrue(r.dense_shape.shape.is_compatible_with(dense_shape))
# Default shape invariant; b1 only modifies values.
_, r = control_flow_ops.while_loop(c, b1, [i, x])
check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])
# Default shape invariant; b2 adds new values
_, r = control_flow_ops.while_loop(c, b2, [i, x])
check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])
# Explicit shape invariant, allowing any rank; b1 only modifies values.
_, r = control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None])])
check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
# Explicit shape invariant, allowing any rank; b3 modifies rank.
_, r = control_flow_ops.while_loop(
c, b3, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None])])
check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
# Shape invariant with ndims=None. Technically, this isn't supported
# according to the docs, but we support it for backwards compatibility.
_, r = control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), tensor_shape.TensorShape(None)])
check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
_, r = control_flow_ops.while_loop(
c, b3, [i, x],
[i.get_shape(), tensor_shape.TensorShape(None)])
check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
@test_util.disable_control_flow_v2("b/131265085")
@test_util.run_v1_only("b/131265085")
def testWhileBadShapeSparseTensor(self):
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
c = lambda i, _: i < 10
    b1 = lambda i, x: [i + 1, x]
def b2(i, x): # modifies rank. (shape of all components is changed.)
return [
i + 1,
sparse_tensor.SparseTensor(
array_ops.concat([x.indices, [[i], [i]]], axis=1), x.values * 2.0,
array_ops.concat([x.dense_shape, [10]], axis=0))
]
# Explicit shape invariant, with a specific (incompatible) rank.
with self.assertRaisesRegex(ValueError, "is not compatible with"):
control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
# Default shape invariant, but b2 modifies rank (which is not allowed).
with self.assertRaises(ValueError):
control_flow_ops.while_loop(c, b2, [i, x])
def testWhileShapeInferenceIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10, 2], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0], 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0], 2)
self.assertTrue(r.values.get_shape().is_compatible_with([None, 2]))
@test_util.disable_control_flow_v2("b/131265085")
@test_util.run_v1_only("b/131265085")
def testWhileBadShapeIndexedSlices(self):
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
    c = lambda i, _: i < 10
    b = lambda i, x: [i + 1, x]
# Explicit shape invariant, with a specific (incompatible) rank.
with self.assertRaisesRegex(ValueError, "is not compatible with"):
control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
def testWhileShapeInferenceRaggedTensor(self):
i = constant_op.constant(0)
x = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
c = lambda i, _: i < 10
def b1(i, x): # Adds new values to rows (but doesn't create new rows)
return [
i + 1,
array_ops.concat([x, x], axis=1)
]
def b2(i, x): # Adds new rows.
return [
i + 1,
array_ops.concat([x, x], axis=0)
]
def check_shapes(r, values, splits):
self.assertTrue(r.values.shape.is_compatible_with(values))
self.assertTrue(r.row_splits.shape.is_compatible_with(splits))
# Default shape invariant; b1 adds new values to rows.
_, r = control_flow_ops.while_loop(c, b1, [i, x])
check_shapes(r, values=[None], splits=[4])
# Default shape invariant; b2 adds new rows (not allowed).
if not context.executing_eagerly():
with self.assertRaises(ValueError):
_, r = control_flow_ops.while_loop(c, b2, [i, x])
# Explicit shape invariant; b1 adds new values to rows.
# (deprecated: use TensorShape instead of RaggedTensorSpec)
_, r = control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, None])])
check_shapes(r, values=[None], splits=[None])
# Explicit shape invariant; b1 adds new values to rows.
_, r = control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), ragged_tensor.RaggedTensorSpec([None, None],
dtypes.int32)])
check_shapes(r, values=[None], splits=[None])
# Explicit shape invariant; b2 adds new rows.
_, r = control_flow_ops.while_loop(
c, b2, [i, x],
[i.get_shape(), ragged_tensor.RaggedTensorSpec([None, None],
dtypes.int32)])
check_shapes(r, values=[None], splits=[None])
def testWhileShapeInferenceRaggedTensorRaggedRank2(self):
i = constant_op.constant(0)
x = ragged_factory_ops.constant([[[1, 2], [3], [4, 5, 6]],
[[], [8, 9, 10]]])
c = lambda i, _: i < 10
def b(i, x):
return [
i + 1,
array_ops.concat([x, x[..., i:i+1]], axis=-1)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.row_splits.shape.as_list(), [3])
self.assertTrue(r.values.row_splits.shape.as_list() in ([6], [None]))
self.assertTrue(r.values.values.shape.as_list() in ([49], [None]))
def testWhileShapeInvariantTensorSpec(self):
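    # TensorSpec objects are accepted as shape invariants.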
i = constant_op.constant(0)
x = constant_op.constant([1])
c = lambda i, _: i < 10
b = lambda i, x: (i + 1, array_ops.stack([x, x]))
shape_invariants = [
tensor_spec.TensorSpec([], dtype=dtypes.int32),
tensor_spec.TensorSpec(None, dtype=dtypes.int32)]
control_flow_ops.while_loop(c, b, [i, x], shape_invariants)
# TODO(b/131265085) Remove this decorator when bug is fixed.
@test_util.build_as_function_and_v1_graph
def testWhileShapeInvariantWrongTypeSpecType(self):
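    # A TypeSpec shape invariant must match the loop variable's type: a
    # SparseTensorSpec is valid for a SparseTensor loop variable, but not for
    # dense or ragged ones.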
c = lambda i, _: i < 10
b = lambda i, x: (i + 1, x)
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor([[0]], [1.0], [10])
shape_invariants = [
tensor_spec.TensorSpec([], dtype=dtypes.int32),
sparse_tensor.SparseTensorSpec([None])]
control_flow_ops.while_loop(c, b, [i, x], shape_invariants)
x2 = constant_op.constant([1])
with self.assertRaises(TypeError):
control_flow_ops.while_loop(c, b, [i, x2], shape_invariants)
x3 = ragged_factory_ops.constant([[1, 2], [3]])
with self.assertRaises(TypeError):
control_flow_ops.while_loop(c, b, [i, x3], shape_invariants)
i2 = constant_op.constant(0.0)
with self.assertRaises(TypeError):
control_flow_ops.while_loop(c, b, [i2, x], shape_invariants)
# TODO(b/131265085) Remove this decorator when bug is fixed.
@test_util.build_as_function_and_v1_graph
def testWhileShapeInvariantBadType(self):
i = constant_op.constant(0)
x = constant_op.constant([1])
c = lambda i, _: i < 10
b = lambda i, x: (i + 1, x)
with self.assertRaises((ValueError, TypeError)):
control_flow_ops.while_loop(c, b, [i, x], ["foo", "bar"])
def _testNestedWhile_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(0)
def cpu_sum(s):
c = lambda i, s: math_ops.less(i, 10)
def b(i, s):
i1 = math_ops.add(i, 1)
with ops.device("/cpu:0"):
s1 = math_ops.add(i, s)
return i1, s1
_, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
self.assertEqual(225, self.evaluate(r))
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def _testNestedWhile_2(self, use_gpu):
    # Test the cases where the A -> Enter and Exit -> A edges are partitioned.
with self.cached_session(use_gpu=use_gpu):
s0 = constant_op.constant(2.0)
def inner_loop(s):
c = lambda s: math_ops.less(s, 20.0)
def b(s):
s1 = math_ops.add(s, s)
return s1
r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
outer_c = lambda x: math_ops.less(x, 3000.0)
def outer_b(x):
x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
with ops.device("/cpu:0"):
x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
self.assertEqual(1048576.0, self.evaluate(r))
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_1(self):
with self.cached_session():
n = constant_op.constant(0)
r = constant_op.constant(0)
condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [n_, r_]
res = control_flow_ops.while_loop(
condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1])
@test_util.run_deprecated_v1
def testWhileWithControl_2(self):
with self.cached_session():
r = constant_op.constant(0)
condition = lambda r_: math_ops.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [r_]
res = control_flow_ops.while_loop(
condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, self.evaluate(res))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_3(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_4(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(
lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_5(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
def body(x):
with ops.control_dependencies([b]):
return x + c
r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
    # Ensure that no control edges from an outer control dependency context
    # are added to nodes inside cond/while contexts.
with self.cached_session() as sess:
const_true = lambda: constant_op.constant(True)
const_false = lambda: constant_op.constant(False)
cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
with ops.control_dependencies([control_flow_ops.no_op()]):
loop = control_flow_ops.while_loop(cond, body,
(constant_op.constant(5),))
self.assertEqual(0, self.evaluate(loop))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testWhileCondWithControl_1(self):
with self.cached_session():
v = variable_scope.get_variable(
"v", [], initializer=init_ops.constant_initializer(2))
i0 = constant_op.constant(0)
with ops.control_dependencies([i0]):
def loop_condition(i):
return i < 4
def loop_body(i):
some_cond = control_flow_ops.cond(
constant_op.constant(True),
lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)
with ops.control_dependencies([some_cond]):
return i + 1
r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(4, self.evaluate(r))
self.assertAllClose(65536.0, self.evaluate(v))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testWhileCondExitControl(self):
with self.cached_session():
v = variables.Variable(1)
def false_branch():
cond = lambda i: i < 100
def body(i):
x = state_ops.assign(v, i)
return x + 1
loop = control_flow_ops.while_loop(cond, body, [0])
        # Make sure the control edge from the loop's Exit node to a
        # downstream node is handled correctly.
with ops.control_dependencies([loop]):
return constant_op.constant(6.0)
r = control_flow_ops.cond(
constant_op.constant(False), lambda: constant_op.constant(1.0),
false_branch)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(6.0, self.evaluate(r))
self.assertEqual(99, self.evaluate(v))
def testCondWhile_1(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, self.evaluate(r))
def testCondWhile_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, self.evaluate(r))
def _testCondWhile_3(self, use_gpu):
with self.cached_session(use_gpu=use_gpu) as sess:
p = array_ops.placeholder(dtypes.bool)
n = constant_op.constant(0.0)
def c(x):
return math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
x1 = math_ops.add(x, 1.0)
return x1
r = control_flow_ops.cond(p,
lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: math_ops.multiply(n, 2.0))
r1 = gradients_impl.gradients(r, [n])
self.assertEqual(10., sess.run(r, {p: True}))
self.assertEqual([1.0], sess.run(r1, {p: True}))
self.assertEqual(0.0, sess.run(r, {p: False}))
self.assertEqual([2.0], sess.run(r1, {p: False}))
@test_util.run_deprecated_v1
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
def testWhileCond_1(self):
with self.cached_session():
i = ops.convert_to_tensor(0, name="i")
n = ops.convert_to_tensor(10, name="n")
one = ops.convert_to_tensor(1, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
      b = lambda x: control_flow_ops.cond(
          constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_3(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileCondGradMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 2},
allow_soft_placement=True)
with self.cached_session(use_gpu=True, config=config) as sess:
pred = array_ops.placeholder(dtypes.bool, [])
x_init = constant_op.constant(1.0)
with ops.device("/cpu:0"):
z = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, control_flow_ops.cond(
pred, lambda: x * 2.0, lambda: 10.0)),
[0, x_init])
with ops.device("/cpu:1"):
grad = gradients_impl.gradients(z, x_init)[0]
with ops.device("/cpu:0"):
grad_grad = gradients_impl.gradients(grad, x_init)[0]
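      # With pred=True each of the 3 iterations doubles x, so z[1] = 8 * x_init
      # and the gradient is 8; with pred=False the cond returns a constant, so
      # the gradient wrt x_init is 0.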
self.assertEqual(sess.run(grad, {pred: True}), 8.0)
self.assertEqual(sess.run(grad, {pred: False}), 0.0)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
return
self.assertEqual(sess.run(grad_grad, {pred: True}), 0.0)
self.assertEqual(sess.run(grad_grad, {pred: False}), 0.0)
# NOTE: It is ok to have parallel_iterations > 1
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1
def testWhileUpdateVariable_1(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3, self.evaluate(r))
result = self.evaluate(select)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_2(self):
with self.cached_session():
select1 = variables.Variable([3.0, 4.0, 5.0])
select2 = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns1 = state_ops.scatter_update(select1, j, 10.0)
ns2 = state_ops.scatter_update(select2, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3, self.evaluate(r))
result1 = self.evaluate(select1)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = self.evaluate(select2)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_3(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j, _):
return math_ops.less(j, 3)
def loop_body(j, _):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
return [nj, ns]
r = control_flow_ops.while_loop(
loop_iterator,
loop_body, [n, array_ops.identity(select)],
parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
result = r[1]
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_4(self):
with self.cached_session():
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
self.evaluate(variables.global_variables_initializer())
c = constant_op.constant(0, name="c")
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_5(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
self.evaluate(variables.global_variables_initializer())
# Change condition to check var_b
def pred(_):
return math_ops.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = state_ops.assign_add(
var_a, constant_op.constant(1), name="a_add")
asn2 = state_ops.assign_add(
var_b, constant_op.constant(1), name="b_add")
with ops.control_dependencies([asn1, asn2]):
inc_b = array_ops.identity(var_b)
return inc_b
lpa = control_flow_ops.while_loop(
pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(10, self.evaluate(var_a))
self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_6(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
c = constant_op.constant(0)
self.evaluate(variables.global_variables_initializer())
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
with ops.control_dependencies([asn1]):
asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
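      # Each iteration increments var_a and adds it to var_b, so var_b ends
      # at 1 + 2 + ... + 10 = 55.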
self.assertEqual(55, self.evaluate(var_b))
self.assertEqual(10, self.evaluate(var_a))
@test_util.run_v1_only("b/120545219")
def testWhileQueue_1(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], self.evaluate(r))
for i in xrange(10):
self.assertEqual([i], self.evaluate(q.dequeue()))
@test_util.run_v1_only("b/120545219")
def testWhileTimeOut(self):
run_options = config_pb2.RunOptions(timeout_in_ms=1)
with self.cached_session() as sess:
n = constant_op.constant(0)
c = lambda x: True
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n])
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(r, options=run_options)
@test_util.disable_control_flow_v2("b/117119329 (stack)")
@test_util.run_v1_only("b/120545219")
def testWhileStack_1(self):
with self.cached_session():
s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops.stack_push_v2(s, i)], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
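      # The loop above pushes 0..9 onto the stack; the loop below pops the
      # values back off and accumulates their sum (45).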
x = constant_op.constant(0)
def c1(i, _):
return math_ops.greater(i, 0)
def b1(i, x):
ni = math_ops.subtract(i, 1)
nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
return [ni, nx]
_, rx = control_flow_ops.while_loop(
c1,
b1, [r, x],
[r.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
self.assertEqual(45, self.evaluate(rx))
def _testWhileGrad_ColocateGradients(self, colocate):
gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
) else "/device:CPU:0"
graph = ops.Graph()
with graph.as_default():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(
loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = graph.get_operations()
r_devices = [(op.name, op.device) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
          # Without colocation, only the forward graph's Square op is placed
          # on the GPU.
self.assertTrue(gpu_dev_name in dev)
elif colocate and "Square" in name:
          # With colocation, both forward and backward graphs place the
          # Square/Square_grad ops on the GPU.
self.assertTrue(gpu_dev_name in dev)
else:
self.assertFalse(gpu_dev_name in dev)
with self.session(graph=graph) as sess:
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/116351701 (colocation)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Square(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
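      # The loop squares v three times (2 -> 4 -> 16 -> 256), so r == v**8
      # and dr/dv = 8 * v**7 = 1024 at v = 2.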
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Shape(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=[None])
v = constant_op.constant([2.0], name="v")
n = constant_op.constant(0, name="n")
c = lambda i, v: math_ops.less(i, 5)
b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
r = control_flow_ops.while_loop(
c,
b, [n, v],
[n.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
r = gradients_impl.gradients(r[1], x)[0]
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
@test_util.run_deprecated_v1
def testWhileGrad_BaseShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, [None])
v0 = constant_op.constant([2.0, 2.0], name="v")
c = lambda v: constant_op.constant(False)
b = lambda v: math_ops.multiply(v, x)
r = control_flow_ops.while_loop(c, b, [v0])
y = math_ops.square(x)
r = gradients_impl.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
@test_util.run_deprecated_v1
@test_util.enable_output_all_intermediates
def testWhileGradAfterSessionRun(self):
v0 = constant_op.constant(2.)
r = control_flow_ops.while_loop(
lambda _: True, lambda v: v * v, [v0], maximum_iterations=3)
self.assertAllEqual(r, 256.)
grad = gradients_impl.gradients(r, v0)[0]
self.assertAllClose(grad, 1024.)
@test_util.run_deprecated_v1
@test_util.enable_output_all_intermediates
def testNestedWhileGradAfterSessionRun(self):
v0 = constant_op.constant(2.)
def body(v):
inner_v0 = constant_op.constant(1.)
return control_flow_ops.while_loop(
lambda _: True, lambda x: x * v, [inner_v0], maximum_iterations=2)
r = control_flow_ops.while_loop(
lambda _: True, body, [v0], maximum_iterations=3)
self.assertAllEqual(r, 256.)
grad = gradients_impl.gradients(r, v0)[0]
self.assertAllClose(grad, 1024.)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_MultipleUses(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.multiply(r, r)
r = gradients_impl.gradients(r, v)[0]
self.assertEqual(524288.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_LoopAdd(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(2048.0, self.evaluate(r))
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.cached_session(use_gpu=use_gpu) as sess:
a = constant_op.constant(3.0, name="a")
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
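      # The loop multiplies v by a four times (2 -> 6 -> 18 -> 54 -> 162),
      # so r = v * a**4: dr/da = 4 * v * a**3 = 216 and dr/dv = a**4 = 81.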
grad_a, grad_v = gradients_impl.gradients(r, [a, v])
grad_a_val, grad_v_val = self.evaluate([grad_a, grad_v])
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
@test_util.run_deprecated_v1
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def testWhileGradInControlDeps(self):
@def_function.function
def f():
x_init = constant_op.constant(2.)
loop_cond = lambda i, x: math_ops.less(i, 2)
loop_body = lambda i, x: [i + 1, x**2]
_, x = control_flow_ops.while_loop(loop_cond, loop_body, [0, x_init])
with ops.control_dependencies([x]):
(grad,) = gradients_impl.gradients(x, x_init)
return grad
self.assertAllEqual(f(), 4. * 2.**3) # 4 * x_init ^ 3
@test_util.run_deprecated_v1
def testTfFunctionInV1WhileLoop(self):
    # This test specifically checks that creating a Const node inside a
    # tf.function, which is itself inside a v1 while_loop, works when
    # function inlining is turned on.
config = opt_cfg()
assert config.graph_options.optimizer_options.do_function_inlining
with session.Session(config=config):
@def_function.function
def loop_body(i):
# Here we create the const.
return i + 1.
loop_cond = lambda i: True
x = control_flow_ops.while_loop(
loop_cond, loop_body, [0.], maximum_iterations=5)
self.assertAllEqual(x, 5.)
def _testNestedWhileCondWhileGrad(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
def b(x):
return control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.square(inner_loop(x)[1]),
lambda: math_ops.multiply(x, 2.0))
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileCondWhileGrad(self):
self._testNestedWhileCondWhileGrad(use_gpu=False)
@test_util.run_deprecated_v1
def testNestedWhileCondWhileGradGpu(self):
self._testNestedWhileCondWhileGrad(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Variable(self):
with self.cached_session():
a = variables.Variable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(r, a)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(216.0, r[0])
@test_util.run_deprecated_v1
def testWhileGrad_ResourceVariable(self):
with self.cached_session():
a = resource_variable_ops.ResourceVariable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
g = gradients_impl.gradients(r, a)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(216.0, g[0])
def testWhileGrad_EagerResourceVariable(self):
with context.eager_mode():
a = resource_variable_ops.ResourceVariable(
np.ones([2, 2], dtype=np.float32))
v = constant_op.constant(1.0)
@eager_function.defun
def fn():
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, x * math_ops.reduce_sum(a) * v),
[0, 1.0])[1]
return gradients_impl.gradients(r, [v])[0]
self.assertEqual(self.evaluate(fn()), 32.)
def testWhileGrad_ResourceVarInFunctionCall(self):
@def_function.function
def foo(x, var):
return x + math_ops.reduce_sum(var.sparse_read([1, 3]))
@def_function.function
def bar(var):
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, foo(x, var)),
[0, 0.0])[1]
return gradients_impl.gradients(r, var)[0]
var = resource_variable_ops.ResourceVariable([1., 2., 3., 4.])
self.evaluate(variables.global_variables_initializer())
grad = self.evaluate(bar(var))
self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.])
def testWhileGrad_ResourceVarInNestedFunctionCall(self):
@def_function.function
def foo(x, var):
return x + math_ops.reduce_sum(var.sparse_read([1, 3]))
@def_function.function
def foo2(x, var):
return foo(x, var)
@def_function.function
def bar(var):
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, foo2(x, var)),
[0, 0.0])[1]
return gradients_impl.gradients(r, var)[0]
var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.])
self.evaluate(variables.global_variables_initializer())
grad = self.evaluate(bar(var))
self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.])
def testWhileGrad_ResourceVarInLoopInFunctionCall(self):
if test.is_gpu_available():
self.skipTest("b/128635252")
@def_function.function
def foo(x, var):
return control_flow_ops.while_loop(
lambda j, _: j < 3,
lambda j, y: (j + 1,
y + math_ops.reduce_sum(var.sparse_read([1, 2]))),
[0, x])[1]
@def_function.function
def bar(var):
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, foo(x, var)),
[0, 0.0])[1]
return gradients_impl.gradients(r, var)[0]
var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.])
self.evaluate(variables.global_variables_initializer())
grad = self.evaluate(bar(var))
self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 6., 6., 0.])
def testWhileCondGrad_ResourceVarInFunctionCall(self):
@def_function.function
def foo(x, var):
return x + var.sparse_read([1])[0]
def body(i, x):
return (i + 1, control_flow_ops.cond(
math_ops.equal(i % 2, 0),
lambda: foo(x, var1),
lambda: foo(x, var2)))
@def_function.function
def bar(var1, var2):
r = control_flow_ops.while_loop(
lambda i, _: i < 4, body, [0, 0.0])
return gradients_impl.gradients(r, [var1, var2])
var1 = resource_variable_ops.ResourceVariable([1., 2., 3.])
var2 = resource_variable_ops.ResourceVariable([4., 5.])
self.evaluate(variables.global_variables_initializer())
grads = self.evaluate(bar(var1, var2))
self.assertAllEqual(gradient_checker_v2._to_numpy(grads[0]), [0., 2., 0.])
self.assertAllEqual(gradient_checker_v2._to_numpy(grads[1]), [0., 2.])
@test_util.run_deprecated_v1
def testWhileGrad_ResourceVarSparseRead(self):
# NOTE(skyewm): this test is interesting because the gradient is the
# aggregation result of IndexedSlices and Tensors.
var = resource_variable_ops.ResourceVariable(np.ones(5),
dtype=dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, x * math_ops.reduce_sum(var.sparse_read([1, 3]))),
[0, constant_op.constant(1.0)])[1]
grad = gradients_impl.gradients(r, var)[0]
self.evaluate(variables.global_variables_initializer())
grad_val = self.evaluate(grad)
arr = gradient_checker_v2._to_numpy(grad_val)
self.assertAllEqual(arr, [0., 12., 0., 12., 0.])
@test_util.run_deprecated_v1
def testWhileGrad_MultiResourceVarSparseRead(self):
# NOTE(skyewm): this test is interesting because the gradient is the
# aggregation result of IndexedSlices and Tensors.
var1 = resource_variable_ops.ResourceVariable(np.ones(5),
dtype=dtypes.float32)
var2 = resource_variable_ops.ResourceVariable(np.ones(3),
dtype=dtypes.float32)
x1_init = constant_op.constant([0., 0.])
x2_init = constant_op.constant(1.)
x3_init = constant_op.constant(1.)
def body(i, unused_x1, x2, x3):
y1 = var1.sparse_read([1, 3])
y2 = x2 * 2
y3 = x3 * math_ops.reduce_sum(var2.sparse_read([0]))
return i + 1, y1, y2, y3
r = control_flow_ops.while_loop(
lambda i, x1, x2, x3: i < 3, body,
[0, x1_init, x2_init, x3_init])[1:]
var1_grad, var2_grad = gradients_impl.gradients(r, [var1, var2])
self.evaluate(variables.global_variables_initializer())
var1_grad_val = self.evaluate(var1_grad)
var2_grad_val = self.evaluate(var2_grad)
self.assertAllEqual(gradient_checker_v2._to_numpy(var1_grad_val),
[0., 1., 0., 1., 0.])
self.assertAllEqual(gradient_checker_v2._to_numpy(var2_grad_val),
[3., 0., 0.])
def testWhileGrad_Gather(self):
# NOTE(skyewm): this test is interesting because the gather gradient
# function returns an IndexedSlices.
@tf_function_in_tf2
def fn():
x = constant_op.constant([1., 1., 1., 1., 1.])
y = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, x + array_ops.gather(x, [0])),
[0, x[:1]])[1]
z = y * 3.0
grad = gradients_impl.gradients(z, x)[0]
return y, grad
y, grad = fn()
self.assertEqual(self.evaluate(y), 8.)
self.assertAllEqual(self.evaluate(grad), [24., 0., 0., 0., 0.])
def testWhileGrad_GatherNoFanOut(self):
# NOTE(skyewm): this test is interesting because the gather gradient
# function returns an IndexedSlices.
@tf_function_in_tf2
def fn():
x = constant_op.constant([1., 1., 1., 1., 1.])
y = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, array_ops.gather(x, [0])),
[0, x[:1]])[1]
z = y * 3.0
grad = gradients_impl.gradients(z, x)[0]
return y, grad
y, grad = fn()
self.assertEqual(self.evaluate(y), 1.)
self.assertAllEqual(self.evaluate(grad), [3., 0., 0., 0., 0.])
@test_util.run_v1_only("b/120545219")
def testWhileGradInCond(self):
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def fn1():
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)[0]
r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.disable_control_flow_v2("b/116340060")
@test_util.run_v1_only("b/120545219")
def testGradInWhileWrtInitialLoopVal(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
y = x + 1
def body(i, v):
z = v * 2
return i + 1, gradients_impl.gradients(z, x)[0]
with self.assertRaisesRegex(
ValueError,
"Cannot compute gradient inside while loop with respect to op 'x'. "
"We do not support taking the gradient wrt or through the initial "
"value of a loop variable. Gradients can be computed through "
"loop invariants or wrt the input parameters to the loop body."):
control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])
@test_util.run_v1_only("b/120545219")
def testWhileGradInWhile(self):
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def b1(n):
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
[tensor_shape.unknown_shape()])
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.run_v1_only("b/120545219")
def testCondGradInNestedWhiles(self):
def outer_body(i, x):
_, x = control_flow_ops.while_loop(
lambda j, x: j < 3, inner_body, [0, 0.0])
return i + 1, x
def inner_body(j, x):
y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
return j + 1, gradients_impl.gradients(y, x)[0]
i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])
with self.cached_session() as sess:
i_val, x_val = self.evaluate([i, x])
self.assertEqual(i_val, 3)
self.assertAllClose(x_val, 1.0)
@test_util.run_gpu_only
def testGpuResourceAccess(self):
with ops.device(test.gpu_device_name()):
var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))
@def_function.function
def foo():
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, control_flow_ops.cond(
constant_op.constant(True),
lambda: x + var,
lambda: x)),
[0, 0.0])[1]
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(foo()), 9.0)
def testNestedResourceAccess(self):
var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))
@eager_function.defun
def test_fn():
x = constant_op.constant(0.0)
r = control_flow_ops.while_loop(
# Outer loop condition
lambda i, y: i < 2,
# Outer loop body
lambda i, y: (i + 1, y + control_flow_ops.cond(
constant_op.constant(True),
# True branch
lambda: control_flow_ops.while_loop(
# Inner loop condition
lambda j, z: j < 3,
# Inner loop body
lambda j, z: (j + 1, z + math_ops.square(var)),
# Inner initial loop value
[0, y])[1],
# False branch
lambda: (0.0))),
# Outer initial loop value
[0, x])[1]
grad = gradients_impl.gradients(r, x)[0]
return r, grad
self.evaluate(variables.global_variables_initializer())
r, grad = self.evaluate(test_fn())
    # r = outer_loop(0) = 4*0 + 81 = 81 (see the derivation below).
self.assertEqual(r, 81.0)
# v1 control flow gets the wrong answer!!!
# Gradient computation:
# f(x) = x + 3^2
# inner_loop(x) = f(f(f(x))) = x + 3*3^2 = x + 27
# g(x) = x + inner_loop(x) = 2x + 27
# outer_loop(x) = g(g(x)) = 4x + 81
# outer_loop'(x) = 4
# Note that v1 control flow gets 4.0 as well if the cond is removed.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(grad, 4.0)
def testWhile_NestedInput(self):
with self.cached_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
r = control_flow_ops.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
self.assertTrue(isinstance(r[2], ops.Tensor))
r_flattened = nest.flatten(r)
self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
self.evaluate(r_flattened))
@test_util.run_v1_only("b/120545219")
def testWhile_NestedBadArityFails(self):
with self.cached_session():
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegex(ValueError, "the same number of elements"):
control_flow_ops.while_loop(c, b, loop_vars)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ys_xs(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.add(x, y)
x1 = math_ops.multiply(x, y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
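      # Two iterations run: (x, y) -> (15, 5) -> (300, 20), which gives
      # rx = x * (x + 1) * (x + y)**2 and ry = (x + 1) * (x + y); the
      # gradients below follow from these closed forms at x = 3, y = 2.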
r = gradients_impl.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0])
r = gradients_impl.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0])
r = gradients_impl.gradients([rx], x)
self.assertAllClose(295.0, r[0])
r = gradients_impl.gradients([rx], y)
self.assertAllClose(120.0, r[0])
@test_util.run_deprecated_v1
def testWhileGrad_Dependency(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 10)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
r = gradients_impl.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0])
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoGradient(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
r = math_ops.add(r, v)
r = gradients_impl.gradients(r, v)
self.assertAllClose(1.0, r[0])
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoDependency(self):
with self.cached_session() as sess:
variable = variables.Variable(array_ops.ones([2, 3]))
duration = array_ops.zeros([], dtype=dtypes.int32)
def cond(duration, tensor, _):
del tensor
return duration < 10
def body(duration, tensor, _):
return (duration + 1, tensor, tensor)
loop_vars = [duration, variable, variable]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[2])
grad = gradients_impl.gradients(cost, [variable])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
@test_util.run_deprecated_v1
def testWhileGrad_Const(self):
with self.cached_session() as sess:
c0 = constant_op.constant(0.0, name="c0")
c1 = constant_op.constant(1.0, name="c1")
duration = constant_op.constant(0, name="t")
def cond(duration, _):
return duration < 1
def body(duration, _):
return duration + 1, c1
loop_vars = [duration, c0]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[1])
grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_SerialTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
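      # Each loop doubles x five times, so rx = x * 2**10 and drx/dx = 1024.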
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ParallelTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
rx = math_ops.add(r1, r2)
r = gradients_impl.gradients([rx], x)
self.assertAllClose(64.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(1.0, name="y")
c = lambda i, *_: math_ops.less(i, 1, name="cond_less")
def b(i, xi, yi):
# return (i + 1, xi, xi + yi)
return (math_ops.add(i, 1, name="inc"), array_ops.identity(
xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))
_, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
with ops.control_dependencies([x_f]):
y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, self.evaluate(y_f_d)) # y_f_d = 1.0 + 1.0
g = gradients_impl.gradients([y_f_d], [x])[0]
self.assertTrue(g is not None)
self.assertAllClose(1.0,
self.evaluate(g)) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
c = lambda x: math_ops.less(x, 4.0)
b = lambda x: math_ops.multiply(x, 2.0)
return control_flow_ops.while_loop(c, b, [s])
c = lambda x: math_ops.less(x, 2.0)
b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
r = control_flow_ops.while_loop(c, b, [v])
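      # A single outer iteration runs: the inner loop doubles v twice
      # (1 -> 2 -> 4) and the outer body doubles once more, so r = 8 * v.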
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(8.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_SerialInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(256.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileGrad_ParallelInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop.
with self.cached_session() as sess:
def inner_loop(t):
fn = lambda n: n + math_ops.square(var)
return map_fn.map_fn(fn=fn, elems=t, parallel_iterations=10)
def outer_loop(inp):
return map_fn.map_fn(
fn=inner_loop, elems=inp, parallel_iterations=10)
var = variables.Variable(constant_op.constant(3.0))
inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
res = outer_loop(inp)
optimizer = adam.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
self.evaluate(variables.global_variables_initializer())
self.evaluate(train_op)
self.assertAllClose(2.999, var.read_value())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
v = ops.convert_to_tensor(2.0, name="v")
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileCondGrad_Simple(self):
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
@test_util.run_deprecated_v1
def testWhileCondGrad_UnknownShape(self):
with self.cached_session() as sess:
v = array_ops.placeholder(dtypes.float32)
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
@test_util.run_deprecated_v1
def testWhileGrad_Concat(self):
with self.cached_session() as sess:
x = variable_scope.get_variable("x", initializer=[[1., 2.]])
i0 = constant_op.constant(0)
h0 = array_ops.zeros([0, 2])
def condition(i, _):
return i < 2
def body(i, h):
return i + 1, array_ops.concat([h, x], 0)
_, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
s = math_ops.reduce_sum(h)
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
op = optimizer.minimize(s)
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
self.assertAllClose([[0.98000002, 1.98000002]], self.evaluate(x))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefsWithGradients_1(self):
with self.cached_session() as sess:
x = variables.VariableV1(0.)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 10)
self.assertEqual(x.dtype, dtypes.float32_ref)
def body(i, x):
self.assertEqual(x.dtype, dtypes.float32_ref)
return [i + 1, gen_array_ops.ref_identity(x)]
r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [variables.VariableV1(73)._ref()] # pylint: disable=protected-access
grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.float32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
@test_util.deprecated_graph_mode_only
def testWhileGrad_IndexedSlices(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
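      # Ten iterations double x.values, so r.values = values * 2**10 and the
      # gradient wrt values is 1024 per element.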
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.deprecated_graph_mode_only
def testWhileGrad_SparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.deprecated_graph_mode_only
def testCallGradInLoop(self):
with self.cached_session() as sess:
i0 = constant_op.constant(0)
params = constant_op.constant(5.0)
params_1 = math_ops.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = constant_op.constant([1.0, 2.0, 3.0])
data = math_ops.multiply(data, params_1)
x1 = x + gradients_impl.gradients(data, params)[0]
return i + 1, x1
output_grad = control_flow_ops.while_loop(
c, b, [i0, constant_op.constant(0.0)])
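      # Each iteration adds d(sum(data))/d(params) = 12 * params = 60 to x,
      # so after ten iterations the second output is 600.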
self.assertAllClose(600.0, self.evaluate(output_grad)[1])
@test_util.run_deprecated_v1
def testWhileAndTensorArray(self):
with self.cached_session() as sess:
param = constant_op.constant(2.0)
n0 = constant_op.constant(0)
y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
def c(i, _):
return i < 10
def b(i, y):
return [
i + 1,
map_fn.map_fn(lambda x: math_ops.multiply(x, param), y)
]
r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
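      # Each of the 10 iterations multiplies y elementwise by param, so the
      # summed output is sum(y0) * param**10 and its gradient wrt param is
      # 10 * param**9 * 21 = 107520.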
r = gradients_impl.gradients(r, param)[0]
self.assertAllClose(107520.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileAndTensorArray(self):
n = constant_op.constant(3.0)
def Body(row, ta):
def InnerBody(row, col, ta):
# Note: row and col are 1-based.
ta = ta.write(
math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32), row * col)
return row, col + 1., ta
ta = control_flow_ops.while_loop(
lambda _, col, _1: col <= n,
InnerBody, [row, constant_op.constant(1.), ta],
return_same_structure=False)[2]
return row + 1., ta
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=9)
ta = control_flow_ops.while_loop(
lambda row, _: row <= n,
Body, [constant_op.constant(1.), ta],
return_same_structure=False)[1]
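    # The TensorArray now holds the 3x3 multiplication table in row-major
    # order, written as n * (row - 1) + col - 1 -> row * col.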
output = array_ops.reshape(ta.stack(), [3, 3])
self.assertAllEqual(
self.evaluate(output), [[1., 2., 3.], [2., 4., 6.], [3., 6., 9.]])
# TODO(b/117675481): This does not work with current TA. Enable with new TA.
# grad = gradients_impl.gradients(output, [n])
# self.assertEqual(self.evaluate(grad), 3.5)
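  # Verifies that stop_gradient applied to a while_loop output (or to an
  # expression built from one) severs the gradient path, yielding None.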
@test_util.run_deprecated_v1
def testWhileGrad_StopGrad(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.square(y)
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, self.evaluate(r))
r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, self.evaluate(r))
r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.square(rx)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, self.evaluate(r))
r = gradients_impl.gradients(
math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, self.evaluate(r))
r = gradients_impl.gradients(
math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileGrad_StopGradInside(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, self.evaluate(r))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileGrad_StopGradInsideNoShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
grad_y = gradients_impl.gradients(rx, y)[0]
grad_x = gradients_impl.gradients(rx, x)[0]
feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
self.assertAllClose([0.0, 0.0], sess.run(grad_y, feed_dict=feed_dict))
self.assertAllClose([156.0, 400.0], sess.run(grad_x, feed_dict=feed_dict))
name = "gradients/while/stopped_grad"
all_ops = x.graph.get_operations()
self.assertFalse(any(name in op.name for op in all_ops))
@test_util.run_deprecated_v1
def testWhileGradGradFail(self):
theta = variables.Variable(initial_value=1.)
def fn(prev, x):
return prev + x * theta
result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
grad_theta = gradients_impl.gradients(result, theta)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
with self.assertRaisesRegex(TypeError, "Second-order gradient"):
gradients_impl.gradients(grad_theta, theta)
grad_theta_stopped = array_ops.stop_gradient(grad_theta)
gradients_impl.gradients(grad_theta_stopped, theta)
@test_util.run_deprecated_v1
def testStopGradOnWhileGrad(self):
with self.cached_session():
x = constant_op.constant(2.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x: math_ops.less(x, 100.0)
b = lambda x: math_ops.multiply(x, y)
rx = control_flow_ops.while_loop(c, b, [x])
rg = gradients_impl.gradients(rx, y)[0]
rg = array_ops.stop_gradient(rg)
r = math_ops.add(math_ops.square(y), rx)
r = math_ops.add(r, rg)
r = gradients_impl.gradients(r, y)[0]
self.assertEqual(388.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1
def testWhileGradientWithNontrainablePath1(self):
q = variables.Variable([7., 8.])
def cond(_, y):
del y
return False
def body(x, _):
return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
self.evaluate(q.initializer)
self.assertAllClose([0., 0.], self.evaluate(dy_dq))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGradientWithNontrainablePath2(self):
q = variables.Variable([7., 8.])
def cond(_, y):
return math_ops.equal(y, 0.)
def body(x, _):
zero = constant_op.constant(0, dtype=dtypes.int64)
return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
self.evaluate(q.initializer)
self.assertAllClose([1., 1.], self.evaluate(dy_dq))
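  # Regression test for issue 16504: taking gradients through a while_loop
  # whose body itself calls gradients_impl.gradients should yield a
  # non-None gradient.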
@test_util.run_v1_only("b/120545219")
def testIssue16504(self):
c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
w = variables.Variable(
initial_value=np.ones(100), dtype=dtypes.float32) / 100
k = variables.Variable(0, dtype=dtypes.int32)
chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)
def cond(k, _, chg_w):
return math_ops.logical_and(k < 10, chg_w > 1e-3)
def body(k, w, chg_w):
grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
w_n = w * math_ops.exp(-0.1 * grad)
w_n /= math_ops.reduce_sum(w_n)
chg_w = (
math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
math_ops.abs(w)))
return k + 1, w_n, chg_w
_, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])
grad, = gradients_impl.gradients(w, c)
self.assertIsNotNone(grad)
@test_util.run_v1_only("b/120545219")
def testStopGradMultiFlows(self):
with self.cached_session():
def body(i, y, r):
x = variable_scope.get_variable(
"x",
shape=(),
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
y *= x
return [i + 1, y, r + math_ops.reduce_sum(y)]
i0 = constant_op.constant(0)
y0 = array_ops.ones(5)
r0 = constant_op.constant(0.0)
cond = lambda i, y, r: i < 1
_, _, r = control_flow_ops.while_loop(
cond, body, [i0, y0, r0], back_prop=True)
vars_ = variables.global_variables()
grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
result = gradients_impl.gradients(z, vars_)[0]
self.evaluate(variables.global_variables_initializer())
self.assertEqual(5.0, self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testOneValueCond(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
one = ops.convert_to_tensor(1, name="one")
two = ops.convert_to_tensor(2, name="two")
p = math_ops.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, ops.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
@test_util.run_deprecated_v1
def testExampleCond(self):
with self.cached_session():
x = ops.convert_to_tensor([-2.0, 2.0], name="x")
d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
return math_ops.reduce_sum(math_ops.abs(x))
i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
@test_util.run_v1_only("b/120545219")
def testCase(self):
with self.cached_session():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
{
x < y: f1,
x > z: f2
}, default=f3, exclusive=True)
self.assertAllEqual(r1, 17)
r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2, 23)
# Duplicate events can happen, first one is selected
r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3, 17)
# Duplicate events cause an error if exclusive = True
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError("Input error:"):
self.evaluate(r4)
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5, -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return constant_op.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = control_flow_ops.case(
[(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: constant_op.constant(2))
self.assertAllEqual(r6, 0)
@test_util.run_v1_only("b/120545219")
def testCaseSideEffects(self):
with self.cached_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)
b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)
c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, self.evaluate(r2))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, -1, 2])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, self.evaluate(r1))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, 1, -1])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, self.evaluate(r0))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [0, -1, -1])
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testOneOpCond(self):
with self.cached_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(v))
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, self.evaluate(v))
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithOpsDependencies(self):
with self.cached_session() as sess:
v = variables.VariableV1(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = self.evaluate([c, real_v])
# Ensure the result of 'real_v' is the same as 'c'
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
@test_util.run_v1_only("b/120545219")
def testWithTensorDependencies(self):
with self.cached_session():
v = variables.VariableV1(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v)
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, self.evaluate(c2_with_c1_dep))
# Ensure that 'v' is initialized
self.assertAllClose(0.0, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithIndexedSlicesDependencies(self):
with self.cached_session():
v = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(gather_v_at_1)
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]],
self.evaluate(gather_v_at_1_after_init))
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v))
def testDependenciesDevice(self):
with ops.Graph().as_default():
# device set on tensor => same device on dep.
with ops.device("/job:ps"):
vd = variables.VariableV1([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = variables.VariableV1([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = variables.VariableV1([0.0], name="vdef")
with ops.device("/job:worker/device:GPU:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testGroup(self):
with self.cached_session() as sess:
v1 = variables.VariableV1([0.0])
v2 = variables.VariableV1([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = self.evaluate([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
@test_util.run_v1_only("b/120545219")
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
@test_util.run_deprecated_v1
def testMergeShapes(self):
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
@test_util.run_v1_only("b/120545219")
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.VariableV1(p1, validate_shape=False)
v2 = variables.VariableV1(p2, validate_shape=False)
v3 = variables.VariableV1(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.VariableV1([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.VariableV1(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
@test_util.run_deprecated_v1
def testRunLoopTensor(self):
with self.cached_session() as sess:
tensor_list = []
def condition(t):
return t < constant_op.constant(5)
def body(_):
tensor_list.append(constant_op.constant(5))
return constant_op.constant(10)
result = control_flow_ops.while_loop(condition, body,
[constant_op.constant(4)])
self.assertEqual(10, self.evaluate(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
@test_util.run_v1_only("b/120545219")
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(self.evaluate(r[1]), 65536.0)
@test_util.run_v1_only("b/120545219")
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.cached_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()])
grad = gradients_impl.gradients(r, x)[0]
self.assertEqual(self.evaluate(r[1]), 65536.0)
self.assertEqual(self.evaluate(grad), 524288.0)
# while_v2 does not have stacks.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "StackV2"
]), 1)
@test_util.run_v1_only("b/120545219")
def testQIntSwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_qint = constant_op.constant(np.array([42]), dtypes.qint8)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_qint, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testQIntRefSwitchMerge(self):
with self.cached_session(use_gpu=test.is_gpu_available()) as sess:
var_qint = gen_state_ops.variable(
shape=[1], dtype=dtypes.qint8, name="v", container="", shared_name="")
assign_op = state_ops.assign(
var_qint, constant_op.constant(np.array([42]), dtypes.qint8))
self.evaluate(assign_op)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.ref_switch(var_qint, cond)
result = control_flow_ops.ref_merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testUInt64SwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_uint64 = constant_op.constant(np.array([42]), dtypes.uint64)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_uint64, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
def testSwitchEagerMode(self):
if not context.executing_eagerly():
return
input_data = [1, 2, 3, 4]
vf, vt = control_flow_ops.switch(input_data, False)
self.assertAllEqual(vf, input_data)
self.assertAllEqual(vt, [])
@test_util.run_deprecated_v1
def testQIntArgAndRet(self):
@function.Defun(dtypes.qint8)
def func(x):
return x
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
qint = constant_op.constant(np.array([42]), dtypes.qint8)
result = func(qint)
self.evaluate(result)
def testSparseIdentity(self):
st1 = sparse_tensor.SparseTensor([[0, 5]], ['x'], [10, 10])
st2 = control_flow_ops._Identity(st1)
self.assertAllEqual(st1.indices, st2.indices)
self.assertAllEqual(st1.values, st2.values)
self.assertAllEqual(st1.dense_shape, st2.dense_shape)
def testSparseEnterExit(self):
st1 = sparse_tensor.SparseTensor([[0, 5]], ['x'], [10, 10])
st2 = control_flow_ops._Enter(st1, "foo_1")
st3 = control_flow_ops.exit(st2)
self.assertAllEqual(st1.indices, st3.indices)
self.assertAllEqual(st1.values, st3.values)
self.assertAllEqual(st1.dense_shape, st3.dense_shape)
def _buildWhileWithShapeInvariants(self, shape_invariants):
r = constant_op.constant([1, 2])
def cond(_):
return False
def body(_):
return constant_op.constant([1])
return control_flow_ops.while_loop(
cond, body, [r], shape_invariants=shape_invariants)
def testWhileOutputShapeWithShapeInvariantsUnknownRank(self):
@def_function.function
def runTest():
while_output = self._buildWhileWithShapeInvariants(
[tensor_shape.TensorShape(None)])
self.assertIsNone(while_output.shape.rank)
runTest()
def testWhileOutputShapeWithShapeInvariantsPartialShape(self):
@def_function.function
def runTest():
while_output = self._buildWhileWithShapeInvariants(
[tensor_shape.TensorShape([None])])
self.assertAllEqual(while_output.shape.as_list(), [None])
runTest()
def testFunctionInWhile(self):
@def_function.function
def body(x):
return x + 1
r = control_flow_ops.while_loop(lambda x: x < 5, body, [0])
self.assertAllEqual(r, 5.)
class ControlFlowContextCheckTest(test.TestCase):
def _getWhileTensor(self):
"""Creates and returns a tensor from a while context."""
tensor = []
def body(i):
if not tensor:
tensor.append(constant_op.constant(1))
return i + tensor[0]
control_flow_ops.while_loop(lambda i: i < 10, body, [0])
return tensor[0]
def _getCondTensor(self):
cond_tensor = []
def true_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
return cond_tensor[0]
@test_util.run_v1_only("b/120545219")
def testInvalidContext(self):
# Accessing a while loop tensor outside of control flow is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegex(
ValueError,
"Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
"is in a while loop. See info log for more details."):
math_ops.add(1, while_tensor)
@test_util.run_v1_only("b/120545219")
def testInvalidContextInCond(self):
# Accessing a while loop tensor in cond is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegex(
ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
"'while/Const_1' is in a while loop. See info log for more details."):
# TODO(skyewm): this passes if we return while_tensor directly instead
# of using it as input to another op.
control_flow_ops.cond(
math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
lambda: constant_op.constant(0))
@test_util.run_v1_only("b/120545219")
def testInvalidContextInWhile(self):
# Accessing a while loop tensor in a different while loop is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegex(
ValueError,
"Cannot use 'while/Const_1' as input to 'while_1/Add' because they are "
"in different while loops. See info log for more details."):
control_flow_ops.while_loop(lambda i: i < 10,
lambda x: math_ops.add(1, while_tensor), [0])
with self.assertRaisesRegex(
ValueError,
"Cannot use 'while/Const_1' as input to 'while_2/NextIteration' "
"because they are in different while loops. See info log for more "
"details."):
control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])
def testValidCondContext(self):
# Accessing a tensor from a cond context is OK (although dangerous).
cond_tensor = self._getCondTensor()
math_ops.add(1, cond_tensor)
def testValidCondContextBranches(self):
# Accessing a tensor from a cond context from the other branch's cond
# context is OK (although dangerous).
cond_tensor = []
def branch_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)
@test_util.run_v1_only("b/120545219")
def testValidWhileContext(self):
# Accessing a tensor in a nested while is OK.
def body(_):
c = constant_op.constant(1)
return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
@test_util.run_v1_only("b/120545219")
def testValidNestedContexts(self):
# Accessing a tensor from a cond context in a while context, all inside an
# outer while context, is OK.
def body(_):
cond_tensor = self._getCondTensor()
# Create another cond containing the while loop for good measure
return control_flow_ops.cond(
math_ops.less(1, 2),
lambda: control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + cond_tensor, [0]),
lambda: constant_op.constant(0))
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
@test_util.run_v1_only("b/120545219")
def testInvalidNestedContexts(self):
# Accessing a tensor from a while context in a different while context, all
# inside a cond context, is illegal.
def true_fn():
while_tensor = self._getWhileTensor()
return control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + while_tensor, [0])
with self.assertRaisesRegex(
ValueError,
"Cannot use 'cond/while/Const_1' as input to 'cond/while_1/add' because"
" they are in different while loops. See info log for more details."):
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
class TupleTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testTensors(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.VariableV1([1.0])
add1 = math_ops.add(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
2.0)
v2 = variables.VariableV1([10.0])
add2 = math_ops.add(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v2)
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], self.evaluate(t1))
self.assertAllClose([10.0], self.evaluate(v2))
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], self.evaluate(t2))
self.assertAllClose([1.0], self.evaluate(v1))
@test_util.run_v1_only("b/120545219")
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
v2 = variables.VariableV1(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = array_ops.gather(st1.values, st1.indices)
g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v2)
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], self.evaluate(g1))
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
self.evaluate(v2))
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], self.evaluate(g2))
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v1))
def testAcceptTensorsAsControlInputs(self):
with self.cached_session():
var = variables.VariableV1(0)
assign = state_ops.assign(var, 1)
t, = control_flow_ops.tuple(
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
self.evaluate(t)
self.assertEqual(1, self.evaluate(var))
class AssertTest(test.TestCase):
@test_util.run_deprecated_v1
def testGuardedAssertDoesNotCopyWhenTrue(self):
if test_util.is_gpu_available():
self.skipTest("b/128646478 fails in opensource")
with self.session(use_gpu=True) as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names),
str(unguarded_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
class WhileOpBenchmark(test.Benchmark):
"""Evaluate the performance of while_loop op."""
def _getInitVariables(self):
batch_size = 10
image_size = 256
kernel_size = 3
depth = 16
init_step = constant_op.constant(-1)
image = variable_scope.get_variable(
"image",
initializer=random_ops.random_normal(
[batch_size, image_size, image_size, depth],
dtype=dtypes.float32,
stddev=1e-1))
kernel = variable_scope.get_variable(
"weights",
initializer=random_ops.truncated_normal(
[kernel_size, kernel_size, depth, depth],
dtype=dtypes.float32,
stddev=1e-1))
return init_step, image, kernel
def _runOneBenchmark(self,
default_device,
num_iters=10,
static_unroll=False,
steps=10):
"""Evaluate the while loop performance.
Args:
default_device: The default device to run all ops except the loop_body.
loop_body is always run on GPU.
num_iters: Number of iterations to run.
static_unroll: If true, run unrolled version; otherwise, run while_loop.
steps: Total number of repeated steps to run the loop.
Returns:
The duration of the run in seconds.
"""
def loop_body(i, x):
with ops.device("/gpu:0"):
# Always put loop body on GPU.
nx = nn_ops.conv2d(
input=x,
filter=kernel,
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC",
name="conv2d")
ni = math_ops.add(i, 1)
return ni, nx
ops.reset_default_graph()
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
self.evaluate(variables.global_variables_initializer())
if static_unroll:
for _ in xrange(steps):
i, x = loop_body(i, x)
else:
i, x = control_flow_ops.while_loop(
lambda i, _: i < steps,
loop_body, [i, x],
parallel_iterations=steps,
swap_memory=True)
r = math_ops.reduce_sum(x)
dx, dk = gradients_impl.gradients(r, [x, kernel])
# Use group to avoid fetching back results.
r = control_flow_ops.group(dx, dk)
for _ in xrange(3):
# exclude warm up time
self.evaluate(r)
start_time = time.time()
for _ in xrange(num_iters):
self.evaluate(r)
return (time.time() - start_time) / num_iters
def benchmarkWhileOpCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_cross_device", iters=iters, wall_time=duration)
def benchmarkWhileOpSameDevicePlacement(self):
iters = 10
# Run all ops on the same GPU device.
duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_same_device", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_cross_device_cpu", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollSameDevicePlacement(self):
iters = 10
# Run all ops on GPU.
duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_control_flow_v2
class EagerTest(test.TestCase):
def testCond(self):
with context.eager_mode():
pred = math_ops.less(1, 2)
fn1 = lambda: [constant_op.constant(10)]
fn2 = lambda: [constant_op.constant(20)]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual(r.numpy(), 10)
self.assertFalse(isinstance(r, list))
# TODO(b/117279927): Re-enable once msan failure is fixed.
def DISABLED_testCondInDefun(self):
with context.eager_mode():
@eager_function.defun
def foo(pred):
# TODO(b/111124878): this only needs to output one element.
fn1 = lambda: (constant_op.constant(10), constant_op.constant(100))
fn2 = lambda: (constant_op.constant(20), constant_op.constant(200))
return control_flow_ops.cond(constant_op.constant(pred), fn1, fn2)
r = foo(True)
self.assertAllEqual(r[0].numpy(), 10)
self.assertNotIsInstance(r, list)
r = foo(False)
self.assertAllEqual(r[0].numpy(), 20)
self.assertFalse(isinstance(r, list))
def testWhileLoop(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])
def testWhileLoopWithMaxIterations(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(
isum(tensor, maximum_iterations=3).numpy(),
[1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self):
with context.eager_mode():
tensor = constant_op.constant(0)
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)
self.assertEqual(1, r.numpy())
def testWithDependencies(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
t3 = control_flow_ops.with_dependencies(t1, t2)
self.assertAllEqual(t2.numpy(), t3.numpy())
def testTuple(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
tup1, tup2 = control_flow_ops.tuple([t1, t2])
self.assertAllEqual(t1.numpy(), tup1.numpy())
self.assertAllEqual(t2.numpy(), tup2.numpy())
@test_util.run_v1_only("b/120545219")
def testCase(self):
with context.eager_mode():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
[(x < y, f1), (x > z, f2)], default=f3, exclusive=True)
self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mausvt/seagate_central_cns3420_2-6-35 | scripts/proc_static_files_table.py | 2 | 2411 | #!/usr/bin/env python
# Usage: progname <directory>
#
# This program scans <directory> and generates C code on its stdout
# that contains an array of psf_entry structures, one for each file
# in <directory>. This code may be used to instantiate files in procfs
# whose contents are identical to the files in <directory>
#
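#
# For illustration only (a hypothetical directory holding a single file named
# "cpuinfo"; the embedded bytes come from scripts/bin2c), the generated C
# looks roughly like:
#
#   /* cpuinfo */
#   static char data_0[] =
#   "..."
#       ;
#
#   static struct psf_entry psf_table[] = {
#     /*0*/ { "cpuinfo", NULL, data_0, sizeof(data_0) - 1 },
#   };
#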
# Author: Dale Farnsworth <[email protected]>
#
# Copyright 2009 (c) MontaVista Software, Inc. This file is licensed
# under the terms of the GNU General Public License version 2.
# This program is licensed "as is" without any warranty of any kind,
# whether express or implied.
import sys
import os
import glob
import subprocess
class Entry:
def __init__(self, path, parent):
self.path = path
self.parent = parent
self.isdir = os.path.isdir(path)
@classmethod
def all_entries(self, path=""):
def recurse(parent_path, parent):
for path in glob.glob(os.path.join(parent_path, "*")):
entry = Entry(path, parent)
entry.index = len(entries)
entries.append(entry)
if entry.isdir:
recurse(path, entry)
entries = []
recurse(path, None)
return entries
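# Illustrative note: after os.chdir(dir), Entry.all_entries() returns a flat,
# depth-first list of Entry objects; each entry records its parent and its
# index in that list, which output_psf_entries() below uses to emit the
# parent pointers of the generated psf_table.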
def output_file_data(entries):
for entry in entries:
if entry.isdir:
continue
sys.stdout.write("/* %s */\n" % entry.path)
sys.stdout.write("static char data_%s[] = \n" % entry.index)
sys.stdout.flush()
bin2c = os.path.join(os.environ["objtree"], "scripts/bin2c")
f = open(entry.path)
subprocess.call([bin2c], stdin=f)
f.close()
sys.stdout.write("\t;\n\n")
def output_psf_entries(entries):
sys.stdout.write("static struct psf_entry psf_table[] = {\n")
for entry in entries:
if entry.parent:
parent_addr = "&psf_table[%d]" % entry.parent.index
else:
parent_addr = "NULL"
if entry.isdir:
data = "NULL"
size = "-1"
else:
data = "data_%d" % entry.index
size = "sizeof(%s) - 1" % data
sys.stdout.write(' /*%d*/\t{ "%s", %s, %s, %s },\n' %
(entry.index,
os.path.basename(entry.path),
parent_addr, data, size))
sys.stdout.write("};\n")
def main():
progname = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s <directory>\n" % progname)
sys.exit(1)
dir = sys.argv[1]
if not os.path.isdir(dir):
sys.stderr.write("%s: %s: not a directory\n" % (progname, dir))
sys.exit(1)
os.chdir(dir)
entries = Entry.all_entries()
output_file_data(entries)
output_psf_entries(entries)
main()
| gpl-2.0 |
iamgreaser/pysnip | feature_server/console.py | 8 | 2950 | # Copyright (c) Mathias Kaerlev 2011-2012.
# This file is part of pyspades.
# pyspades is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyspades is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyspades. If not, see <http://www.gnu.org/licenses/>.
import sys
import commands
from twisted.internet import reactor
from twisted.protocols.basic import LineReceiver
from pyspades.types import AttributeSet
stdout = sys.__stdout__
if sys.platform == 'win32':
# StandardIO on Windows does not work, so we create a silly replacement
import msvcrt
class StandardIO(object):
disconnecting = False
interval = 0.01
input = u''
def __init__(self, protocol):
self.protocol = protocol
protocol.makeConnection(self)
self.get_input()
def get_input(self):
while msvcrt.kbhit():
c = msvcrt.getwch()
if c == u'\r': # new line
c = u'\n'
stdout.write(c)
self.input += c
self.protocol.dataReceived(self.input)
self.input = ''
elif c in (u'\xE0', u'\x00'):
# ignore special characters
msvcrt.getwch()
elif c == u'\x08': # delete
self.input = self.input[:-1]
stdout.write('\x08 \x08')
else:
self.input += c
stdout.write(c)
reactor.callLater(self.interval, self.get_input)
def write(self, data):
stdout.write(data)
def writeSequence(self, seq):
stdout.write(''.join(seq))
else:
from twisted.internet.stdio import StandardIO
class ConsoleInput(LineReceiver):
name = 'Console'
admin = True
delimiter = '\n'
def __init__(self, protocol):
self.protocol = protocol
self.user_types = AttributeSet(['admin', 'console'])
self.rights = AttributeSet()
for user_type in self.user_types:
self.rights.update(commands.rights.get(user_type, ()))
def lineReceived(self, line):
if line.startswith('/'):
line = line[1:]
result = commands.handle_input(self, line)
if result is not None:
print result
else:
self.protocol.send_chat(line)
def create_console(protocol):
console = ConsoleInput(protocol)
StandardIO(console) | gpl-3.0 |
architecture-building-systems/CityEnergyAnalyst | cea/technologies/constants.py | 2 | 4135 | """
Constants used throughout the cea.technologies package.
History lesson: This is a first step at removing the `cea.globalvars.GlobalVariables` object.
"""
# Heat Exchangers
U_COOL = 2500.0 # W/m2K
U_HEAT = 2500.0 # W/m2K
DT_HEAT = 5.0 # K - pinch delta at design conditions
DT_COOL = 2.0 # K - pinch delta at design conditions
DT_INTERNAL_HEX = 2.0 # K - minimum difference between cold side outflow and hot side inflow temperatures
HEAT_EX_EFFECTIVENESS = 0.9 # assume starting value for heat exchanger effectiveness (exergy)
MAX_NODE_FLOW = 22.0 # kg/s
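# Illustrative sizing check (not used by the code; the 500 kW load is an
# assumed number): treating the design pinch as the effective temperature
# difference across the HEX, a cooling heat exchanger transferring Q = 500 kW
# needs roughly
#   A = Q / (U_COOL * DT_COOL) = 500e3 / (2500.0 * 2.0) = 100 m2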
# Heat pump
HP_MAX_SIZE = 20.0E6 # max thermal design size [Wth]
HP_MIN_SIZE = 1.0E6 # min thermal design size [Wth]
HP_ETA_EX = 0.6 # exergetic efficiency of WSHP [L. Girardin et al., 2010]_
HP_DELTA_T_COND = 2.0 # pinch for condenser [K]
HP_DELTA_T_EVAP = 2.0 # pinch for evaporator [K]
HP_MAX_T_COND = 140 + 273.0 # max temperature at condenser [K]
HP_AUXRATIO = 0.83 # Wdot_comp / Wdot_total (circulating pumps)
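# Illustrative check (assumed temperatures, not part of the model): with the
# exergetic efficiency above, a water-source heat pump condensing at 330 K and
# evaporating at 280 K would reach roughly
#   COP = HP_ETA_EX * T_cond / (T_cond - T_evap) = 0.6 * 330 / 50 = 3.96
# before accounting for auxiliary power via HP_AUXRATIO.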
# Substation data
ROUGHNESS = 0.02 / 1000 # roughness coefficient for heating network pipe in m (for a steel pipe, from Li &
NETWORK_DEPTH = 1 # m
# Initial Diameter guess
REDUCED_TIME_STEPS = 50 # number of time steps of maximum demand which are evaluated as an initial guess of the edge diameters
MAX_INITIAL_DIAMETER_ITERATIONS = 20 # number of initial guess iterations for pipe diameters
# Cogeneration (CCGT)
SPEC_VOLUME_STEAM = 0.0010 # m3/kg
# Storage tank
TANK_HEX_EFFECTIVENESS = 0.9 # assuming 90% effectiveness
#Chiller
G_VALUE_CENTRALIZED = 0.47
G_VALUE_DECENTRALIZED = 0.4 # calculated from ESP4401_Part-2 Air conditioning system_AY2016_17.pdf assuming Singapore wet bulb temperature and 7.5 degC at the cold side
T_EVAP_AHU = 280.5 # K, from CEA demand calculation
T_EVAP_ARU = 280.5 # K, from CEA demand calculation
T_EVAP_SCU = 291 # K, from CEA demand calculation
DT_NETWORK_CENTRALIZED = 2 # Assumption for network losses. This value is based on a sample calculation with all loads supplied by the network.
CHILLER_DELTA_T_APPROACH = 2.8 # K , ESP4401_Part-2 Air conditioning system_AY2016_17.pdf
CHILLER_DELTA_T_HEX_CT = 1.5 # K , Approximation, approach temperature of the HEX b/t the condenser loop and CT
CENTRALIZED_AUX_PERCENTAGE = 38 # % , Power needed by auxiliary Chiller and CT, calculation based on UTown plant
DECENTRALIZED_AUX_PERCENTAGE = 27 # % , Power needed by auxiliary Chiller and CT, backwards calculation based on Clark D (CUNDALL). Chiller energy efficiency 2013.
COMPRESSOR_TYPE_LIMIT_LOW = 1055056 # in W, according to ASHRAE 90.1 Appendix G. Below this limit (300 RT), one water-cooled screw chiller should be implemented
COMPRESSOR_TYPE_LIMIT_HIGH = 2110112 # in W, according to ASHRAE 90.1 Appendix G. Below this limit (600 RT), two water-cooled screw chillers should be implemented, while above it two centrifugal water-source chillers shall be implemented, each not larger than 800 RT (2813 kW)
ASHRAE_CAPACITY_LIMIT = 2813482 # in W, according to ASHRAE 90.1 Appendix G, a chiller shall not be larger than 800 RT
# Cooling Towers
CT_MIN_PARTLOAD_RATIO = 0.15 # from Grahovac, M. et al. (2012). VC CHILLERS AND PV PANELS: A GENERIC PLANNING TOOL PROVIDING THE OPTIMAL DIMENSIONS TO MINIMIZE COSTS OR EMISSIONS.
#Furnace
FURNACE_MIN_LOAD = 0.2 # Minimum load possible (does not affect Model itself!)
FURNACE_MIN_ELECTRIC = 0.3 # Minimum load for electricity generation in furnace plant
FURNACE_FUEL_COST_WET = 0.057 * 1E-3 # CHF / Wh = 5.7 Rp / kWh for wet (50wt%) Wood Chips, after
FURNACE_FUEL_COST_DRY = 0.07 * 1E-3 # CHF / Wh = 7 Rp / kWh for dry (30wt%) Wood Chips,
# Boiler
# Operating figures, quality parameters and investment costs for district heating systems (AFO)
# ELCO-Loesungsbeispiel-Huber.pdf
BOILER_C_FUEL = 20.0 # eu / MWh_therm_bought(for LHV), AFO
BOILER_P_AUX = 0.026 # 0.026 Wh/Wh_th_sold = 26 kWh_el / MWh_th_sold, bioenergy 2020
BOILER_MIN = 0.05 # minimum Part Load of Boiler
BOILER_EQU_RATIO = 0.2 # 20% own capital required (equity ratio)
BOILER_ETA_HP = 0.9
# Natural gas connection
GAS_CONNECTION_COST = 15.5 / 1000 # CHF / W, from Energie360 15.5 CHF / kW
| mit |
cryptobanana/ansible | lib/ansible/modules/network/f5/bigiq_regkey_pool.py | 1 | 11321 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigiq_regkey_pool
short_description: Manages registration key pools on BIG-IQ
description:
- Manages registration key (regkey) pools on a BIG-IQ. These pools function as
containers in which you add lists of registration keys. To add registration
keys, use the C(bigiq_regkey_license) module.
version_added: "2.5"
options:
name:
description:
- Specifies the name of the registration key pool.
- You must give your registration pools unique names. While
BIG-IQ does not require this, this module does. If you do not,
the behavior of the module is undefined and you may end up putting
licenses in the wrong registration key pool.
required: True
description:
description:
- A description to attach to the pool.
state:
description:
- The state of the regkey pool on the system.
- When C(present), guarantees that the pool exists.
- When C(absent), removes the pool, and the licenses it contains, from the
system.
default: present
choices:
- absent
- present
requirements:
- BIG-IQ >= 5.3.0
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a registration key (regkey) pool to hold individual device licenses
bigiq_regkey_pool:
name: foo-pool
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: New description of the regkey pool.
returned: changed
type: string
sample: My description
'''
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
from ansible.module_utils.six import iteritems
from collections import defaultdict
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
}
api_attributes = [
'description'
]
returnables = [
'description'
]
updatables = [
'description'
]
def __init__(self, params=None):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
if params:
self.update(params=params)
def update(self, params=None):
if params:
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have
# an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleParameters(Parameters):
@property
def uuid(self):
"""Returns UUID of a given name
Will search for a given name and return the first one returned to us. If no name,
and therefore no ID, is found, will return the string "none". The string "none"
is returned because if we were to return the None value, it would cause the
license loading code to append a None string to the URI; essentially asking the
remote device for its collection (which we dont want and which would cause the SDK
to return an False error.
:return:
"""
collection = self.client.api.cm.device.licensing.pool.regkey.licenses_s.get_collection()
resource = next((x for x in collection if x.name == self._values['name']), None)
if resource:
return resource.id
else:
return "none"
class ApiParameters(Parameters):
@property
def uuid(self):
return self._values['id']
class Changes(Parameters):
pass
class ReportableChanges(Changes):
pass
class UsableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.want = ModuleParameters(self.client.module.params)
self.want.update({'client': client})
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = Changes(changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
result = self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.exists(
id=self.want.uuid
)
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.want.api_params()
self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.create(
name=self.want.name,
**params
)
def update_on_device(self):
params = self.changes.api_params()
resource = self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.load(
id=self.want.uuid
)
resource.modify(**params)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
resource = self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.load(
id=self.want.uuid
)
if resource:
resource.delete()
def read_current_from_device(self):
resource = self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.load(
id=self.want.uuid
)
result = resource.attrs
return ApiParameters(result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(required=True),
description=dict(),
state=dict(
default='present',
choices=['absent', 'present']
)
)
self.f5_product_name = 'bigiq'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
sebadiaz/rethinkdb | external/v8_3.30.33.16/build/gyp/test/mac/gyptest-app.py | 75 | 4193 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
import TestGyp
import os
import plistlib
import subprocess
import sys
def GetStdout(cmdlist):
return subprocess.Popen(cmdlist,
stdout=subprocess.PIPE).communicate()[0].rstrip('\n')
def ExpectEq(expected, actual):
if expected != actual:
print >>sys.stderr, 'Expected "%s", got "%s"' % (expected, actual)
test.fail_test()
def ls(path):
'''Returns a list of all files in a directory, relative to the directory.'''
result = []
for dirpath, _, files in os.walk(path):
for f in files:
result.append(os.path.join(dirpath, f)[len(path) + 1:])
return result
def XcodeVersion():
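# e.g. "Xcode 5.1" -> version "51" -> padded "510" -> returned as "0510";
# "4.6.3" -> "463" -> "0463" (assumed sample outputs of `xcodebuild -version`).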
stdout = subprocess.check_output(['xcodebuild', '-version'])
version = stdout.splitlines()[0].split()[-1].replace('.', '')
return (version + '0' * (3 - len(version))).zfill(4)
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='app-bundle')
test.build('test.gyp', test.ALL, chdir='app-bundle')
# Binary
test.built_file_must_exist('Test App Gyp.app/Contents/MacOS/Test App Gyp',
chdir='app-bundle')
# Info.plist
info_plist = test.built_file_path('Test App Gyp.app/Contents/Info.plist',
chdir='app-bundle')
test.must_exist(info_plist)
test.must_contain(info_plist, 'com.google.Test-App-Gyp') # Variable expansion
test.must_not_contain(info_plist, '${MACOSX_DEPLOYMENT_TARGET}')
if test.format != 'make':
# TODO: Synthesized plist entries aren't hooked up in the make generator.
plist = plistlib.readPlist(info_plist)
ExpectEq(GetStdout(['sw_vers', '-buildVersion']),
plist['BuildMachineOSBuild'])
# Prior to Xcode 5.0.0, SDKROOT (and thus DTSDKName) was only defined if
# set in the Xcode project file. Starting with that version, it is always
# defined.
expected = ''
if XcodeVersion() >= '0500':
version = GetStdout(['xcodebuild', '-version', '-sdk', '', 'SDKVersion'])
expected = 'macosx' + version
ExpectEq(expected, plist['DTSDKName'])
sdkbuild = GetStdout(
['xcodebuild', '-version', '-sdk', '', 'ProductBuildVersion'])
if not sdkbuild:
# Above command doesn't work in Xcode 4.2.
sdkbuild = plist['BuildMachineOSBuild']
ExpectEq(sdkbuild, plist['DTSDKBuild'])
xcode, build = GetStdout(['xcodebuild', '-version']).splitlines()
xcode = xcode.split()[-1].replace('.', '')
xcode = (xcode + '0' * (3 - len(xcode))).zfill(4)
build = build.split()[-1]
ExpectEq(xcode, plist['DTXcode'])
ExpectEq(build, plist['DTXcodeBuild'])
# Resources
strings_files = ['InfoPlist.strings', 'utf-16be.strings', 'utf-16le.strings']
for f in strings_files:
strings = test.built_file_path(
os.path.join('Test App Gyp.app/Contents/Resources/English.lproj', f),
chdir='app-bundle')
test.must_exist(strings)
# Xcodes writes UTF-16LE with BOM.
contents = open(strings, 'rb').read()
if not contents.startswith('\xff\xfe' + '/* Localized'.encode('utf-16le')):
test.fail_test()
test.built_file_must_exist(
'Test App Gyp.app/Contents/Resources/English.lproj/MainMenu.nib',
chdir='app-bundle')
# Packaging
test.built_file_must_exist('Test App Gyp.app/Contents/PkgInfo',
chdir='app-bundle')
test.built_file_must_match('Test App Gyp.app/Contents/PkgInfo', 'APPLause',
chdir='app-bundle')
# Check that no other files get added to the bundle.
if set(ls(test.built_file_path('Test App Gyp.app', chdir='app-bundle'))) != \
set(['Contents/MacOS/Test App Gyp',
'Contents/Info.plist',
'Contents/Resources/English.lproj/MainMenu.nib',
'Contents/PkgInfo',
] +
[os.path.join('Contents/Resources/English.lproj', f)
for f in strings_files]):
test.fail_test()
test.pass_test()
| agpl-3.0 |
VinceZK/phantomjs | src/qt/qtwebkit/Tools/Scripts/run-inspector-perf-tests.py | 131 | 1884 | #!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run Inspector's perf tests in perf mode."""
import logging
import sys
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
_log = logging.getLogger(__name__)
if '__main__' == __name__:
logging.basicConfig(level=logging.INFO, format="%(message)s")
sys.exit(PerfTestsRunner(args=['inspector']).run())
| bsd-3-clause |
patrikpettersson/rest-engine | lib/aniso8601/tests/test_time.py | 3 | 20403 | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import datetime
from aniso8601.time import get_time_resolution, parse_time, parse_datetime, \
_parse_time_naive, _parse_hour, _parse_minute_time, _parse_second_time, \
_build_time, _split_tz
from aniso8601.resolution import TimeResolution
class TestTimeFunctions(unittest.TestCase):
def test_get_time_resolution(self):
self.assertEqual(get_time_resolution('01:23:45'), TimeResolution.Seconds)
self.assertEqual(get_time_resolution('24:00:00'), TimeResolution.Seconds)
self.assertEqual(get_time_resolution('23:21:28.512400'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('01:23'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('24:00'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('01:23.4567'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('012345'), TimeResolution.Seconds)
self.assertEqual(get_time_resolution('240000'), TimeResolution.Seconds)
self.assertEqual(get_time_resolution('0123'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('2400'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('01'), TimeResolution.Hours)
self.assertEqual(get_time_resolution('24'), TimeResolution.Hours)
self.assertEqual(get_time_resolution('12.5'), TimeResolution.Hours)
self.assertEqual(get_time_resolution('232128.512400+00:00'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('0123.4567+00:00'),
TimeResolution.Minutes)
self.assertEqual(get_time_resolution('01.4567+00:00'), TimeResolution.Hours)
self.assertEqual(get_time_resolution('01:23:45+00:00'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('24:00:00+00:00'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('23:21:28.512400+00:00'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('01:23+00:00'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('24:00+00:00'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('01:23.4567+00:00'),
TimeResolution.Minutes)
self.assertEqual(get_time_resolution('23:21:28.512400+11:15'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('23:21:28.512400-12:34'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('23:21:28.512400Z'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('06:14:00.000123Z'),
TimeResolution.Seconds)
def test_parse_time(self):
time = parse_time('01:23:45')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 45)
time = parse_time('24:00:00')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
self.assertEqual(time.second, 0)
time = parse_time('23:21:28.512400')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
time = parse_time('01:23')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
time = parse_time('24:00')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
time = parse_time('01:23.4567')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 27)
self.assertEqual(time.microsecond, 402000)
time = parse_time('012345')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 45)
time = parse_time('240000')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
self.assertEqual(time.second, 0)
time = parse_time('0123')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
time = parse_time('2400')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
time = parse_time('01')
self.assertEqual(time.hour, 1)
time = parse_time('24')
self.assertEqual(time.hour, 0)
time = parse_time('12.5')
self.assertEqual(time.hour, 12)
self.assertEqual(time.minute, 30)
time = parse_time('232128.512400+00:00')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
time = parse_time('0123.4567+00:00')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 27)
self.assertEqual(time.microsecond, 402000)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
time = parse_time('01.4567+00:00')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 27)
self.assertEqual(time.second, 24)
self.assertEqual(time.microsecond, 120000)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
time = parse_time('01:23:45+00:00')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 45)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
time = parse_time('24:00:00+00:00')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
self.assertEqual(time.second, 0)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
time = parse_time('23:21:28.512400+00:00')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
time = parse_time('01:23+00:00')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
time = parse_time('24:00+00:00')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
time = parse_time('01:23.4567+00:00')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 27)
self.assertEqual(time.microsecond, 402000)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
time = parse_time('23:21:28.512400+11:15')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=11, minutes=15))
self.assertEqual(tzinfoobject.tzname(None), '+11:15')
time = parse_time('23:21:28.512400-12:34')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), -datetime.timedelta(hours=12, minutes=34))
self.assertEqual(tzinfoobject.tzname(None), '-12:34')
time = parse_time('23:21:28.512400Z')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), 'UTC')
time = parse_time('06:14:00.000123Z')
self.assertEqual(time.hour, 6)
self.assertEqual(time.minute, 14)
        self.assertEqual(time.second, 0)
self.assertEqual(time.microsecond, 123)
tzinfoobject = time.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), 'UTC')
def test_parse_datetime(self):
resultdatetime = parse_datetime('1981-04-05T23:21:28.512400Z')
self.assertEqual(resultdatetime.year, 1981)
self.assertEqual(resultdatetime.month, 4)
self.assertEqual(resultdatetime.day, 5)
self.assertEqual(resultdatetime.hour, 23)
self.assertEqual(resultdatetime.minute, 21)
self.assertEqual(resultdatetime.second, 28)
self.assertEqual(resultdatetime.microsecond, 512400)
tzinfoobject = resultdatetime.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), 'UTC')
resultdatetime = parse_datetime('1981095T23:21:28.512400-12:34')
self.assertEqual(resultdatetime.year, 1981)
self.assertEqual(resultdatetime.month, 4)
self.assertEqual(resultdatetime.day, 5)
self.assertEqual(resultdatetime.hour, 23)
self.assertEqual(resultdatetime.minute, 21)
self.assertEqual(resultdatetime.second, 28)
self.assertEqual(resultdatetime.microsecond, 512400)
tzinfoobject = resultdatetime.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), -datetime.timedelta(hours=12, minutes=34))
self.assertEqual(tzinfoobject.tzname(None), '-12:34')
resultdatetime = parse_datetime('19810405T23:21:28+00')
self.assertEqual(resultdatetime.year, 1981)
self.assertEqual(resultdatetime.month, 4)
self.assertEqual(resultdatetime.day, 5)
self.assertEqual(resultdatetime.hour, 23)
self.assertEqual(resultdatetime.minute, 21)
self.assertEqual(resultdatetime.second, 28)
tzinfoobject = resultdatetime.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00')
resultdatetime = parse_datetime('19810405T23:21:28+00:00')
self.assertEqual(resultdatetime.year, 1981)
self.assertEqual(resultdatetime.month, 4)
self.assertEqual(resultdatetime.day, 5)
self.assertEqual(resultdatetime.hour, 23)
self.assertEqual(resultdatetime.minute, 21)
self.assertEqual(resultdatetime.second, 28)
tzinfoobject = resultdatetime.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), datetime.timedelta(hours=0))
self.assertEqual(tzinfoobject.tzname(None), '+00:00')
    def test_parse_datetime_spaceseparated(self):
resultdatetime = parse_datetime('2004-W53-6 23:21:28.512400-12:34', ' ')
self.assertEqual(resultdatetime.year, 2005)
self.assertEqual(resultdatetime.month, 1)
self.assertEqual(resultdatetime.day, 1)
self.assertEqual(resultdatetime.hour, 23)
self.assertEqual(resultdatetime.minute, 21)
self.assertEqual(resultdatetime.second, 28)
self.assertEqual(resultdatetime.microsecond, 512400)
tzinfoobject = resultdatetime.tzinfo
self.assertEqual(tzinfoobject.utcoffset(None), -datetime.timedelta(hours=12, minutes=34))
self.assertEqual(tzinfoobject.tzname(None), '-12:34')
def test_parse_time_naive(self):
time = _parse_time_naive('01:23:45')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 45)
time = _parse_time_naive('24:00:00')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
self.assertEqual(time.second, 0)
time = _parse_time_naive('23:21:28.512400')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
time = _parse_time_naive('01:23')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
time = _parse_time_naive('24:00')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
time = _parse_time_naive('01:23.4567')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 27)
self.assertEqual(time.microsecond, 402000)
time = _parse_time_naive('012345')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 45)
time = _parse_time_naive('240000')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
self.assertEqual(time.second, 0)
time = _parse_time_naive('0123')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
time = _parse_time_naive('2400')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
time = _parse_time_naive('01')
self.assertEqual(time.hour, 1)
time = _parse_time_naive('24')
self.assertEqual(time.hour, 0)
time = _parse_time_naive('232128.512400')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
time = _parse_time_naive('0123.4567')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 27)
self.assertEqual(time.microsecond, 402000)
time = _parse_time_naive('01.4567')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 27)
self.assertEqual(time.second, 24)
self.assertEqual(time.microsecond, 120000)
def test_parse_hour(self):
time = _parse_hour('01')
self.assertEqual(time.hour, 1)
time = _parse_hour('24')
self.assertEqual(time.hour, 0)
time = _parse_hour('01.4567')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 27)
self.assertEqual(time.second, 24)
self.assertEqual(time.microsecond, 120000)
time = _parse_hour('12.5')
self.assertEqual(time.hour, 12)
self.assertEqual(time.minute, 30)
def test_parse_minute_time(self):
time = _parse_minute_time('01:23')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
time = _parse_minute_time('24:00')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
time = _parse_minute_time('01:23.4567')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 27)
self.assertEqual(time.microsecond, 402000)
time = _parse_minute_time('0123')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
time = _parse_minute_time('2400')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
time = _parse_minute_time('0123.4567')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 27)
self.assertEqual(time.microsecond, 402000)
def test_parse_second_time(self):
time = _parse_second_time('01:23:45')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 45)
time = _parse_second_time('24:00:00')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
self.assertEqual(time.second, 0)
time = _parse_second_time('23:21:28.512400')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
time = _parse_second_time('012345')
self.assertEqual(time.hour, 1)
self.assertEqual(time.minute, 23)
self.assertEqual(time.second, 45)
time = _parse_second_time('240000')
self.assertEqual(time.hour, 0)
self.assertEqual(time.minute, 0)
self.assertEqual(time.second, 0)
time = _parse_second_time('232128.512400')
self.assertEqual(time.hour, 23)
self.assertEqual(time.minute, 21)
self.assertEqual(time.second, 28)
self.assertEqual(time.microsecond, 512400)
def test_build_time(self):
self.assertEqual(_build_time(datetime.time(hour=1), datetime.timedelta(hours=1.1, minutes=2.2, seconds=3.3)), datetime.time(hour=2, minute=8, second=15, microsecond=300000))
#Make sure it overflows correctly
self.assertEqual(_build_time(datetime.time.max, datetime.timedelta(microseconds=1)), datetime.time.min)
def test_split_tz(self):
self.assertEqual(_split_tz('01:23:45'), ('01:23:45', None))
self.assertEqual(_split_tz('24:00:00'), ('24:00:00', None))
self.assertEqual(_split_tz('23:21:28.512400'), ('23:21:28.512400', None))
self.assertEqual(_split_tz('01:23'), ('01:23', None))
self.assertEqual(_split_tz('24:00'), ('24:00', None))
self.assertEqual(_split_tz('01:23.4567'), ('01:23.4567', None))
self.assertEqual(_split_tz('012345'), ('012345', None))
self.assertEqual(_split_tz('240000'), ('240000', None))
self.assertEqual(_split_tz('0123'), ('0123', None))
self.assertEqual(_split_tz('2400'), ('2400', None))
self.assertEqual(_split_tz('01'), ('01', None))
self.assertEqual(_split_tz('24'), ('24', None))
self.assertEqual(_split_tz('12.5'), ('12.5', None))
self.assertEqual(_split_tz('232128.512400+00:00'), ('232128.512400', '+00:00'))
self.assertEqual(_split_tz('0123.4567+00:00'), ('0123.4567', '+00:00'))
self.assertEqual(_split_tz('01.4567+00:00'), ('01.4567', '+00:00'))
self.assertEqual(_split_tz('01:23:45+00:00'), ('01:23:45', '+00:00'))
self.assertEqual(_split_tz('24:00:00+00:00'), ('24:00:00', '+00:00'))
self.assertEqual(_split_tz('23:21:28.512400+00:00'), ('23:21:28.512400', '+00:00'))
self.assertEqual(_split_tz('01:23+00:00'), ('01:23', '+00:00'))
self.assertEqual(_split_tz('24:00+00:00'), ('24:00', '+00:00'))
self.assertEqual(_split_tz('01:23.4567+00:00'), ('01:23.4567', '+00:00'))
self.assertEqual(_split_tz('23:21:28.512400+11:15'), ('23:21:28.512400', '+11:15'))
self.assertEqual(_split_tz('23:21:28.512400-12:34'), ('23:21:28.512400', '-12:34'))
self.assertEqual(_split_tz('23:21:28.512400Z'), ('23:21:28.512400', 'Z'))
self.assertEqual(_split_tz('06:14:00.000123Z'), ('06:14:00.000123', 'Z'))
| mit |
wetek-enigma/oe-alliance-core | meta-oe/recipes-devtools/python/python_2.7.12/sitecustomize.py | 228 | 1125 | # OpenEmbedded sitecustomize.py (C) 2002-2008 Michael 'Mickey' Lauer <[email protected]>
# GPLv2 or later
# Version: 20081123
# Features:
# * set proper default encoding
# * enable readline completion in the interactive interpreter
# * load command line history on startup
# * save command line history on exit
import os
def __exithandler():
try:
readline.write_history_file( "%s/.python-history" % os.getenv( "HOME", "/tmp" ) )
except IOError:
pass
def __registerExitHandler():
import atexit
atexit.register( __exithandler )
def __enableReadlineSupport():
readline.set_history_length( 1000 )
readline.parse_and_bind( "tab: complete" )
try:
readline.read_history_file( "%s/.python-history" % os.getenv( "HOME", "/tmp" ) )
except IOError:
pass
def __enableDefaultEncoding():
import sys
try:
sys.setdefaultencoding( "utf8" )
except LookupError:
pass
import sys
try:
import rlcompleter, readline
except ImportError:
pass
else:
__enableDefaultEncoding()
__registerExitHandler()
__enableReadlineSupport()
| gpl-2.0 |
Maximilian-Reuter/SickRage-1 | lib/hachoir_parser/image/tiff.py | 74 | 2337 | """
TIFF image parser.
Authors: Victor Stinner, Sebastien Ponce, Robert Xiao
Creation date: 30 September 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import FieldSet, SeekableFieldSet, RootSeekableFieldSet, Bytes
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_parser.image.exif import TIFF
def getStrips(ifd):
data = {}
for i, entry in enumerate(ifd.array('entry')):
data[entry['tag'].display] = entry
# image data
if "StripOffsets" in data and "StripByteCounts" in data:
        offs = ifd.getEntryValues(data["StripOffsets"])
        byte_counts = ifd.getEntryValues(data["StripByteCounts"])
        for off, byte in zip(offs, byte_counts):
yield off.value, byte.value
class ImageFile(SeekableFieldSet):
def __init__(self, parent, name, description, ifd):
SeekableFieldSet.__init__(self, parent, name, description, None)
self._ifd = ifd
def createFields(self):
for off, byte in getStrips(self._ifd):
self.seekByte(off, relative=False)
yield Bytes(self, "strip[]", byte)
class TiffFile(RootSeekableFieldSet, Parser):
PARSER_TAGS = {
"id": "tiff",
"category": "image",
"file_ext": ("tif", "tiff"),
"mime": (u"image/tiff",),
"min_size": 8*8,
"magic": (("II\x2A\0", 0), ("MM\0\x2A", 0)),
"description": "TIFF picture"
}
# Correct endian is set in constructor
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
if self.stream.readBytes(0, 2) == "MM":
self.endian = BIG_ENDIAN
Parser.__init__(self, stream, **args)
def validate(self):
endian = self.stream.readBytes(0, 2)
if endian not in ("MM", "II"):
return "Invalid endian (%r)" % endian
if self["version"].value != 42:
return "Unknown TIFF version"
return True
def createFields(self):
for field in TIFF(self):
yield field
for ifd in self.array('ifd'):
offs = (off for off, byte in getStrips(ifd))
self.seekByte(min(offs), relative=False)
image = ImageFile(self, "image[]", "Image File", ifd)
yield image
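

# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of driving this parser through hachoir's generic entry
# point. It assumes hachoir-core and hachoir-parser are installed and that a
# file named 'photo.tif' exists; createParser() selects TiffFile from the
# magic bytes declared in PARSER_TAGS above.
if __name__ == '__main__':
    from hachoir_core.cmd_line import unicodeFilename
    from hachoir_parser import createParser
    parser = createParser(unicodeFilename('photo.tif'))
    if parser is not None:
        for field in parser:
            print field.path, field.description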
| gpl-3.0 |
isard-vdi/isard | engine/engine/engine/api/evaluate.py | 1 | 2700 | import inspect
import time
from flask import jsonify, request
from engine.controllers.eval_controller import EvalController
from engine.services.csv.eval import eval_to_csv
from engine.services.db.eval import insert_eval_result
from . import api
@api.route('/create_domains', methods=['GET'])
def create_domains():
eval_ctrl = EvalController()
data = eval_ctrl.create_domains()
return jsonify(eval=data), 200
@api.route('/destroy_domains', methods=['GET'])
def destroy_domains():
eval_ctrl = EvalController()
data = eval_ctrl.destroy_domains()
return jsonify(eval=data), 200
@api.route('/start_domains', methods=['GET'])
def start_domains():
eval_ctrl = EvalController()
data = eval_ctrl.start_domains()
return jsonify(eval=data), 200
@api.route('/stop_domains', methods=['GET'])
def stop_domains():
eval_ctrl = EvalController()
data = eval_ctrl.stop_domains()
return jsonify(eval=data), 200
@api.route('/clear_domains', methods=['GET'])
def clear_domains():
eval_ctrl = EvalController()
data = eval_ctrl.clear_domains()
return jsonify(eval={"clean":data}), 200
@api.route('/eval', methods=['GET'])
def eval():
eval_ctrl = EvalController()
data = eval_ctrl.run()
return jsonify(eval=data), 200
@api.route('/remove-eval', methods=['GET'])
def remove_eval():
eval_ctrl = EvalController()
data = eval_ctrl._removeEvalDomains()
return jsonify(eval=data), 200
@api.route('/eval/statistics', methods=['GET'])
def eval_statistics():
eval_ctrl = EvalController()
data = eval_ctrl._evaluate()
return jsonify(eval=data), 200
@api.route('/eval', methods=['POST'])
def new_eval():
"""
templates = [{'id': "_admin_ubuntu_17_eval_wget", 'weight': 100}]
evaluators = ["load"]
:return:
"""
kwargs = request.json
code = kwargs.get("code")
eval_ctrl_class = EvalController
args = inspect.getfullargspec(eval_ctrl_class.__init__).args
params = {k: v for k, v in kwargs.items() if k in args}
eval_ctrl = eval_ctrl_class(**params)
iterations = kwargs.get("iterations", 1)
    objs = []
for i in range(iterations):
data = eval_ctrl.run()
now = time.time()
obj = {
"id": "{}_{}".format(code, now),
"code": code,
"params": params,
"result": data,
"when": now
}
insert_eval_result(obj)
eval_to_csv(code, data)
d_load = data["load"]["total_started_domains"] if data.get("load") else None
d_ux = data["ux"]["total"]["score"] if data.get("ux") else None
objs.append((d_load, d_ux))
time.sleep(40)
return jsonify(objs), 200
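

# --- Illustrative client (added; not part of the original module) ---
# A minimal sketch of exercising the POST /eval endpoint above with the
# third-party 'requests' library. The host, port and URL prefix are
# assumptions that depend on how the 'api' blueprint is mounted; the
# payload mirrors the example in new_eval()'s docstring.
if __name__ == '__main__':
    import requests  # assumed available
    payload = {
        "code": "load_test_1",
        "iterations": 2,
        "templates": [{"id": "_admin_ubuntu_17_eval_wget", "weight": 100}],
        "evaluators": ["load"],
    }
    response = requests.post("http://localhost:5000/eval", json=payload)
    print(response.status_code, response.json())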
| agpl-3.0 |
AMGitsKriss/Battlegrounds | battlegrounds_write_graph.py | 2 | 1492 | import pandas as pd
# Enumerate colors.
class COLOR:
RED = "tomato"
GREEN = "yellowgreen"
BLUE = "lightblue"
NEWLINE_INDENT = "\n "
def fill(color):
return f"[style=filled fillcolor=\"{color}\"]"
def dual_label(weapon, n):
return f"[label=\"{weapon}\" taillabel=\"{n}\"]"
def solo_node(player, color):
return f"{NEWLINE_INDENT}\"{player}\" {fill(color)};"
def inter_node(actor, victim, weapon, n):
return f"{NEWLINE_INDENT}\"{actor}\" -> \"{victim}\" {dual_label(weapon, n)};"
def digraphWrite(data, name):
print("Writing digraph code...")
with open(f"{name}.dot","w") as f:
f.write("digraph {")
        # Node colours must be defined before the edges that reference them,
        # so emit the win/death nodes first
for i in data.index:
row = data.iloc[i]
temp = ""
if(row['Deed'] == "died"):
if (row['Weapon'] == "Blue Zone"):
temp = solo_node(row['Player'], COLOR.BLUE)
else:
temp = solo_node(row['Player'], COLOR.RED)
elif(row['Deed'] == "won"):
temp = solo_node(row['Player'], COLOR.GREEN)
f.write(temp)
# Then we can define the graph edges
n = 0
for i in data.index:
row = data.iloc[i]
if(row['Deed'] == "killed"):
n += 1
f.write(inter_node(row['Player'], row['Target'], row['Weapon'], n))
f.write("\n}")
print(f"Outputted graph script to {name}.dot...")
def main():
data = pd.read_csv("battlegrounds.csv", low_memory=False)
digraphWrite(data, "kill_map")
# Load data
if __name__ == '__main__':
main()
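

# --- Illustrative post-processing (added; not part of the original script) ---
# The generated kill_map.dot can be rendered with Graphviz, either from the
# command line ("dot -Tpng kill_map.dot -o kill_map.png") or, as sketched
# below, through the optional 'graphviz' Python package (an assumption, not
# a dependency of this script).
def render_dot(name="kill_map"):
    from graphviz import Source  # third-party; assumed installed
    Source.from_file(f"{name}.dot").render(format="png", cleanup=True)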
| mit |
astrofrog/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/gloo/wrappers.py | 21 | 24948 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from copy import deepcopy
from . import gl
from ..ext.six import string_types
from ..color import Color
#from ..util import logger
__all__ = ('set_viewport', 'set_depth_range', 'set_front_face', # noqa
'set_cull_face', 'set_line_width', 'set_polygon_offset', # noqa
'clear', 'set_clear_color', 'set_clear_depth', 'set_clear_stencil', # noqa
'set_blend_func', 'set_blend_color', 'set_blend_equation', # noqa
'set_scissor', 'set_stencil_func', 'set_stencil_mask', # noqa
'set_stencil_op', 'set_depth_func', 'set_depth_mask', # noqa
'set_color_mask', 'set_sample_coverage', # noqa
'get_state_presets', 'set_state', 'finish', 'flush', # noqa
'read_pixels', 'set_hint', # noqa
'get_gl_configuration', '_check_valid',
'GlooFunctions', 'global_gloo_functions', )
_setters = [s[4:] for s in __all__
if s.startswith('set_') and s != 'set_state']
# NOTE: If these are updated to have things beyond glEnable/glBlendFunc
# calls, set_preset_state will need to be updated to deal with it.
_gl_presets = {
'opaque': dict(
depth_test=True,
cull_face=False,
blend=False),
'translucent': dict(
depth_test=True,
cull_face=False,
blend=True,
blend_func=('src_alpha', 'one_minus_src_alpha')),
'additive': dict(
depth_test=False,
cull_face=False,
blend=True,
blend_func=('src_alpha', 'one')),
}
def get_current_canvas():
""" Proxy for context.get_current_canvas to avoud circular import.
This function replaces itself with the real function the first
time it is called. (Bah)
"""
from .context import get_current_canvas
globals()['get_current_canvas'] = get_current_canvas
return get_current_canvas()
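

# The self-replacing proxy above is a small lazy-import memoization idiom:
# the first call pays the import cost and rebinds the module-level name to
# the real function, so subsequent calls dispatch directly. Generic shape
# (illustrative only; 'heavy_module' and 'real' are placeholders):
#
#     def lazy():
#         from heavy_module import real
#         globals()['lazy'] = real
#         return real()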
# Helpers that are needed for efficient wrapping
def _check_valid(key, val, valid):
"""Helper to check valid options"""
if val not in valid:
raise ValueError('%s must be one of %s, not "%s"'
% (key, valid, val))
def _to_args(x):
"""Convert to args representation"""
if not isinstance(x, (list, tuple, np.ndarray)):
x = [x]
return x
def _check_conversion(key, valid_dict):
"""Check for existence of key in dict, return value or raise error"""
if key not in valid_dict and key not in valid_dict.values():
# Only show users the nice string values
keys = [v for v in valid_dict.keys() if isinstance(v, string_types)]
raise ValueError('value must be one of %s, not %s' % (keys, key))
return valid_dict[key] if key in valid_dict else key
class BaseGlooFunctions(object):
""" Class that provides a series of GL functions that do not fit
in the object oriented part of gloo. An instance of this class is
associated with each canvas.
"""
##########################################################################
# PRIMITIVE/VERTEX
#
# Viewport, DepthRangef, CullFace, FrontFace, LineWidth, PolygonOffset
#
def set_viewport(self, *args):
"""Set the OpenGL viewport
This is a wrapper for gl.glViewport.
Parameters
----------
*args : tuple
X and Y coordinates, plus width and height. Can be passed in as
individual components, or as a single tuple with four values.
"""
x, y, w, h = args[0] if len(args) == 1 else args
self.glir.command('FUNC', 'glViewport', int(x), int(y), int(w), int(h))
def set_depth_range(self, near=0., far=1.):
"""Set depth values
Parameters
----------
near : float
Near clipping plane.
far : float
Far clipping plane.
"""
self.glir.command('FUNC', 'glDepthRange', float(near), float(far))
def set_front_face(self, mode='ccw'):
"""Set which faces are front-facing
Parameters
----------
mode : str
Can be 'cw' for clockwise or 'ccw' for counter-clockwise.
"""
self.glir.command('FUNC', 'glFrontFace', mode)
def set_cull_face(self, mode='back'):
"""Set front, back, or both faces to be culled
Parameters
----------
mode : str
Culling mode. Can be "front", "back", or "front_and_back".
"""
self.glir.command('FUNC', 'glCullFace', mode)
def set_line_width(self, width=1.):
"""Set line width
Parameters
----------
width : float
The line width.
"""
width = float(width)
if width < 0:
raise RuntimeError('Cannot have width < 0')
self.glir.command('FUNC', 'glLineWidth', width)
def set_polygon_offset(self, factor=0., units=0.):
"""Set the scale and units used to calculate depth values
Parameters
----------
factor : float
Scale factor used to create a variable depth offset for
each polygon.
units : float
Multiplied by an implementation-specific value to create a
constant depth offset.
"""
self.glir.command('FUNC', 'glPolygonOffset', float(factor),
float(units))
##########################################################################
# FRAGMENT/SCREEN
#
# glClear, glClearColor, glClearDepthf, glClearStencil
#
def clear(self, color=True, depth=True, stencil=True):
"""Clear the screen buffers
This is a wrapper for gl.glClear.
Parameters
----------
color : bool | str | tuple | instance of Color
Clear the color buffer bit. If not bool, ``set_clear_color`` will
be used to set the color clear value.
depth : bool | float
Clear the depth buffer bit. If float, ``set_clear_depth`` will
be used to set the depth clear value.
stencil : bool | int
Clear the stencil buffer bit. If int, ``set_clear_stencil`` will
be used to set the stencil clear index.
"""
bits = 0
if isinstance(color, np.ndarray) or bool(color):
if not isinstance(color, bool):
self.set_clear_color(color)
bits |= gl.GL_COLOR_BUFFER_BIT
if depth:
if not isinstance(depth, bool):
self.set_clear_depth(depth)
bits |= gl.GL_DEPTH_BUFFER_BIT
if stencil:
if not isinstance(stencil, bool):
self.set_clear_stencil(stencil)
bits |= gl.GL_STENCIL_BUFFER_BIT
self.glir.command('FUNC', 'glClear', bits)
def set_clear_color(self, color='black', alpha=None):
"""Set the screen clear color
This is a wrapper for gl.glClearColor.
Parameters
----------
color : str | tuple | instance of Color
Color to use. See vispy.color.Color for options.
alpha : float | None
Alpha to use.
"""
self.glir.command('FUNC', 'glClearColor', *Color(color, alpha).rgba)
def set_clear_depth(self, depth=1.0):
"""Set the clear value for the depth buffer
This is a wrapper for gl.glClearDepth.
Parameters
----------
depth : float
The depth to use.
"""
self.glir.command('FUNC', 'glClearDepth', float(depth))
def set_clear_stencil(self, index=0):
"""Set the clear value for the stencil buffer
This is a wrapper for gl.glClearStencil.
Parameters
----------
index : int
The index to use when the stencil buffer is cleared.
"""
self.glir.command('FUNC', 'glClearStencil', int(index))
# glBlendFunc(Separate), glBlendColor, glBlendEquation(Separate)
def set_blend_func(self, srgb='one', drgb='zero',
salpha=None, dalpha=None):
"""Specify pixel arithmetic for RGB and alpha
Parameters
----------
srgb : str
Source RGB factor.
drgb : str
Destination RGB factor.
salpha : str | None
Source alpha factor. If None, ``srgb`` is used.
dalpha : str
Destination alpha factor. If None, ``drgb`` is used.
"""
salpha = srgb if salpha is None else salpha
dalpha = drgb if dalpha is None else dalpha
self.glir.command('FUNC', 'glBlendFuncSeparate',
srgb, drgb, salpha, dalpha)
def set_blend_color(self, color):
"""Set the blend color
Parameters
----------
color : str | tuple | instance of Color
Color to use. See vispy.color.Color for options.
"""
self.glir.command('FUNC', 'glBlendColor', *Color(color).rgba)
def set_blend_equation(self, mode_rgb, mode_alpha=None):
"""Specify the equation for RGB and alpha blending
Parameters
----------
mode_rgb : str
Mode for RGB.
mode_alpha : str | None
Mode for Alpha. If None, ``mode_rgb`` is used.
Notes
-----
See ``set_blend_equation`` for valid modes.
"""
mode_alpha = mode_rgb if mode_alpha is None else mode_alpha
self.glir.command('FUNC', 'glBlendEquationSeparate',
mode_rgb, mode_alpha)
# glScissor, glStencilFunc(Separate), glStencilMask(Separate),
# glStencilOp(Separate),
def set_scissor(self, x, y, w, h):
"""Define the scissor box
Parameters
----------
x : int
Left corner of the box.
y : int
Lower corner of the box.
w : int
The width of the box.
h : int
The height of the box.
"""
self.glir.command('FUNC', 'glScissor', int(x), int(y), int(w), int(h))
def set_stencil_func(self, func='always', ref=0, mask=8,
face='front_and_back'):
"""Set front or back function and reference value
Parameters
----------
func : str
See set_stencil_func.
ref : int
Reference value for the stencil test.
mask : int
Mask that is ANDed with ref and stored stencil value.
face : str
Can be 'front', 'back', or 'front_and_back'.
"""
self.glir.command('FUNC', 'glStencilFuncSeparate',
face, func, int(ref), int(mask))
def set_stencil_mask(self, mask=8, face='front_and_back'):
"""Control the front or back writing of individual bits in the stencil
Parameters
----------
mask : int
Mask that is ANDed with ref and stored stencil value.
face : str
Can be 'front', 'back', or 'front_and_back'.
"""
self.glir.command('FUNC', 'glStencilMaskSeparate', face, int(mask))
def set_stencil_op(self, sfail='keep', dpfail='keep', dppass='keep',
face='front_and_back'):
"""Set front or back stencil test actions
Parameters
----------
sfail : str
Action to take when the stencil fails. Must be one of
'keep', 'zero', 'replace', 'incr', 'incr_wrap',
'decr', 'decr_wrap', or 'invert'.
dpfail : str
Action to take when the stencil passes.
dppass : str
Action to take when both the stencil and depth tests pass,
or when the stencil test passes and either there is no depth
buffer or depth testing is not enabled.
face : str
Can be 'front', 'back', or 'front_and_back'.
"""
self.glir.command('FUNC', 'glStencilOpSeparate',
face, sfail, dpfail, dppass)
# glDepthFunc, glDepthMask, glColorMask, glSampleCoverage
def set_depth_func(self, func='less'):
"""Specify the value used for depth buffer comparisons
Parameters
----------
func : str
The depth comparison function. Must be one of 'never', 'less',
'equal', 'lequal', 'greater', 'gequal', 'notequal', or 'always'.
"""
self.glir.command('FUNC', 'glDepthFunc', func)
def set_depth_mask(self, flag):
"""Toggle writing into the depth buffer
Parameters
----------
flag : bool
Whether depth writing should be enabled.
"""
self.glir.command('FUNC', 'glDepthMask', bool(flag))
def set_color_mask(self, red, green, blue, alpha):
"""Toggle writing of frame buffer color components
Parameters
----------
red : bool
Red toggle.
green : bool
Green toggle.
blue : bool
Blue toggle.
alpha : bool
Alpha toggle.
"""
self.glir.command('FUNC', 'glColorMask', bool(red), bool(green),
bool(blue), bool(alpha))
def set_sample_coverage(self, value=1.0, invert=False):
"""Specify multisample coverage parameters
Parameters
----------
value : float
Sample coverage value (will be clamped between 0. and 1.).
invert : bool
Specify if the coverage masks should be inverted.
"""
self.glir.command('FUNC', 'glSampleCoverage', float(value),
bool(invert))
##########################################################################
# STATE
#
# glEnable/Disable
#
def get_state_presets(self):
"""The available GL state presets
Returns
-------
presets : dict
The dictionary of presets usable with ``set_options``.
"""
return deepcopy(_gl_presets)
def set_state(self, preset=None, **kwargs):
"""Set OpenGL rendering state, optionally using a preset
Parameters
----------
preset : str | None
Can be one of ('opaque', 'translucent', 'additive') to use
            reasonable defaults for these typical use cases.
**kwargs : keyword arguments
Other supplied keyword arguments will override any preset defaults.
Options to be enabled or disabled should be supplied as booleans
(e.g., ``'depth_test=True'``, ``cull_face=False``), non-boolean
entries will be passed as arguments to ``set_*`` functions (e.g.,
``blend_func=('src_alpha', 'one')`` will call ``set_blend_func``).
Notes
-----
This serves three purposes:
1. Set GL state using reasonable presets.
2. Wrapping glEnable/glDisable functionality.
        3. Convenience wrapping of other ``gloo.set_*`` functions.
For example, one could do the following:
>>> from vispy import gloo
>>> gloo.set_state('translucent', depth_test=False, clear_color=(1, 1, 1, 1)) # noqa, doctest:+SKIP
This would take the preset defaults for 'translucent', turn
depth testing off (which would normally be on for that preset),
and additionally set the glClearColor parameter to be white.
Another example to showcase glEnable/glDisable wrapping:
>>> gloo.set_state(blend=True, depth_test=True, polygon_offset_fill=False) # noqa, doctest:+SKIP
This would be equivalent to calling
>>> from vispy.gloo import gl
>>> gl.glDisable(gl.GL_BLEND)
>>> gl.glEnable(gl.GL_DEPTH_TEST)
>>> gl.glEnable(gl.GL_POLYGON_OFFSET_FILL)
Or here's another example:
>>> gloo.set_state(clear_color=(0, 0, 0, 1), blend=True, blend_func=('src_alpha', 'one')) # noqa, doctest:+SKIP
Thus arbitrary GL state components can be set directly using
``set_state``. Note that individual functions are exposed e.g.,
as ``set_clear_color``, with some more informative docstrings
about those particular functions.
"""
kwargs = deepcopy(kwargs)
# Load preset, if supplied
if preset is not None:
_check_valid('preset', preset, tuple(list(_gl_presets.keys())))
for key, val in _gl_presets[preset].items():
# only overwrite user input with preset if user's input is None
if key not in kwargs:
kwargs[key] = val
# cull_face is an exception because GL_CULL_FACE, glCullFace both exist
if 'cull_face' in kwargs:
cull_face = kwargs.pop('cull_face')
if isinstance(cull_face, bool):
funcname = 'glEnable' if cull_face else 'glDisable'
self.glir.command('FUNC', funcname, 'cull_face')
else:
self.glir.command('FUNC', 'glEnable', 'cull_face')
self.set_cull_face(*_to_args(cull_face))
# Iterate over kwargs
for key, val in kwargs.items():
if key in _setters:
# Setter
args = _to_args(val)
# these actually need tuples
if key in ('blend_color', 'clear_color') and \
not isinstance(args[0], string_types):
args = [args]
getattr(self, 'set_' + key)(*args)
else:
# Enable / disable
funcname = 'glEnable' if val else 'glDisable'
self.glir.command('FUNC', funcname, key)
#
# glFinish, glFlush, glReadPixels, glHint
#
def finish(self):
"""Wait for GL commands to to finish
This creates a GLIR command for glFinish and then processes the
GLIR commands. If the GLIR interpreter is remote (e.g. WebGL), this
function will return before GL has finished processing the commands.
"""
if hasattr(self, 'flush_commands'):
context = self
else:
context = get_current_canvas().context
context.glir.command('FUNC', 'glFinish')
context.flush_commands() # Process GLIR commands
def flush(self):
"""Flush GL commands
This is a wrapper for glFlush(). This also flushes the GLIR
command queue.
"""
if hasattr(self, 'flush_commands'):
context = self
else:
context = get_current_canvas().context
context.glir.command('FUNC', 'glFlush')
context.flush_commands() # Process GLIR commands
def set_hint(self, target, mode):
"""Set OpenGL drawing hint
Parameters
----------
target : str
The target, e.g. 'fog_hint', 'line_smooth_hint',
'point_smooth_hint'.
mode : str
The mode to set (e.g., 'fastest', 'nicest', 'dont_care').
"""
if not all(isinstance(tm, string_types) for tm in (target, mode)):
raise TypeError('target and mode must both be strings')
self.glir.command('FUNC', 'glHint', target, mode)
class GlooFunctions(BaseGlooFunctions):
@property
def glir(self):
""" The GLIR queue corresponding to the current canvas
"""
canvas = get_current_canvas()
if canvas is None:
msg = ("If you want to use gloo without vispy.app, " +
"use a gloo.context.FakeCanvas.")
raise RuntimeError('Gloo requires a Canvas to run.\n' + msg)
return canvas.context.glir
## Create global functions object and inject names here
# GlooFunctions without queue: use queue of canvas that is current at call-time
global_gloo_functions = GlooFunctions()
for name in dir(global_gloo_functions):
    if name.startswith('_') or name in ('glir',):
continue
fun = getattr(global_gloo_functions, name)
if callable(fun):
globals()[name] = fun
## Functions that do not use the glir queue
def read_pixels(viewport=None, alpha=True, out_type='unsigned_byte'):
"""Read pixels from the currently selected buffer.
Under most circumstances, this function reads from the front buffer.
Unlike all other functions in vispy.gloo, this function directly executes
an OpenGL command.
Parameters
----------
viewport : array-like | None
4-element list of x, y, w, h parameters. If None (default),
the current GL viewport will be queried and used.
alpha : bool
If True (default), the returned array has 4 elements (RGBA).
If False, it has 3 (RGB).
out_type : str | dtype
Can be 'unsigned_byte' or 'float'. Note that this does not
use casting, but instead determines how values are read from
the current buffer. Can also be numpy dtypes ``np.uint8``,
``np.ubyte``, or ``np.float32``.
Returns
-------
pixels : array
3D array of pixels in np.uint8 or np.float32 format.
The array shape is (h, w, 3) or (h, w, 4), with the top-left corner
of the framebuffer at index [0, 0] in the returned array.
"""
# Check whether the GL context is direct or remote
context = get_current_canvas().context
if context.shared.parser.is_remote():
raise RuntimeError('Cannot use read_pixels() with remote GLIR parser')
finish() # noqa - finish first, also flushes GLIR commands
type_dict = {'unsigned_byte': gl.GL_UNSIGNED_BYTE,
np.uint8: gl.GL_UNSIGNED_BYTE,
'float': gl.GL_FLOAT,
np.float32: gl.GL_FLOAT}
type_ = _check_conversion(out_type, type_dict)
if viewport is None:
viewport = gl.glGetParameter(gl.GL_VIEWPORT)
viewport = np.array(viewport, int)
if viewport.ndim != 1 or viewport.size != 4:
raise ValueError('viewport should be 1D 4-element array-like, not %s'
% (viewport,))
x, y, w, h = viewport
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) # PACK, not UNPACK
fmt = gl.GL_RGBA if alpha else gl.GL_RGB
im = gl.glReadPixels(x, y, w, h, fmt, type_)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 4)
# reshape, flip, and return
if not isinstance(im, np.ndarray):
np_dtype = np.uint8 if type_ == gl.GL_UNSIGNED_BYTE else np.float32
im = np.frombuffer(im, np_dtype)
im.shape = h, w, (4 if alpha else 3) # RGBA vs RGB
im = im[::-1, :, :] # flip the image
return im
def get_gl_configuration():
"""Read the current gl configuration
This function uses constants that are not in the OpenGL ES 2.1
namespace, so only use this on desktop systems.
Returns
-------
config : dict
The currently active OpenGL configuration.
"""
# XXX eventually maybe we can ask `gl` whether or not we can access these
gl.check_error('pre-config check')
config = dict()
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
fb_param = gl.glGetFramebufferAttachmentParameter
# copied since they aren't in ES:
GL_FRONT_LEFT = 1024
GL_DEPTH = 6145
GL_STENCIL = 6146
GL_SRGB = 35904
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING = 33296
GL_STEREO = 3123
GL_DOUBLEBUFFER = 3122
sizes = dict(red=(GL_FRONT_LEFT, 33298),
green=(GL_FRONT_LEFT, 33299),
blue=(GL_FRONT_LEFT, 33300),
alpha=(GL_FRONT_LEFT, 33301),
depth=(GL_DEPTH, 33302),
stencil=(GL_STENCIL, 33303))
for key, val in sizes.items():
config[key + '_size'] = fb_param(gl.GL_FRAMEBUFFER, val[0], val[1])
val = fb_param(gl.GL_FRAMEBUFFER, GL_FRONT_LEFT,
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING)
if val not in (gl.GL_LINEAR, GL_SRGB):
raise RuntimeError('unknown value for SRGB: %s' % val)
config['srgb'] = True if val == GL_SRGB else False # GL_LINEAR
config['stereo'] = True if gl.glGetParameter(GL_STEREO) else False
config['double_buffer'] = (True if gl.glGetParameter(GL_DOUBLEBUFFER)
else False)
config['samples'] = gl.glGetParameter(gl.GL_SAMPLES)
gl.check_error('post-config check')
return config
| bsd-2-clause |
frmichel/vo-support-tools | CE/monitor-ce/processors/running_ratio_slices.py | 1 | 4606 | #!/usr/bin/python
#
# This tool uses the data in the csv files produced by script collect-ce-job-status.py to
# compute the number of CEs grouped by slice of ratio R/(R+W) as a function of time:
# exactly 0, between 0 and 0.5, between 0.5 and 1, exactly 1, or not calculable.
#
# Results are stored in file running_ratio_slices.csv.
import os
import csv
import sys
import globvars
# -------------------------------------------------------------------------
# Compute the number of CEs grouped by ratio R/(R+W) as a function of time
# Input:
# dataFiles: list of tuples: (fileName, datetime, date, hour, rows, sum_VO_Waiting, sum_VO_Running)
# where
# - datetime is formated as "YYYY-MM-DD HH:MM:SS"
# - date is only the date part YYYY:MM:DD, and hour is only the hour HH (used for filtering data in excel file)
# - rows is a dictionnary wich keys are the hostnames and values are another dictionnary with the following keys:
# 'Site'
# 'ImplName', 'ImplVer'
# 'CE_Total', 'VO_Total'
# 'CE_Running', 'VO_Running'
# 'CE_Waiting', 'VO_Waiting'
# 'CE_Running', 'VO_Running'
# 'CE_FreeSlots', 'VO_FreeSlots'
# 'CE_MaxTotal', 'VO_MaxTotal'
# 'CE_MaxWaiting', 'VO_MaxWaiting'
# 'CE_MaxRunning', 'VO_MaxRunning'
# 'CE_WRT', 'VO_WRT'
# 'CE_MaxTotal', 'VO_MaxTotal'
# 'CE_ERT', 'VO_ERT'
# 'CE_Status'
# -------------------------------------------------------------------------
def process(dataFiles):
# Global variables
DECIMAL_MARK = globvars.DECIMAL_MARK
DEBUG = globvars.DEBUG
OUTPUT_DIR = globvars.OUTPUT_DIR
writer=''
if globvars.STDOUT:
writer = csv.writer(sys.stdout, delimiter=globvars.CSV_DELIMITER,lineterminator=';')
print('<'+os.path.splitext(os.path.basename(__file__))[0]+'>'),
writer.writerow(["Date time", "Nb queues", "0", "0 to 0.5", "0.5 to 1", "1", "n/a"])
else:
print "Computing the number of CEs grouped by slice of ratio R/(R+W) as a function of time..."
outputFile = OUTPUT_DIR + os.sep + "running_ratio_slices.csv"
outputf = open(outputFile, 'wb')
writer = csv.writer(outputf, delimiter=globvars.CSV_DELIMITER)
writer.writerow(["# Date time", "Nb queues", "0", "0 to 0.5", "0.5 to 1", "1", "n/a"])
# Loop on all data files that were acquired
for (fileName, datetime, date, hour, rows, sum_VO_Waiting, sum_VO_Running) in dataFiles:
nb_0 = nb_0_05 = nb_05_1 = nb_1 = nb_na = 0.0
#Loop on all rows of the file
for (hostname, structRow) in rows.iteritems():
W = float(structRow['VO_Waiting'])
R = float(structRow['VO_Running'])
if R+W == 0:
nb_na += 1
else:
ratio = R/(R+W)
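                # Note: ratios of exactly 0 and exactly 1 are counted both in
                # their own CSV column and in the enclosing half-open slice;
                # the output columns overlap by design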
if ratio == 0: nb_0 += 1
if ratio >= 0 and ratio < 0.5: nb_0_05 += 1
else:
if ratio >= 0.5 and ratio <= 1: nb_05_1 += 1
if ratio == 1: nb_1 += 1
        # The STDOUT and file writers accept identical rows, so build them once
        nbQ = len(rows)
        if globvars.PERCENT:
            writer.writerow([datetime, nbQ,
                str(round(nb_0*100/nbQ, 2)).replace('.', DECIMAL_MARK),
                str(round(nb_0_05*100/nbQ, 2)).replace('.', DECIMAL_MARK),
                str(round(nb_05_1*100/nbQ, 2)).replace('.', DECIMAL_MARK),
                str(round(nb_1*100/nbQ, 2)).replace('.', DECIMAL_MARK),
                str(round(nb_na*100/nbQ, 2)).replace('.', DECIMAL_MARK)
                ])
        else:
            writer.writerow([datetime, nbQ,
                str(round(nb_0/nbQ, 4)).replace('.', DECIMAL_MARK),
                str(round(nb_0_05/nbQ, 4)).replace('.', DECIMAL_MARK),
                str(round(nb_05_1/nbQ, 4)).replace('.', DECIMAL_MARK),
                str(round(nb_1/nbQ, 4)).replace('.', DECIMAL_MARK),
                str(round(nb_na/nbQ, 4)).replace('.', DECIMAL_MARK)
                ])
if globvars.STDOUT: print('</'+os.path.splitext(os.path.basename(__file__))[0]+'>')
if not globvars.STDOUT: outputf.close()
| mit |
victorg590/ampadb | usermanager/models.py | 2 | 2784 | from django.core import validators
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.db import models
from contactboard.models import Alumne
class Profile(models.Model):
class Meta:
verbose_name = 'perfil'
ordering = ['alumne']
user = models.OneToOneField(
User,
verbose_name='usuari',
blank=True,
null=True,
on_delete=models.SET_NULL)
unregisteredUser = models.OneToOneField(
'UnregisteredUser',
verbose_name='usuari (no registrat)',
blank=True,
null=True,
on_delete=models.SET_NULL)
alumne = models.OneToOneField(
Alumne, primary_key=True, on_delete=models.CASCADE)
@classmethod
def cleanup(cls):
"""Elimina tots els registres sense `user` ni `unregisteredUser`"""
return cls.objects.filter(
user__isnull=True, unregisteredUser__isnull=True).delete()
def clean(self):
super().clean()
if self.user and self.unregisteredUser:
raise ValidationError("Només es pot definir un de: `user` i"
" `unregisteredUser`")
def __str__(self):
return str(self.alumne)
    # Associated signal: .signals.profile_pre_delete
def validate_username_unique(value):
if User.objects.filter(username=value).exists():
raise ValidationError('Username exists!')
def validate_alumne_unique(value):
if Profile.objects.filter(alumne=value).exists():
raise ValidationError('Alumne already has a user associated!')
class UnregisteredUser(models.Model):
class Meta:
verbose_name = 'usuari no registrat'
verbose_name_plural = 'usuaris no registrats'
ordering = ['username']
username = models.CharField(
"nom d'usuari",
max_length=30,
primary_key=True,
validators=[
validators.RegexValidator(r'^[\w.@+-]{1,30}$'),
validate_username_unique
])
    # The code is single-use, so, unlike the password, there is no need to
    # protect it with a hash
codi = models.CharField(
max_length=6,
blank=False,
validators=[validators.RegexValidator(r'^[0-9]{6}$')],
help_text=(
"Un codi numèric de 6 dígits per confirmar que l'usuari "
"pertany a aquesta persona. Si no s'entra cap, es generarà un "
"automàticament"))
def clean(self):
super().clean()
if User.objects.filter(username=self.username).exists():
raise ValidationError("Aquest nom d'usuari ja exiteix i està "
"registrat")
def __str__(self):
return self.username + ' (*)'
| mit |
rahul67/hue | desktop/core/ext-py/Django-1.6.10/tests/forms_tests/tests/test_media.py | 131 | 45513 | # -*- coding: utf-8 -*-
from django.forms import TextInput, Media, CharField, Form, MultiWidget
from django.template import Template, Context
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
STATIC_URL=None,
MEDIA_URL='http://media.example.com/media/',
)
class FormsMediaTestCase(TestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
# If a widget extends another but defines media, it extends the parent widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1','/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1','/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
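# Editorial note (not part of the original test): the <link> tags above are
# emitted in alphabetical order of their CSS media-type keys ('print',
# 'screen', 'screen, print'), which is why /file4 renders first.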
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
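# Editorial note (not part of the original suite): rendering {{ form.media }}
# without the 'js'/'css' subscript yields the CSS <link> tags followed by the
# <script> tags, i.e. the same output as str(f3.media) above.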
@override_settings(
STATIC_URL='http://media.example.com/static/',
MEDIA_URL='http://media.example.com/media/',
)
class StaticFormsMediaTestCase(TestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.STATIC_URL (which this test case overrides)
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
# If a widget extends another but defines media, it extends the parent widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1','/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1','/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
| apache-2.0 |
mcmontero/tinyAPI | base/services/geo/api/tests/CountryCode_tests.py | 1 | 1390 | # ----- Info ------------------------------------------------------------------
__author__ = 'Michael Montero <[email protected]>'
# ----- Imports ---------------------------------------------------------------
from tinyAPI.base.services.geo.api.CountryCode import CountryCode
import tinyAPI
import unittest
# ----- Tests -----------------------------------------------------------------
class CountryCodeTestCase(unittest.TestCase):
def test_get_errors(self):
try:
CountryCode().get(-1)
self.fail('Was able to get country code even though the ID '
+ 'provided was invalid.')
except RuntimeError as e:
self.assertEqual('no such country code ID "-1"', str(e))
def test_get_country_name_errors(self):
try:
CountryCode().get_country_name(-1)
self.fail('Was able to get country name even though the ID '
+ 'provided was invalid.')
except RuntimeError as e:
self.assertEqual('no such country code ID "-1"', str(e))
def test_get(self):
self.assertEqual("1", CountryCode().get(1))
def test_get_country_name(self):
self.assertEqual("United States", CountryCode().get_country_name(1))
# ----- Main ------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| mit |
CenturylinkTechnology/ansible-modules-extras | monitoring/monit.py | 53 | 6888 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Darryl Stoflet <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import time
DOCUMENTATION = '''
---
module: monit
short_description: Manage the state of a program monitored via Monit
description:
- Manage the state of a program monitored via I(Monit)
version_added: "1.2"
options:
name:
description:
- The name of the I(monit) program/process to manage
required: true
default: null
state:
description:
- The state of service
required: true
default: null
choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
timeout:
description:
- If there are pending actions for the service monitored by monit, then Ansible will check
for up to this many seconds to verify that the requested action has been performed.
Ansible will sleep for five seconds between each check.
required: false
default: 300
version_added: "2.1"
requirements: [ ]
author: "Darryl Stoflet (@dstoflet)"
'''
EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
- monit: name=httpd state=started
'''
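# Additional illustrative example (hypothetical, not shipped with the module):
# restart a service and allow monit up to two minutes to clear any pending
# action, using the documented 'timeout' option:
#
# - monit: name=httpd state=restarted timeout=120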
def main():
arg_spec = dict(
name=dict(required=True),
timeout=dict(default=300, type='int'),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
state = module.params['state']
timeout = module.params['timeout']
MONIT = module.get_bin_path('monit', True)
def status():
"""Return the status of the process in monit, or the empty string if not present."""
rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True)
for line in out.split('\n'):
# Sample output lines:
# Process 'name' Running
# Process 'name' Running - restart pending
parts = line.split()
if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
return ' '.join(parts[2:]).lower()
else:
return ''
def run_command(command):
"""Runs a monit command, and returns the new status."""
module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
return status()
def wait_for_monit_to_stop_pending():
"""Fails this run if there is no status or it's pending/initalizing for timeout"""
timeout_time = time.time() + timeout
sleep_time = 5
running_status = status()
while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
if time.time() >= timeout_time:
module.fail_json(
msg='waited too long for "pending" or "initializing" status to go away ({0})'.format(
running_status
),
state=state
)
time.sleep(sleep_time)
running_status = status()
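# Editorial note: with the default timeout of 300 seconds and the fixed
# 5-second sleep above, this helper polls `monit summary` at most ~60 times
# before failing the run.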
if state == 'reloaded':
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command('%s reload' % MONIT)
if rc != 0:
module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
present = status() != ''
if not present and not state == 'present':
module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)
if state == 'present':
if not present:
if module.check_mode:
module.exit_json(changed=True)
status = run_command('reload')
if status == '':
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
module.exit_json(changed=False, name=name, state=state)
wait_for_monit_to_stop_pending()
running = 'running' in status()
if running and state in ['started', 'monitored']:
module.exit_json(changed=False, name=name, state=state)
if running and state == 'stopped':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('stop')
if status in ['not monitored'] or 'stop pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not stopped' % name, status=status)
if running and state == 'unmonitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('unmonitor')
if status in ['not monitored'] or 'unmonitor pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not unmonitored' % name, status=status)
elif state == 'restarted':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('restart')
if status in ['initializing', 'running'] or 'restart pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not restarted' % name, status=status)
elif not running and state == 'started':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('start')
if status in ['initializing', 'running'] or 'start pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not started' % name, status=status)
elif not running and state == 'monitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('monitor')
if status not in ['not monitored']:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not monitored' % name, status=status)
module.exit_json(changed=False, name=name, state=state)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
maartenq/ansible | lib/ansible/modules/remote_management/manageiq/manageiq_policies.py | 64 | 11828 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Daniel Korn <[email protected]>
# (c) 2017, Yaacov Zamir <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
module: manageiq_policies
short_description: Management of resource policy_profiles in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.5'
author: Daniel Korn (@dkorn)
description:
- The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ.
options:
state:
description:
- absent - policy_profiles should not exist,
- present - policy_profiles should exist,
- list - list current policy_profiles and policies.
choices: ['absent', 'present', 'list']
default: 'present'
policy_profiles:
description:
- list of dictionaries, each includes the policy_profile 'name' key.
- required if state is present or absent.
resource_type:
description:
- the type of the resource to which the profile should be [un]assigned
required: true
choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
'data store', 'group', 'resource pool', 'service', 'service template',
'template', 'tenant', 'user']
resource_name:
description:
- the name of the resource to which the profile should be [un]assigned
required: true
'''
EXAMPLES = '''
- name: Assign new policy_profile for a provider in ManageIQ
manageiq_policies:
resource_name: 'EngLab'
resource_type: 'provider'
policy_profiles:
- name: openscap profile
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Unassign a policy_profile for a provider in ManageIQ
manageiq_policies:
state: absent
resource_name: 'EngLab'
resource_type: 'provider'
policy_profiles:
- name: openscap profile
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: List current policy_profile and policies for a provider in ManageIQ
manageiq_policies:
state: list
resource_name: 'EngLab'
resource_type: 'provider'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
'''
RETURN = '''
manageiq_policies:
description:
- List current policy_profile and policies for a provider in ManageIQ
returned: always
type: dict
sample: '{
"changed": false,
"profiles": [
{
"policies": [
{
"active": true,
"description": "OpenSCAP",
"name": "openscap policy"
},
{
"active": true,
"description": "Analyse incoming container images",
"name": "analyse incoming container images"
},
{
"active": true,
"description": "Schedule compliance after smart state analysis",
"name": "schedule compliance after smart state analysis"
}
],
"profile_description": "OpenSCAP profile",
"profile_name": "openscap profile"
}
]
}'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
class ManageIQPolicies(object):
"""
Object to execute policies management operations of manageiq resources.
"""
def __init__(self, manageiq, resource_type, resource_id):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
self.resource_type = resource_type
self.resource_id = resource_id
self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
api_url=self.api_url,
resource_type=resource_type,
resource_id=resource_id)
def query_profile_href(self, profile):
""" Add or Update the policy_profile href field
Example:
{name: STR, ...} => {name: STR, href: STR}
"""
resource = self.manageiq.find_collection_resource_or_fail(
"policy_profiles", **profile)
return dict(name=profile['name'], href=resource['href'])
def query_resource_profiles(self):
""" Returns a set of the profile objects objects assigned to the resource
"""
url = '{resource_url}/policy_profiles?expand=resources'
try:
response = self.client.get(url.format(resource_url=self.resource_url))
except Exception as e:
msg = "Failed to query {resource_type} policies: {error}".format(
resource_type=self.resource_type,
error=e)
self.module.fail_json(msg=msg)
resources = response.get('resources', [])
# clean the returned rest api profile object to look like:
# {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
profiles = [self.clean_profile_object(profile) for profile in resources]
return profiles
def query_profile_policies(self, profile_id):
""" Returns a set of the policy objects assigned to the resource
"""
url = '{api_url}/policy_profiles/{profile_id}?expand=policies'
try:
response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id))
except Exception as e:
msg = "Failed to query {resource_type} policies: {error}".format(
resource_type=self.resource_type,
error=e)
self.module.fail_json(msg=msg)
resources = response.get('policies', [])
# clean the returned rest api policy object to look like:
# {name: STR, description: STR, active: BOOL}
policies = [self.clean_policy_object(policy) for policy in resources]
return policies
def clean_policy_object(self, policy):
""" Clean a policy object to have human readable form of:
{
name: STR,
description: STR,
active: BOOL
}
"""
name = policy.get('name')
description = policy.get('description')
active = policy.get('active')
return dict(
name=name,
description=description,
active=active)
def clean_profile_object(self, profile):
""" Clean a profile object to have human readable form of:
{
profile_name: STR,
profile_description: STR,
policies: ARR<POLICIES>
}
"""
profile_id = profile['id']
name = profile.get('name')
description = profile.get('description')
policies = self.query_profile_policies(profile_id)
return dict(
profile_name=name,
profile_description=description,
policies=policies)
def profiles_to_update(self, profiles, action):
""" Create a list of policies we need to update in ManageIQ.
Returns:
Whether or not a change took place and a message describing the
operation executed.
"""
profiles_to_post = []
assigned_profiles = self.query_resource_profiles()
# make a list of assigned full profile names strings
# e.g. ['openscap profile', ...]
assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles])
for profile in profiles:
assigned = profile.get('name') in assigned_profiles_set
if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
# add/update the policy profile href field
# {name: STR, ...} => {name: STR, href: STR}
profile = self.query_profile_href(profile)
profiles_to_post.append(profile)
return profiles_to_post
def assign_or_unassign_profiles(self, profiles, action):
""" Perform assign/unassign action
"""
# get the list of profiles that need to be changed
profiles_to_post = self.profiles_to_update(profiles, action)
if not profiles_to_post:
return dict(
changed=False,
msg="Profiles {profiles} already {action}ed, nothing to do".format(
action=action,
profiles=profiles))
# try to assign or unassign profiles to resource
url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url)
try:
response = self.client.post(url, action=action, resources=profiles_to_post)
except Exception as e:
msg = "Failed to {action} profile: {error}".format(
action=action,
error=e)
self.module.fail_json(msg=msg)
# check that all entities in the result were successful
for result in response['results']:
if not result['success']:
msg = "Failed to {action}: {message}".format(
action=action,
message=result['message'])
self.module.fail_json(msg=msg)
# successfully changed all needed profiles
return dict(
changed=True,
msg="Successfully {action}ed profiles: {profiles}".format(
action=action,
profiles=profiles))
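# Editorial sketch (hypothetical data, mirroring the EXAMPLES section): the
# `profiles` argument is a list of dicts such as
# [{'name': 'openscap profile'}]
# and profiles_to_update() fills in the matching 'href' before the POST.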
def main():
actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
argument_spec = dict(
policy_profiles=dict(type='list'),
resource_name=dict(required=True, type='str'),
resource_type=dict(required=True, type='str',
choices=manageiq_entities().keys()),
state=dict(required=False, type='str',
choices=['present', 'absent', 'list'], default='present'),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'present', ['policy_profiles']),
('state', 'absent', ['policy_profiles'])
],
)
policy_profiles = module.params['policy_profiles']
resource_type_key = module.params['resource_type']
resource_name = module.params['resource_name']
state = module.params['state']
# get the action and resource type
action = actions[state]
resource_type = manageiq_entities()[resource_type_key]
manageiq = ManageIQ(module)
# query resource id, fail if resource does not exist
resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id']
manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id)
if action == 'list':
# return a list of current profiles for this object
current_profiles = manageiq_policies.query_resource_profiles()
res_args = dict(changed=False, profiles=current_profiles)
else:
# assign or unassign the profiles
res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action)
module.exit_json(**res_args)
if __name__ == "__main__":
main()
| gpl-3.0 |
thedrow/django | django/conf/locale/lt/formats.py | 504 | 1830 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'Y \m. E j \d.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'Y \m. E j \d., H:i'
YEAR_MONTH_FORMAT = r'Y \m. F'
MONTH_DAY_FORMAT = r'E j \d.'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
]
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
'%H.%M.%S', # '14.30.59'
'%H.%M.%S.%f', # '14.30.59.000200'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59'
'%d.%m.%y %H.%M.%S.%f', # '25.10.06 14.30.59.000200'
'%d.%m.%y %H.%M', # '25.10.06 14.30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
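# Illustrative usage (editorial, not part of the original file): with this
# locale active, the constants are consumed through django.utils.formats, e.g.
#
# import datetime
# from django.utils import formats
# formats.date_format(datetime.date(2006, 10, 25), 'SHORT_DATE_FORMAT')
# # -> '2006-10-25'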
| bsd-3-clause |
arborh/tensorflow | tensorflow/python/autograph/utils/type_check.py | 22 | 1168 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used in autograph-generated code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_util
def is_tensor(*args):
"""Check if any arguments are tensors.
Args:
*args: Python objects that may or may not be tensors.
Returns:
True if any *args are TensorFlow types, False if none are.
"""
return any(tensor_util.is_tensor(a) for a in args)
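# Example usage (editorial sketch, assuming TensorFlow is importable):
#
# import tensorflow as tf
# is_tensor(1, 'a') # -> False
# is_tensor(1, tf.constant(2.0)) # -> True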
| apache-2.0 |
tchellomello/home-assistant | homeassistant/scripts/macos/__init__.py | 7 | 1824 | """Script to install/uninstall HA into OS X."""
import os
import time
# mypy: allow-untyped-calls, allow-untyped-defs
def install_osx():
"""Set up to run via launchd on OS X."""
with os.popen("which hass") as inp:
hass_path = inp.read().strip()
with os.popen("whoami") as inp:
user = inp.read().strip()
template_path = os.path.join(os.path.dirname(__file__), "launchd.plist")
with open(template_path, encoding="utf-8") as inp:
plist = inp.read()
plist = plist.replace("$HASS_PATH$", hass_path)
plist = plist.replace("$USER$", user)
path = os.path.expanduser("~/Library/LaunchAgents/org.homeassistant.plist")
try:
with open(path, "w", encoding="utf-8") as outp:
outp.write(plist)
except OSError as err:
print(f"Unable to write to {path}", err)
return
os.popen(f"launchctl load -w -F {path}")
print(
"Home Assistant has been installed. \
Open it here: http://localhost:8123"
)
def uninstall_osx():
"""Unload from launchd on OS X."""
path = os.path.expanduser("~/Library/LaunchAgents/org.homeassistant.plist")
os.popen(f"launchctl unload {path}")
print("Home Assistant has been uninstalled.")
def run(args):
"""Handle OSX commandline script."""
commands = "install", "uninstall", "restart"
if not args or args[0] not in commands:
print("Invalid command. Available commands:", ", ".join(commands))
return 1
if args[0] == "install":
install_osx()
return 0
if args[0] == "uninstall":
uninstall_osx()
return 0
if args[0] == "restart":
uninstall_osx()
# A small delay is needed on some systems to let the unload finish.
time.sleep(0.5)
install_osx()
return 0
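# Illustrative invocation (assumed from the hass scripts framework, not shown
# in this file):
#
# hass --script macos install
# hass --script macos uninstall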
| apache-2.0 |
costypetrisor/scikit-learn | sklearn/externals/joblib/numpy_pickle.py | 35 | 15580 | """
Utilities for fast persistence of big data, with optional compression.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import pickle
import traceback
import sys
import os
import zlib
import warnings
from ._compat import _basestring
from io import BytesIO
if sys.version_info[0] >= 3:
Unpickler = pickle._Unpickler
Pickler = pickle._Pickler
def asbytes(s):
if isinstance(s, bytes):
return s
return s.encode('latin1')
else:
Unpickler = pickle.Unpickler
Pickler = pickle.Pickler
asbytes = str
_MEGA = 2 ** 20
_MAX_LEN = len(hex(2 ** 64))
# To detect file types
_ZFILE_PREFIX = asbytes('ZF')
###############################################################################
# Compressed file with Zlib
def _read_magic(file_handle):
""" Utility to check the magic signature of a file identifying it as a
Zfile
"""
magic = file_handle.read(len(_ZFILE_PREFIX))
# Pickling needs file-handles at the beginning of the file
file_handle.seek(0)
return magic
def read_zfile(file_handle):
"""Read the z-file and return the content as a string
Z-files are raw data compressed with zlib used internally by joblib
for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.seek(0)
assert _read_magic(file_handle) == _ZFILE_PREFIX, \
"File does not have the right magic"
length = file_handle.read(len(_ZFILE_PREFIX) + _MAX_LEN)
length = length[len(_ZFILE_PREFIX):]
length = int(length, 16)
# We use the known length of the data to tell Zlib the size of the
# buffer to allocate.
data = zlib.decompress(file_handle.read(), 15, length)
assert len(data) == length, (
"Incorrect data length while decompressing %s."
"The file could be corrupted." % file_handle)
return data
def write_zfile(file_handle, data, compress=1):
"""Write the data in the given file as a Z-file.
Z-files are raw data compressed with zlib used internally by joblib
for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.write(_ZFILE_PREFIX)
length = hex(len(data))
if sys.version_info[0] < 3 and type(length) is long:
# We need to remove the trailing 'L' in the hex representation
length = length[:-1]
# Store the length of the data
file_handle.write(asbytes(length.ljust(_MAX_LEN)))
file_handle.write(zlib.compress(asbytes(data), compress))
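# Minimal round-trip sketch (editorial, not part of the original module):
#
# buf = BytesIO()
# write_zfile(buf, b'payload', compress=3)
# read_zfile(buf) # -> b'payload' (read_zfile seeks to offset 0 itself)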
###############################################################################
# Utility objects for persistence.
class NDArrayWrapper(object):
""" An object to be persisted instead of numpy arrays.
The only thing this object does, is to carry the filename in which
the array has been persisted, and the array subclass.
"""
def __init__(self, filename, subclass):
"Store the useful information for later"
self.filename = filename
self.subclass = subclass
def read(self, unpickler):
"Reconstruct the array"
filename = os.path.join(unpickler._dirname, self.filename)
# Load the array from the disk
np_ver = [int(x) for x in unpickler.np.__version__.split('.', 2)[:2]]
if np_ver >= [1, 3]:
array = unpickler.np.load(filename,
mmap_mode=unpickler.mmap_mode)
else:
# Numpy does not have mmap_mode before 1.3
array = unpickler.np.load(filename)
# Reconstruct subclasses. This does not work with old
# versions of numpy
if (hasattr(array, '__array_prepare__')
and self.subclass not in (unpickler.np.ndarray,
unpickler.np.memmap)):
# We need to reconstruct another subclass
new_array = unpickler.np.core.multiarray._reconstruct(
self.subclass, (0,), 'b')
new_array.__array_prepare__(array)
array = new_array
return array
#def __reduce__(self):
# return None
class ZNDArrayWrapper(NDArrayWrapper):
"""An object to be persisted instead of numpy arrays.
This object store the Zfile filename in which
the data array has been persisted, and the meta information to
retrieve it.
The reason that we store the raw buffer data of the array and
the meta information, rather than array representation routine
(tostring) is that it enables us to use completely the strided
model to avoid memory copies (a and a.T store as fast). In
addition saving the heavy information separately can avoid
creating large temporary buffers when unpickling data with
large arrays.
"""
def __init__(self, filename, init_args, state):
"Store the useful information for later"
self.filename = filename
self.state = state
self.init_args = init_args
def read(self, unpickler):
"Reconstruct the array from the meta-information and the z-file"
# Here we are simply reproducing the unpickling mechanism for numpy
# arrays
filename = os.path.join(unpickler._dirname, self.filename)
array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
with open(filename, 'rb') as f:
data = read_zfile(f)
state = self.state + (data,)
array.__setstate__(state)
return array
###############################################################################
# Pickler classes
class NumpyPickler(Pickler):
"""A pickler to persist of big data efficiently.
The main features of this object are:
* persistence of numpy arrays in separate .npy files, for which
I/O is fast.
* optional compression using Zlib, with a special care on avoid
temporaries.
"""
def __init__(self, filename, compress=0, cache_size=10):
self._filename = filename
self._filenames = [filename, ]
self.cache_size = cache_size
self.compress = compress
if not self.compress:
self.file = open(filename, 'wb')
else:
self.file = BytesIO()
# Count the number of npy files that we have created:
self._npy_counter = 0
Pickler.__init__(self, self.file,
protocol=pickle.HIGHEST_PROTOCOL)
# delayed import of numpy, to avoid tight coupling
try:
import numpy as np
except ImportError:
np = None
self.np = np
def _write_array(self, array, filename):
if not self.compress:
self.np.save(filename, array)
container = NDArrayWrapper(os.path.basename(filename),
type(array))
else:
filename += '.z'
# Efficient compressed storage:
# The meta data is stored in the container, and the core
# numerics in a z-file
_, init_args, state = array.__reduce__()
# the last entry of 'state' is the data itself
with open(filename, 'wb') as zfile:
write_zfile(zfile, state[-1], compress=self.compress)
state = state[:-1]
container = ZNDArrayWrapper(os.path.basename(filename),
init_args, state)
return container, filename
def save(self, obj):
""" Subclass the save method, to save ndarray subclasses in npy
files, rather than pickling them. Of course, this is a
total abuse of the Pickler class.
"""
if self.np is not None and type(obj) in (self.np.ndarray,
self.np.matrix, self.np.memmap):
size = obj.size * obj.itemsize
if self.compress and size < self.cache_size * _MEGA:
# When compressing, as we are not writing directly to the
# disk, it is more efficient to use standard pickling
if type(obj) is self.np.memmap:
# Pickling doesn't work with memmaped arrays
obj = self.np.asarray(obj)
return Pickler.save(self, obj)
self._npy_counter += 1
try:
filename = '%s_%02i.npy' % (self._filename,
self._npy_counter)
                # This converts the array into a container
obj, filename = self._write_array(obj, filename)
self._filenames.append(filename)
except:
self._npy_counter -= 1
# XXX: We should have a logging mechanism
print('Failed to save %s to .npy file:\n%s' % (
type(obj),
traceback.format_exc()))
return Pickler.save(self, obj)
def close(self):
if self.compress:
with open(self._filename, 'wb') as zfile:
write_zfile(zfile, self.file.getvalue(), self.compress)
class NumpyUnpickler(Unpickler):
"""A subclass of the Unpickler to unpickle our numpy pickles.
"""
dispatch = Unpickler.dispatch.copy()
def __init__(self, filename, file_handle, mmap_mode=None):
self._filename = os.path.basename(filename)
self._dirname = os.path.dirname(filename)
self.mmap_mode = mmap_mode
self.file_handle = self._open_pickle(file_handle)
Unpickler.__init__(self, self.file_handle)
try:
import numpy as np
except ImportError:
np = None
self.np = np
def _open_pickle(self, file_handle):
return file_handle
def load_build(self):
""" This method is called to set the state of a newly created
object.
        We capture it to replace our place-holder objects,
        NDArrayWrapper, with the arrays we are interested in. We
        replace them directly in the unpickler's stack.
"""
Unpickler.load_build(self)
if isinstance(self.stack[-1], NDArrayWrapper):
if self.np is None:
raise ImportError('Trying to unpickle an ndarray, '
"but numpy didn't import correctly")
nd_array_wrapper = self.stack.pop()
array = nd_array_wrapper.read(self)
self.stack.append(array)
# Be careful to register our new method.
if sys.version_info[0] >= 3:
dispatch[pickle.BUILD[0]] = load_build
else:
dispatch[pickle.BUILD] = load_build
class ZipNumpyUnpickler(NumpyUnpickler):
"""A subclass of our Unpickler to unpickle on the fly from
compressed storage."""
def __init__(self, filename, file_handle):
NumpyUnpickler.__init__(self, filename,
file_handle,
mmap_mode=None)
def _open_pickle(self, file_handle):
return BytesIO(read_zfile(file_handle))
###############################################################################
# Utility functions
def dump(value, filename, compress=0, cache_size=100):
"""Fast persistence of an arbitrary Python object into a files, with
dedicated storage for numpy arrays.
Parameters
-----------
value: any Python object
The object to store to disk
filename: string
The name of the file in which it is to be stored
    compress: integer from 0 to 9, optional
Optional compression level for the data. 0 is no compression.
Higher means more compression, but also slower read and
write times. Using a value of 3 is often a good compromise.
See the notes for more details.
cache_size: positive number, optional
Fixes the order of magnitude (in megabytes) of the cache used
for in-memory compression. Note that this is just an order of
magnitude estimate and that for big arrays, the code will go
over this value at dump and at load time.
Returns
-------
filenames: list of strings
The list of file names in which the data is stored. If
compress is false, each array is stored in a different file.
See Also
--------
joblib.load : corresponding loader
Notes
-----
Memmapping on load cannot be used for compressed files. Thus
using compression can significantly slow down loading. In
    addition, compressed files take extra memory during
dump and load.
"""
if compress is True:
        # If compress is simply set to True, use a compression level
        # of 3 by default
compress = 3
if not isinstance(filename, _basestring):
# People keep inverting arguments, and the resulting error is
# incomprehensible
raise ValueError(
'Second argument should be a filename, %s (type %s) was given'
% (filename, type(filename))
)
try:
pickler = NumpyPickler(filename, compress=compress,
cache_size=cache_size)
pickler.dump(value)
pickler.close()
finally:
if 'pickler' in locals() and hasattr(pickler, 'file'):
pickler.file.flush()
pickler.file.close()
return pickler._filenames
def load(filename, mmap_mode=None):
"""Reconstruct a Python object from a file persisted with joblib.dump.
Parameters
-----------
filename: string
The name of the file from which to load the object
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, the arrays are memory-mapped from the disk. This
mode has no effect for compressed files. Note that in this
        case the reconstructed object might no longer match exactly
the originally pickled object.
Returns
-------
result: any Python object
The object stored in the file.
See Also
--------
joblib.dump : function to save an object
Notes
-----
This function can load numpy array files saved separately during the
dump. If the mmap_mode argument is given, it is passed to np.load and
arrays are loaded as memmaps. As a consequence, the reconstructed
object might not match the original pickled object. Note that if the
file was saved with compression, the arrays cannot be memmaped.
"""
with open(filename, 'rb') as file_handle:
# We are careful to open the file handle early and keep it open to
# avoid race-conditions on renames. That said, if data are stored in
# companion files, moving the directory will create a race when
# joblib tries to access the companion files.
if _read_magic(file_handle) == _ZFILE_PREFIX:
if mmap_mode is not None:
warnings.warn('file "%(filename)s" appears to be a zip, '
'ignoring mmap_mode "%(mmap_mode)s" flag passed'
% locals(), Warning, stacklevel=2)
unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
else:
unpickler = NumpyUnpickler(filename, file_handle=file_handle,
mmap_mode=mmap_mode)
try:
obj = unpickler.load()
finally:
if hasattr(unpickler, 'file_handle'):
unpickler.file_handle.close()
return obj
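# Minimal usage sketch (hypothetical paths, not part of the library): dump()
# returns the main pickle filename plus one companion .npy file per array,
# and load() transparently reassembles the original object, optionally
# memmapping the arrays from disk.
def _dump_load_sketch():
    import numpy as np
    obj = {'weights': np.ones((1000, 10)), 'label': 'demo'}
    # Creates '/tmp/demo.pkl' and '/tmp/demo.pkl_01.npy'
    filenames = dump(obj, '/tmp/demo.pkl')
    restored = load('/tmp/demo.pkl', mmap_mode='r')
    return filenames, restored['label']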
| bsd-3-clause |
yonglehou/spiderfoot | ext/stem/interpreter/commands.py | 11 | 11489 | # Copyright 2014-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Handles making requests and formatting the responses.
"""
import code
import socket
import stem
import stem.control
import stem.descriptor.remote
import stem.interpreter.help
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
from stem.interpreter import STANDARD_OUTPUT, BOLD_OUTPUT, ERROR_OUTPUT, uses_settings, msg
from stem.util.term import format
def _get_fingerprint(arg, controller):
"""
Resolves user input into a relay fingerprint. This accepts...
* Fingerprints
* Nicknames
* IPv4 addresses, either with or without an ORPort
* Empty input, which is resolved to ourselves if we're a relay
:param str arg: input to be resolved to a relay fingerprint
:param stem.control.Controller controller: tor control connection
:returns: **str** for the relay fingerprint
:raises: **ValueError** if we're unable to resolve the input to a relay
"""
if not arg:
try:
return controller.get_info('fingerprint')
except:
raise ValueError("We aren't a relay, no information to provide")
elif stem.util.tor_tools.is_valid_fingerprint(arg):
return arg
elif stem.util.tor_tools.is_valid_nickname(arg):
try:
return controller.get_network_status(arg).fingerprint
except:
raise ValueError("Unable to find a relay with the nickname of '%s'" % arg)
elif ':' in arg or stem.util.connection.is_valid_ipv4_address(arg):
if ':' in arg:
address, port = arg.split(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("'%s' isn't a valid IPv4 address" % address)
elif port and not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port" % port)
port = int(port)
else:
address, port = arg, None
matches = {}
for desc in controller.get_network_statuses():
if desc.address == address:
if not port or desc.or_port == port:
matches[desc.or_port] = desc.fingerprint
if len(matches) == 0:
raise ValueError('No relays found at %s' % arg)
elif len(matches) == 1:
return list(matches.values())[0]
else:
response = "There's multiple relays at %s, include a port to specify which.\n\n" % arg
for i, or_port in enumerate(matches):
response += ' %i. %s:%s, fingerprint: %s\n' % (i + 1, address, or_port, matches[or_port])
raise ValueError(response)
else:
raise ValueError("'%s' isn't a fingerprint, nickname, or IP address" % arg)
class ControlInterpretor(code.InteractiveConsole):
"""
Handles issuing requests and providing nicely formed responses, with support
for special irc style subcommands.
"""
def __init__(self, controller):
self._received_events = []
code.InteractiveConsole.__init__(self, {
'stem': stem,
'stem.control': stem.control,
'controller': controller,
'events': self.get_events,
})
self._controller = controller
self._run_python_commands = True
    # Indicates if we're processing a multiline command, such as a
    # conditional block or loop.
self.is_multiline_context = False
# Intercept events our controller hears about at a pretty low level since
# the user will likely be requesting them by direct 'SETEVENTS' calls.
handle_event_real = self._controller._handle_event
def handle_event_wrapper(event_message):
handle_event_real(event_message)
self._received_events.append(event_message)
self._controller._handle_event = handle_event_wrapper
def get_events(self, *event_types):
events = list(self._received_events)
event_types = list(map(str.upper, event_types)) # make filtering case insensitive
if event_types:
events = [e for e in events if e.type in event_types]
return events
def do_help(self, arg):
"""
Performs the '/help' operation, giving usage information for the given
argument or a general summary if there wasn't one.
"""
return stem.interpreter.help.response(self._controller, arg)
def do_events(self, arg):
"""
Performs the '/events' operation, dumping the events that we've received
belonging to the given types. If no types are specified then this provides
all buffered events.
If the user runs '/events clear' then this clears the list of events we've
received.
"""
event_types = arg.upper().split()
if 'CLEAR' in event_types:
del self._received_events[:]
return format('cleared event backlog', *STANDARD_OUTPUT)
return '\n'.join([format(str(e), *STANDARD_OUTPUT) for e in self.get_events(*event_types)])
def do_info(self, arg):
"""
Performs the '/info' operation, looking up a relay by fingerprint, IP
address, or nickname and printing its descriptor and consensus entries in a
pretty fashion.
"""
try:
fingerprint = _get_fingerprint(arg, self._controller)
except ValueError as exc:
return format(str(exc), *ERROR_OUTPUT)
ns_desc = self._controller.get_network_status(fingerprint, None)
server_desc = self._controller.get_server_descriptor(fingerprint, None)
extrainfo_desc = None
micro_desc = self._controller.get_microdescriptor(fingerprint, None)
# We'll mostly rely on the router status entry. Either the server
# descriptor or microdescriptor will be missing, so we'll treat them as
# being optional.
if not ns_desc:
return format('Unable to find consensus information for %s' % fingerprint, *ERROR_OUTPUT)
    # More likely than not we'll have the microdescriptor but not the server
    # and extrainfo descriptors. If so, fetch them.
downloader = stem.descriptor.remote.DescriptorDownloader(timeout = 5)
server_desc_query = downloader.get_server_descriptors(fingerprint)
extrainfo_desc_query = downloader.get_extrainfo_descriptors(fingerprint)
for desc in server_desc_query:
server_desc = desc
for desc in extrainfo_desc_query:
extrainfo_desc = desc
address_extrainfo = []
try:
address_extrainfo.append(socket.gethostbyaddr(ns_desc.address)[0])
except:
pass
try:
address_extrainfo.append(self._controller.get_info('ip-to-country/%s' % ns_desc.address))
except:
pass
address_extrainfo_label = ' (%s)' % ', '.join(address_extrainfo) if address_extrainfo else ''
if server_desc:
exit_policy_label = str(server_desc.exit_policy)
elif micro_desc:
exit_policy_label = str(micro_desc.exit_policy)
else:
exit_policy_label = 'Unknown'
lines = [
'%s (%s)' % (ns_desc.nickname, fingerprint),
format('address: ', *BOLD_OUTPUT) + '%s:%s%s' % (ns_desc.address, ns_desc.or_port, address_extrainfo_label),
]
if server_desc:
lines.append(format('tor version: ', *BOLD_OUTPUT) + str(server_desc.tor_version))
lines.append(format('flags: ', *BOLD_OUTPUT) + ', '.join(ns_desc.flags))
lines.append(format('exit policy: ', *BOLD_OUTPUT) + exit_policy_label)
if server_desc and server_desc.contact:
contact = stem.util.str_tools._to_unicode(server_desc.contact)
      # clear up some very common contact address obfuscation
for alias in (' at ', ' AT '):
contact = contact.replace(alias, '@')
for alias in (' dot ', ' DOT '):
contact = contact.replace(alias, '.')
lines.append(format('contact: ', *BOLD_OUTPUT) + contact)
descriptor_section = [
('Server Descriptor:', server_desc),
('Extrainfo Descriptor:', extrainfo_desc),
('Microdescriptor:', micro_desc),
('Router Status Entry:', ns_desc),
]
div = format('-' * 80, *STANDARD_OUTPUT)
for label, desc in descriptor_section:
if desc:
lines += ['', div, format(label, *BOLD_OUTPUT), div, '']
lines += [format(l, *STANDARD_OUTPUT) for l in str(desc).splitlines()]
return '\n'.join(lines)
def do_python(self, arg):
"""
Performs the '/python' operation, toggling if we accept python commands or
not.
"""
if not arg:
status = 'enabled' if self._run_python_commands else 'disabled'
return format('Python support is currently %s.' % status, *STANDARD_OUTPUT)
elif arg.lower() == 'enable':
self._run_python_commands = True
elif arg.lower() == 'disable':
self._run_python_commands = False
else:
return format("'%s' is not recognized. Please run either '/python enable' or '/python disable'." % arg, *ERROR_OUTPUT)
if self._run_python_commands:
response = "Python support enabled, we'll now run non-interpreter commands as python."
else:
response = "Python support disabled, we'll now pass along all commands to tor."
return format(response, *STANDARD_OUTPUT)
@uses_settings
def run_command(self, command, config):
"""
Runs the given command. Requests starting with a '/' are special commands
to the interpreter, and anything else is sent to the control port.
:param stem.control.Controller controller: tor control connection
:param str command: command to be processed
    :returns: **list** of output lines, each line being a list of
      (msg, format) tuples
:raises: **stem.SocketClosed** if the control connection has been severed
"""
if not self._controller.is_alive():
raise stem.SocketClosed()
# Commands fall into three categories:
#
    # * Interpreter commands. These start with a '/'.
#
# * Controller commands stem knows how to handle. We use our Controller's
# methods for these to take advantage of caching and present nicer
# output.
#
# * Other tor commands. We pass these directly on to the control port.
cmd, arg = command.strip(), ''
if ' ' in cmd:
cmd, arg = cmd.split(' ', 1)
output = ''
if cmd.startswith('/'):
cmd = cmd.lower()
if cmd == '/quit':
raise stem.SocketClosed()
elif cmd == '/events':
output = self.do_events(arg)
elif cmd == '/info':
output = self.do_info(arg)
elif cmd == '/python':
output = self.do_python(arg)
elif cmd == '/help':
output = self.do_help(arg)
else:
output = format("'%s' isn't a recognized command" % command, *ERROR_OUTPUT)
else:
cmd = cmd.upper() # makes commands uppercase to match the spec
if cmd.replace('+', '') in ('LOADCONF', 'POSTDESCRIPTOR'):
# provides a notice that multi-line controller input isn't yet implemented
output = format(msg('msg.multiline_unimplemented_notice'), *ERROR_OUTPUT)
elif cmd == 'QUIT':
self._controller.msg(command)
raise stem.SocketClosed()
else:
is_tor_command = cmd in config.get('help.usage', {}) and cmd.lower() != 'events'
if self._run_python_commands and not is_tor_command:
self.is_multiline_context = code.InteractiveConsole.push(self, command)
return
else:
try:
output = format(self._controller.msg(command).raw_content().strip(), *STANDARD_OUTPUT)
except stem.ControllerError as exc:
if isinstance(exc, stem.SocketClosed):
raise exc
else:
output = format(str(exc), *ERROR_OUTPUT)
output += '\n' # give ourselves an extra line before the next prompt
return output
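# Minimal usage sketch (not part of stem; assumes tor is listening on the
# standard control port 9051): attach an interpreter to a controller and
# issue a request, much like the interactive stem prompt does.
if __name__ == '__main__':
  with stem.control.Controller.from_port(port = 9051) as controller:
    controller.authenticate()
    interpreter = ControlInterpretor(controller)
    print(interpreter.run_command('GETINFO version'))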
| gpl-2.0 |
amenonsen/ansible | test/units/modules/network/f5/test_bigip_asm_dos_application.py | 21 | 11479 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_asm_dos_application import ApiParameters
from library.modules.bigip_asm_dos_application import ModuleParameters
from library.modules.bigip_asm_dos_application import ModuleManager
from library.modules.bigip_asm_dos_application import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_asm_dos_application import ApiParameters
from ansible.modules.network.f5.bigip_asm_dos_application import ModuleParameters
from ansible.modules.network.f5.bigip_asm_dos_application import ModuleManager
from ansible.modules.network.f5.bigip_asm_dos_application import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
profile='dos_foo',
geolocations=dict(
blacklist=['Argentina', 'Montenegro'],
whitelist=['France', 'Belgium']
),
heavy_urls=dict(
auto_detect=True,
latency_threshold=3000,
exclude=['/exclude1.html', '/exclude2.html'],
include=[dict(url='include1.html', threshold='auto'),
dict(url='include2.html', threshold='2000')],
),
mobile_detection=dict(
enabled=True,
allow_android_rooted_device=True,
allow_any_android_package=True,
allow_any_ios_package=True,
allow_jailbroken_devices=True,
allow_emulators=True,
client_side_challenge_mode='cshui',
ios_allowed_package_names=['foo', 'bar'],
android_publishers=['cert1.crt', 'cert2.crt']
),
rtbh_duration=180,
rtbh_enable=True,
scrubbing_duration=360,
scrubbing_enable=True,
single_page_application=True,
trigger_irule=False,
partition='Common'
)
p = ModuleParameters(params=args)
assert p.profile == 'dos_foo'
assert p.geo_whitelist == ['France', 'Belgium']
assert p.geo_blacklist == ['Argentina', 'Montenegro']
assert p.auto_detect == 'enabled'
assert p.latency_threshold == 3000
assert p.hw_url_exclude == ['/exclude1.html', '/exclude2.html']
assert dict(name='URL/include1.html', threshold='auto', url='/include1.html') in p.hw_url_include
assert dict(name='URL/include2.html', threshold='2000', url='/include2.html') in p.hw_url_include
assert p.allow_android_rooted_device == 'true'
assert p.enable_mobile_detection == 'enabled'
assert p.allow_any_android_package == 'true'
assert p.allow_any_ios_package == 'true'
assert p.allow_jailbroken_devices == 'true'
assert p.allow_emulators == 'true'
assert p.client_side_challenge_mode == 'cshui'
assert p.ios_allowed_package_names == ['foo', 'bar']
assert p.android_publishers == ['/Common/cert1.crt', '/Common/cert2.crt']
assert p.rtbh_duration == 180
assert p.rtbh_enable == 'enabled'
assert p.scrubbing_duration == 360
assert p.scrubbing_enable == 'enabled'
assert p.single_page_application == 'enabled'
assert p.trigger_irule == 'disabled'
def test_api_parameters(self):
args = load_fixture('load_asm_dos.json')
p = ApiParameters(params=args)
assert p.geo_whitelist == ['Aland Islands']
assert p.geo_blacklist == ['Afghanistan']
assert p.auto_detect == 'enabled'
assert p.latency_threshold == 1000
assert p.hw_url_exclude == ['/exclude.html']
assert dict(name='URL/test.htm', threshold='auto', url='/test.htm') in p.hw_url_include
assert dict(name='URL/testy.htm', threshold='auto', url='/testy.htm') in p.hw_url_include
assert p.allow_android_rooted_device == 'false'
assert p.enable_mobile_detection == 'disabled'
assert p.allow_any_android_package == 'false'
assert p.allow_any_ios_package == 'false'
assert p.allow_jailbroken_devices == 'true'
assert p.allow_emulators == 'true'
assert p.client_side_challenge_mode == 'pass'
assert p.ios_allowed_package_names == ['foobarapp']
assert p.android_publishers == ['/Common/ca-bundle.crt']
assert p.rtbh_duration == 300
assert p.rtbh_enable == 'enabled'
assert p.scrubbing_duration == 60
assert p.scrubbing_enable == 'enabled'
assert p.single_page_application == 'enabled'
assert p.trigger_irule == 'enabled'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
try:
self.p1 = patch('library.modules.bigip_asm_dos_application.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
except Exception:
self.p1 = patch('ansible.modules.network.f5.bigip_asm_dos_application.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
def tearDown(self):
self.p1.stop()
def test_create_asm_dos_profile(self, *args):
set_module_args(dict(
profile='dos_foo',
geolocations=dict(
blacklist=['Argentina', 'Montenegro'],
whitelist=['France', 'Belgium']
),
heavy_urls=dict(
auto_detect=True,
latency_threshold=3000,
exclude=['/exclude1.html', '/exclude2.html'],
include=[dict(url='include1.html', threshold='auto'),
dict(url='include2.html', threshold='2000')]
),
mobile_detection=dict(
enabled=True,
allow_android_rooted_device=True,
allow_any_android_package=True,
allow_any_ios_package=True,
allow_jailbroken_devices=True,
allow_emulators=True,
client_side_challenge_mode='cshui',
ios_allowed_package_names=['foo', 'bar'],
android_publishers=['cert1.crt', 'cert2.crt']
),
rtbh_duration=180,
rtbh_enable=True,
scrubbing_duration=360,
scrubbing_enable=True,
single_page_application=True,
trigger_irule=False,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.version_less_than_13_1 = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['geolocations'] == dict(blacklist=['Argentina', 'Montenegro'], whitelist=['France', 'Belgium'])
assert results['heavy_urls'] == dict(auto_detect='yes', latency_threshold=3000,
exclude=['/exclude1.html', '/exclude2.html'],
include=[dict(url='/include1.html', threshold='auto'),
dict(url='/include2.html', threshold='2000')]
)
assert results['mobile_detection'] == dict(enabled='yes', allow_android_rooted_device='yes',
allow_any_android_package='yes', allow_any_ios_package='yes',
allow_jailbroken_devices='yes', allow_emulators='yes',
client_side_challenge_mode='cshui',
ios_allowed_package_names=['foo', 'bar'],
android_publishers=['/Common/cert1.crt', '/Common/cert2.crt']
)
assert results['rtbh_duration'] == 180
assert results['rtbh_enable'] == 'yes'
assert results['scrubbing_duration'] == 360
assert results['scrubbing_enable'] == 'yes'
assert results['single_page_application'] == 'yes'
assert results['trigger_irule'] == 'no'
def test_update_asm_dos_profile(self, *args):
set_module_args(dict(
profile='test',
heavy_urls=dict(
latency_threshold=3000,
exclude=['/exclude1.html', '/exclude2.html'],
include=[dict(url='include1.html', threshold='auto'),
dict(url='include2.html', threshold='2000')]
),
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = ApiParameters(params=load_fixture('load_asm_dos.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.version_less_than_13_1 = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['heavy_urls'] == dict(latency_threshold=3000, exclude=['/exclude1.html', '/exclude2.html'],
include=[dict(url='/include1.html', threshold='auto'),
dict(url='/include2.html', threshold='2000')]
)
| gpl-3.0 |
Tejal011089/paypal_erpnext | erpnext/setup/doctype/sms_settings/sms_settings.py | 15 | 3544 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _, throw, msgprint
from frappe.utils import cstr, nowdate
from frappe.model.document import Document
class SMSSettings(Document):
pass
def validate_receiver_nos(receiver_list):
validated_receiver_list = []
for d in receiver_list:
		# remove invalid characters
for x in [' ', '+', '-', '(', ')']:
d = d.replace(x, '')
validated_receiver_list.append(d)
if not validated_receiver_list:
throw(_("Please enter valid mobile nos"))
return validated_receiver_list
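def _validate_receiver_nos_example():
	# Hypothetical number, illustration only: separators are stripped while
	# the digits themselves are preserved.
	return validate_receiver_nos(['+91 (12) 345-6789'])  # ['91123456789']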
def get_sender_name():
"returns name as SMS sender"
sender_name = frappe.db.get_single_value('SMS Settings', 'sms_sender_name') or \
'ERPNXT'
if len(sender_name) > 6 and \
frappe.db.get_default("country") == "India":
throw("""As per TRAI rule, sender name must be exactly 6 characters.
Kindly change sender name in Setup --> Global Defaults.
Note: Hyphen, space, numeric digit, special characters are not allowed.""")
return sender_name
@frappe.whitelist()
def get_contact_number(contact_name, value, key):
"returns mobile number of the contact"
number = frappe.db.sql("""select mobile_no, phone from tabContact where name=%s and %s=%s""" %
('%s', key, '%s'), (contact_name, value))
return number and (number[0][0] or number[0][1]) or ''
@frappe.whitelist()
def send_sms(receiver_list, msg, sender_name = ''):
import json
if isinstance(receiver_list, basestring):
receiver_list = json.loads(receiver_list)
if not isinstance(receiver_list, list):
receiver_list = [receiver_list]
receiver_list = validate_receiver_nos(receiver_list)
arg = {
'receiver_list' : receiver_list,
'message' : msg,
'sender_name' : sender_name or get_sender_name()
}
if frappe.db.get_value('SMS Settings', None, 'sms_gateway_url'):
ret = send_via_gateway(arg)
msgprint(ret)
else:
msgprint(_("Please Update SMS Settings"))
def send_via_gateway(arg):
ss = frappe.get_doc('SMS Settings', 'SMS Settings')
args = {ss.message_parameter : arg.get('message')}
for d in ss.get("parameters"):
args[d.parameter] = d.value
resp = []
for d in arg.get('receiver_list'):
args[ss.receiver_parameter] = d
resp.append(send_request(ss.sms_gateway_url, args))
return resp
# Send Request
# =========================================================
def send_request(gateway_url, args):
import httplib, urllib
server, api_url = scrub_gateway_url(gateway_url)
conn = httplib.HTTPConnection(server) # open connection
headers = {}
headers['Accept'] = "text/plain, text/html, */*"
conn.request('GET', api_url + urllib.urlencode(args), headers = headers) # send request
resp = conn.getresponse() # get response
resp = resp.read()
return resp
# Split gateway url to server and api url
# =========================================================
def scrub_gateway_url(url):
url = url.replace('http://', '').strip().split('/')
server = url.pop(0)
api_url = '/' + '/'.join(url)
if not api_url.endswith('?'):
api_url += '?'
return server, api_url
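def _scrub_gateway_url_example():
	# Hypothetical gateway URL, illustration only: the host is split from the
	# API path and a trailing '?' is ensured so parameters can be appended.
	server, api_url = scrub_gateway_url('http://sms.example.com/api/send_sms.php')
	return server, api_url  # ('sms.example.com', '/api/send_sms.php?')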
# Create SMS Log
# =========================================================
def create_sms_log(arg, sent_sms):
sl = frappe.get_doc('SMS Log')
sl.sender_name = arg['sender_name']
sl.sent_on = nowdate()
sl.receiver_list = cstr(arg['receiver_list'])
sl.message = arg['message']
sl.no_of_requested_sms = len(arg['receiver_list'])
sl.no_of_sent_sms = sent_sms
sl.save()
| agpl-3.0 |
Deepakkothandan/ansible | lib/ansible/modules/utilities/logic/import_tasks.py | 2 | 1548 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
author:
- "Ansible Core Team (@ansible)"
module: import_tasks
short_description: import a task list.
description:
- Imports a list of tasks to be added to the current playbook for subsequent execution.
version_added: "2.4"
options:
free-form:
description:
- This action allows you to specify the name of the file directly w/o any other options.
- Any loops, conditionals and most other keywords will be applied to the included tasks, not to this statement itself.
- If you need any of those to apply to this action, use M(include_tasks) instead.
notes:
- This is really not a module, this is a feature of the Ansible Engine, as such it cannot be overridden the same way a module can.
'''
EXAMPLES = """
# include task list in play
- hosts: all
tasks:
- debug:
msg: task1
- import_tasks: stuff.yml
- debug:
msg: task10
# apply conditional to all imported tasks
- hosts: all
tasks:
- debug:
msg: task1
- include: stuff.yml"
when: hostvar is defined
"""
RETURN = """
# this module does not return anything except tasks to execute
"""
| gpl-3.0 |
keyboardio/Kaleidoscope | testing/googletest/googletest/test/gtest_xml_outfiles_test.py | 60 | 5349 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="TestSomeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyOne">
<properties>
<property name="SetUpProp" value="1"/>
<property name="TestSomeProperty" value="1"/>
<property name="TearDownProp" value="1"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="TestSomeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyTwo">
<properties>
<property name="SetUpProp" value="2"/>
<property name="TestSomeProperty" value="2"/>
<property name="TearDownProp" value="2"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| gpl-3.0 |
halberom/ansible-modules-core | inventory/group_by.py | 161 | 1330 | # -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: group_by
short_description: Create Ansible groups based on facts
description:
- Use facts to create ad-hoc groups that can be used later in a playbook.
version_added: "0.9"
options:
key:
description:
- The variables whose values will be used as groups
required: true
author: "Jeroen Hoekx (@jhoekx)"
notes:
- Spaces in group names are converted to dashes '-'.
'''
EXAMPLES = '''
# Create groups based on the machine architecture
- group_by: key=machine_{{ ansible_machine }}
# Create groups like 'kvm-host'
- group_by: key=virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
'''
| gpl-3.0 |
14thibea/megamix | doc/example.py | 1 | 2598 | ########################
# Prelude to the example
########################
"""
This example uses a DP-VBGMM model.
The other mixtures and K-means work in the same way.
The available classes are:
- Kmeans (kmeans)
- GaussianMixture (GMM)
- VariationalGaussianMixture (VBGMM)
- DPVariationalGaussianMixture (DP-VBGMM)
"""
from megamix.batch import DPVariationalGaussianMixture
import numpy as np
########################
# Features used
########################
"""
Features must be numpy arrays of two dimensions:
the first dimension is the number of points
the second dimension is the dimension of the space
"""
# Here we use a random set of points for the example
n_points = 10000
dim = 39
points = np.random.randn(n_points,dim)
########################
# Fitting the model
########################
# We choose the number of clusters that we want
n_components = 100
# The model is instantiated
GM = DPVariationalGaussianMixture(n_components)
# The model is fitted
GM.fit(points)
# It is also possible to do early stopping in order to avoid overfitting
points_data = points[:n_points//2:]
points_test = points[n_points//2::]
# In this case the model will fit only on points_data but will use points_test
# to evaluate the convergence criterion.
GM.fit(points_data,points_test)
# Some clusters may disappear with the DP-VBGMM model. You may want to
# simplify the model by removing the useless information
GM_simple = GM.simplified_model(points)
##########################
# Analysis of the model
##########################
other_points = np.random.randn(n_points,dim)
# We can obtain the log of the responsibilities of any set of points when the
# model is fitted (or at least initialized)
log_resp = GM.predict_log_resp(other_points)
# log_resp.shape = (n_points,n_components)
# We can obtain the value of the convergence criterion for any set of points
score = GM.score(other_points)
#############################
# Writing or reading a model
#############################
# It is possible to write your model in a group of an h5py file
import h5py
file = h5py.File('DP_VBGMM.h5','w')
grp = file.create_group('model_fitted')
GM.write(grp)
file.close()
# You can also read data from such an h5py file to initialize new models
GM_new = DPVariationalGaussianMixture()
file = h5py.File('DP_VBGMM.h5','r')
grp = file['model_fitted']
GM_new.read_and_init(grp,points)
file.close()
# You can also save your model regularly while fitting it by using
# the saving parameter
GM.fit(points,saving='log',directory='mypath',legend='wonderful_model') | apache-2.0 |
pravsripad/mne-python | tutorials/source-modeling/plot_compute_covariance.py | 4 | 8732 | """
.. _tut_compute_covariance:
Computing a covariance matrix
=============================
Many methods in MNE, including source estimation and some classification
algorithms, require covariance estimations from the recordings.
In this tutorial we cover the basics of sensor covariance computations and
construct a noise covariance matrix that can be used when computing the
minimum-norm inverse solution. For more information, see
:ref:`minimum_norm_estimates`.
"""
import os.path as op
import mne
from mne.datasets import sample
###############################################################################
# Source estimation methods such as MNE require a noise estimate from the
# recordings. In this tutorial we cover the basics of noise covariance and
# construct a noise covariance matrix that can be used when computing the
# inverse solution. For more information, see :ref:`minimum_norm_estimates`.
data_path = sample.data_path()
raw_empty_room_fname = op.join(
data_path, 'MEG', 'sample', 'ernoise_raw.fif')
raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname)
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference('average', projection=True)
raw.info['bads'] += ['EEG 053'] # bads + 1 more
###############################################################################
# The definition of noise depends on the paradigm. In MEG it is quite common
# to use empty room measurements for the estimation of sensor noise. However, if
# you are dealing with evoked responses, you might want to also consider
# resting state brain activity as noise.
# First we compute the noise using the empty room recording. Note that you can also
# use only a part of the recording with tmin and tmax arguments. That can be
# useful if you use resting state as a noise baseline. Here we use the whole
# empty room recording to compute the noise covariance (``tmax=None`` is the
# same as the end of the recording, see :func:`mne.compute_raw_covariance`).
#
# Keep in mind that you want to match your empty room dataset to your
# actual MEG data, processing-wise. Ensure that filters
# are all the same and if you use ICA, apply it to your empty-room and subject
# data equivalently. In this case we did not filter the data and
# we don't use ICA. However, we do have bad channels and projections in
# the MEG data, and, hence, we want to make sure they get stored in the
# covariance object.
raw_empty_room.info['bads'] = [
bb for bb in raw.info['bads'] if 'EEG' not in bb]
raw_empty_room.add_proj(
[pp.copy() for pp in raw.info['projs'] if 'EEG' not in pp['desc']])
noise_cov = mne.compute_raw_covariance(
raw_empty_room, tmin=0, tmax=None)
###############################################################################
# Now that you have the covariance matrix in an MNE-Python object you can
# save it to a file with :func:`mne.write_cov`. Later you can read it back
# using :func:`mne.read_cov`.
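# For example, with an arbitrary filename (by MNE convention, covariance
# files end in ``-cov.fif``)::
#
#     mne.write_cov('ernoise-cov.fif', noise_cov)
#     noise_cov = mne.read_cov('ernoise-cov.fif')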
#
# You can also use the pre-stimulus baseline to estimate the noise covariance.
# First we have to construct the epochs. When computing the covariance, you
# should use baseline correction when constructing the epochs. Otherwise the
# covariance matrix will be inaccurate. In MNE this is done by default, but
# just to be sure, we define it here manually.
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
baseline=(-0.2, 0.0), decim=3, # we'll decimate for speed
verbose='error') # and ignore the warning about aliasing
###############################################################################
# Note that this method also attenuates any activity in your
# source estimates that resembles the baseline, whether you like it or not.
noise_cov_baseline = mne.compute_covariance(epochs, tmax=0)
###############################################################################
# Plot the covariance matrices
# ----------------------------
#
# Try setting proj to False to see the effect. Notice that the projectors in
# epochs are already applied, so the ``proj`` parameter has no effect.
noise_cov.plot(raw_empty_room.info, proj=True)
noise_cov_baseline.plot(epochs.info, proj=True)
###############################################################################
# .. _plot_compute_covariance_howto:
#
# How should I regularize the covariance matrix?
# ----------------------------------------------
#
# The estimated covariance can be numerically
# unstable and tends to induce correlations between estimated source amplitudes
# and the number of samples available. The MNE manual therefore suggests to
# regularize the noise covariance matrix (see
# :ref:`cov_regularization_math`), especially if only few samples are
# available. Unfortunately it is not easy to tell the effective number of
# samples, hence, to choose the appropriate regularization.
# In MNE-Python, regularization is done using advanced regularization methods
# described in :footcite:`EngemannGramfort2015`. For this the 'auto' option
# can be used. With this option cross-validation will be used to learn the
# optimal regularization:
noise_cov_reg = mne.compute_covariance(epochs, tmax=0., method='auto',
rank=None)
###############################################################################
# This procedure evaluates the noise covariance quantitatively by how well it
# whitens the data using the
# negative log-likelihood of unseen data. The final result can also be visually
# inspected.
# Under the assumption that the baseline does not contain a systematic signal
# (time-locked to the event of interest), the whitened baseline signal should
# follow a multivariate Gaussian distribution, i.e.,
# whitened baseline signals should be between -1.96 and 1.96 at a given time
# sample.
# Based on the same reasoning, the expected value for the :term:`global field
# power (GFP) <GFP>` is 1 (calculation of the GFP should take into account the
# true degrees of freedom, e.g. ``ddof=3`` with 2 active SSP vectors):
evoked = epochs.average()
evoked.plot_white(noise_cov_reg, time_unit='s')
###############################################################################
# This plot displays both, the whitened evoked signals for each channels and
# the whitened :term:`GFP`. The numbers in the GFP panel represent the
# estimated rank of the data, which amounts to the effective degrees of freedom
# by which the squared sum across sensors is divided when computing the
# whitened :term:`GFP`. The whitened :term:`GFP` also helps detecting spurious
# late evoked components which can be the consequence of over- or
# under-regularization.
#
# Note that if data have been processed using signal space separation
# (SSS) :footcite:`TauluEtAl2005`,
# gradiometers and magnetometers will be displayed jointly because both are
# reconstructed from the same SSS basis vectors with the same numerical rank.
# This also implies that both sensor types are not any longer statistically
# independent.
# These methods for evaluation can be used to assess model violations.
# Additional
# introductory materials can be found `here <https://goo.gl/ElWrxe>`_.
#
# For expert use cases or debugging the alternative estimators can also be
# compared (see
# :ref:`sphx_glr_auto_examples_visualization_plot_evoked_whitening.py`) and
# :ref:`sphx_glr_auto_examples_inverse_plot_covariance_whitening_dspm.py`):
noise_covs = mne.compute_covariance(
epochs, tmax=0., method=('empirical', 'shrunk'), return_estimators=True,
rank=None)
evoked.plot_white(noise_covs, time_unit='s')
##############################################################################
# This will plot the whitened evoked for the optimal estimator and display the
# :term:`GFP` for all estimators as separate lines in the related panel.
##############################################################################
# Finally, let's have a look at the difference between empty room and
# event related covariance, hacking the "method" option so that their types
# are shown in the legend of the plot.
evoked_meg = evoked.copy().pick('meg')
noise_cov['method'] = 'empty_room'
noise_cov_baseline['method'] = 'baseline'
evoked_meg.plot_white([noise_cov_baseline, noise_cov], time_unit='s')
##############################################################################
# Based on the negative log-likelihood, the baseline covariance
# seems more appropriate. See :ref:`ex-covariance-whitening-dspm` for more
# information.
###############################################################################
# References
# ----------
#
# .. footbibliography::
| bsd-3-clause |
tsdgeos/snapcraft | external_snaps_tests/__main__.py | 5 | 3360 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Snapcraft external snaps tests.
This will clone the external repository, search for snapcraft.yaml files and
snap the packages.
Usage:
external_snaps_tests REPO_URL [--repo-branch BRANCH]
[--cleanbuild] [--keep-dir]
Arguments:
REPO_URL The URL of the repository to build.
Options:
    --repo-branch The name of the branch to build. By default, the
                  repository's default branch is built.
--cleanbuild Build the snaps in a clean LXC container.
--keep-dir Do not remove the temporary directory where the repository was
cloned and snapped.
"""
import os
import shutil
import subprocess
import sys
import tempfile
import docopt
def main():
arguments = docopt.docopt(__doc__)
repo = arguments['REPO_URL']
repo_branch = arguments['--repo-branch']
cleanbuild = arguments['--cleanbuild']
keep_dir = arguments['--keep-dir']
if _is_git(repo):
if shutil.which('git'):
path = _git_clone(repo, repo_branch)
_build_snaps(path, cleanbuild, keep_dir)
else:
sys.exit('Please install git.')
else:
sys.exit('Unsupported repository.')
def _is_git(repo):
return (repo.startswith('https://github.com/') or
repo.startswith('git://') or
repo.startswith('https://git.launchpad.net/'))
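def _is_git_examples():
    # Hypothetical URLs, illustration only: only the three prefixes checked
    # above are recognised as git repositories.
    assert _is_git('https://github.com/snapcore/snapcraft')
    assert _is_git('git://example.com/project.git')
    assert not _is_git('https://bitbucket.org/example/project.git')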
def _git_clone(url, repo_branch=None):
temp_dir = tempfile.mkdtemp(prefix='snapcraft-')
command = ['git', 'clone', url, temp_dir]
print(' '.join(command))
subprocess.check_call(command)
if repo_branch:
subprocess.check_call(['git', 'checkout', repo_branch], cwd=temp_dir)
return temp_dir
def _build_snaps(path, cleanbuild=False, keep_dir=False):
try:
for dirpath, _, filenames in os.walk(path):
if 'snapcraft.yaml' in filenames or '.snapcraft.yaml' in filenames:
_build_snap(dirpath, cleanbuild, keep_dir)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
finally:
if keep_dir:
print(
'You can inspect the built project repository in {}'.format(
path))
else:
shutil.rmtree(path)
def _build_snap(path, cleanbuild=False, keep_dir=False):
snapcraft = os.path.abspath(os.path.join('bin', 'snapcraft'))
print('Updating the parts cache...')
subprocess.check_call([snapcraft, 'update'])
print('Snapping {}'.format(path))
command = [snapcraft, '-d']
if cleanbuild:
command.append('cleanbuild')
print(' '.join(command))
subprocess.check_call(command, cwd=path)
if __name__ == '__main__':
main()
| gpl-3.0 |
openfun/edx-platform | lms/djangoapps/instructor/hint_manager.py | 110 | 11466 | """
Views for hint management.
Get to these views through courseurl/hint_manager.
For example: https://courses.edx.org/courses/MITx/2.01x/2013_Spring/hint_manager
These views will only be visible if FEATURES['ENABLE_HINTER_INSTRUCTOR_VIEW'] = True
"""
import json
import re
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response, render_to_string
from courseware.courses import get_course_with_access
from courseware.models import XModuleUserStateSummaryField
import courseware.module_render as module_render
import courseware.model_data as model_data
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import ItemNotFoundError
@ensure_csrf_cookie
def hint_manager(request, course_id):
"""
The URL landing function for all calls to the hint manager, both POST and GET.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
course = get_course_with_access(request.user, 'staff', course_key, depth=None)
except Http404:
out = 'Sorry, but students are not allowed to access the hint manager!'
return HttpResponse(out)
if request.method == 'GET':
out = get_hints(request, course_key, 'mod_queue')
out.update({'error': ''})
return render_to_response('instructor/hint_manager.html', out)
field = request.POST['field']
if not (field == 'mod_queue' or field == 'hints'):
# Invalid field. (Don't let users continue - they may overwrite other db's)
out = 'Error in hint manager - an invalid field was accessed.'
return HttpResponse(out)
switch_dict = {
'delete hints': delete_hints,
'switch fields': lambda *args: None, # Takes any number of arguments, returns None.
'change votes': change_votes,
'add hint': add_hint,
'approve': approve,
}
# Do the operation requested, and collect any error messages.
error_text = switch_dict[request.POST['op']](request, course_key, field)
if error_text is None:
error_text = ''
render_dict = get_hints(request, course_key, field, course=course)
render_dict.update({'error': error_text})
rendered_html = render_to_string('instructor/hint_manager_inner.html', render_dict)
return HttpResponse(json.dumps({'success': True, 'contents': rendered_html}))
def get_hints(request, course_id, field, course=None): # pylint: disable=unused-argument
"""
Load all of the hints submitted to the course.
Args:
`request` -- Django request object.
`course_id` -- The course id, like 'Me/19.002/test_course'
`field` -- Either 'hints' or 'mod_queue'; specifies which set of hints to load.
Keys in returned dict:
- 'field': Same as input
- 'other_field': 'mod_queue' if `field` == 'hints'; and vice-versa.
- 'field_label', 'other_field_label': English name for the above.
- 'all_hints': A list of [answer, pk dict] pairs, representing all hints.
Sorted by answer.
- 'id_to_name': A dictionary mapping problem id to problem name.
"""
if field == 'mod_queue':
other_field = 'hints'
field_label = 'Hints Awaiting Moderation'
other_field_label = 'Approved Hints'
elif field == 'hints':
other_field = 'mod_queue'
field_label = 'Approved Hints'
other_field_label = 'Hints Awaiting Moderation'
# We want to use the course_id to find all matching usage_id's.
# To do this, just take the school/number part - leave off the classname.
# FIXME: we need to figure out how to do this with opaque keys
all_hints = XModuleUserStateSummaryField.objects.filter(
field_name=field,
usage_id__regex=re.escape(u'{0.org}/{0.course}'.format(course_id)),
)
# big_out_dict[problem id] = [[answer, {pk: [hint, votes]}], sorted by answer]
# big_out_dict maps a problem id to a list of [answer, hints] pairs, sorted in order of answer.
big_out_dict = {}
    # id_to_name maps a problem id to the name of the problem.
# id_to_name[problem id] = Display name of problem
id_to_name = {}
for hints_by_problem in all_hints:
hints_by_problem.usage_id = hints_by_problem.usage_id.map_into_course(course_id)
name = location_to_problem_name(course_id, hints_by_problem.usage_id)
if name is None:
continue
id_to_name[hints_by_problem.usage_id] = name
def answer_sorter(thing):
"""
`thing` is a tuple, where `thing[0]` contains an answer, and `thing[1]` contains
a dict of hints. This function returns an index based on `thing[0]`, which
is used as a key to sort the list of things.
"""
try:
return float(thing[0])
except ValueError:
# Put all non-numerical answers first.
return float('-inf')
# Answer list contains [answer, dict_of_hints] pairs.
answer_list = sorted(json.loads(hints_by_problem.value).items(), key=answer_sorter)
big_out_dict[hints_by_problem.usage_id] = answer_list
render_dict = {'field': field,
'other_field': other_field,
'field_label': field_label,
'other_field_label': other_field_label,
'all_hints': big_out_dict,
'id_to_name': id_to_name}
return render_dict
def location_to_problem_name(course_id, loc):
"""
Given the location of a crowdsource_hinter module, try to return the name of the
problem it wraps around. Return None if the hinter no longer exists.
"""
try:
descriptor = modulestore().get_item(loc)
return descriptor.get_children()[0].display_name
except ItemNotFoundError:
# Sometimes, the problem is no longer in the course. Just
# don't include said problem.
return None
def delete_hints(request, course_id, field, course=None): # pylint: disable=unused-argument
"""
Deletes the hints specified.
`request.POST` contains some fields keyed by integers. Each such field contains a
[problem_defn_id, answer, pk] tuple. These tuples specify the hints to be deleted.
Example `request.POST`:
{'op': 'delete_hints',
'field': 'mod_queue',
1: ['problem_whatever', '42.0', '3'],
2: ['problem_whatever', '32.5', '12']}
"""
for key in request.POST:
if key == 'op' or key == 'field':
continue
problem_id, answer, pk = request.POST.getlist(key)
problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
# Can be optimized - sort the delete list by problem_id, and load each problem
# from the database only once.
this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
problem_dict = json.loads(this_problem.value)
del problem_dict[answer][pk]
this_problem.value = json.dumps(problem_dict)
this_problem.save()
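# Hedged sketch (not part of the original module): delete_hints above treats
# every POST key except 'op' and 'field' as a numbered
# [problem_id, answer, pk] triple. A hypothetical helper over a plain dict
# of lists (standing in for request.POST):
def _example_hint_triples(post_data):
    """Yield (problem_id, answer, pk) for each numbered key."""
    for key, value in post_data.items():
        if key in ('op', 'field'):
            continue
        problem_id, answer, pk = value
        yield (problem_id, answer, pk)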
def change_votes(request, course_id, field, course=None): # pylint: disable=unused-argument
"""
Updates the number of votes.
The numbered fields of `request.POST` contain [problem_id, answer, pk, new_votes] tuples.
See `delete_hints`.
Example `request.POST`:
        {'op': 'change_votes',
'field': 'mod_queue',
1: ['problem_whatever', '42.0', '3', 42],
2: ['problem_whatever', '32.5', '12', 9001]}
"""
for key in request.POST:
if key == 'op' or key == 'field':
continue
problem_id, answer, pk, new_votes = request.POST.getlist(key)
problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
problem_dict = json.loads(this_problem.value)
# problem_dict[answer][pk] points to a [hint_text, #votes] pair.
problem_dict[answer][pk][1] = int(new_votes)
this_problem.value = json.dumps(problem_dict)
this_problem.save()
def add_hint(request, course_id, field, course=None):
"""
Add a new hint. `request.POST`:
op
field
problem - The problem id
answer - The answer to which a hint will be added
hint - The text of the hint
"""
problem_id = request.POST['problem']
problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
answer = request.POST['answer']
hint_text = request.POST['hint']
# Validate the answer. This requires initializing the xmodules, which
# is annoying.
try:
descriptor = modulestore().get_item(problem_key)
descriptors = [descriptor]
except ItemNotFoundError:
descriptors = []
field_data_cache = model_data.FieldDataCache(descriptors, course_id, request.user)
hinter_module = module_render.get_module(
request.user,
request,
problem_key,
field_data_cache,
course_id,
course=course
)
if not hinter_module.validate_answer(answer):
# Invalid answer. Don't add it to the database, or else the
# hinter will crash when we encounter it.
return 'Error - the answer you specified is not properly formatted: ' + str(answer)
this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
hint_pk_entry = XModuleUserStateSummaryField.objects.get(field_name='hint_pk', usage_id=problem_key)
this_pk = int(hint_pk_entry.value)
hint_pk_entry.value = this_pk + 1
hint_pk_entry.save()
problem_dict = json.loads(this_problem.value)
if answer not in problem_dict:
problem_dict[answer] = {}
problem_dict[answer][this_pk] = [hint_text, 1]
this_problem.value = json.dumps(problem_dict)
this_problem.save()
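# Hedged illustration (not part of the original module): the JSON stored in
# `value` maps answer -> {pk: [hint_text, votes]}, so the update performed
# by add_hint above reduces to this on a plain dict:
def _example_add_hint(problem_dict, answer, pk, hint_text):
    """Hypothetical stand-in for add_hint's dictionary update."""
    problem_dict.setdefault(answer, {})[pk] = [hint_text, 1]
    return problem_dict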
def approve(request, course_id, field, course=None): # pylint: disable=unused-argument
"""
Approve a list of hints, moving them from the mod_queue to the real
hint list. POST:
op, field
(some number) -> [problem, answer, pk]
The numbered fields are analogous to those in `delete_hints` and `change_votes`.
"""
for key in request.POST:
if key == 'op' or key == 'field':
continue
problem_id, answer, pk = request.POST.getlist(key)
problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
        # Can be optimized - sort the list of hints to approve by problem_id,
        # and load each problem from the database only once.
problem_in_mod = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
problem_dict = json.loads(problem_in_mod.value)
hint_to_move = problem_dict[answer][pk]
del problem_dict[answer][pk]
problem_in_mod.value = json.dumps(problem_dict)
problem_in_mod.save()
problem_in_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=problem_key)
problem_dict = json.loads(problem_in_hints.value)
if answer not in problem_dict:
problem_dict[answer] = {}
problem_dict[answer][pk] = hint_to_move
problem_in_hints.value = json.dumps(problem_dict)
problem_in_hints.save()
| agpl-3.0 |
reshama/data-science-from-scratch | code/getting_data.py | 60 | 6317 | from __future__ import division
from collections import Counter
import math, random, csv, json, re
from bs4 import BeautifulSoup
import requests
######
#
# BOOKS ABOUT DATA
#
######
def is_video(td):
"""it's a video if it has exactly one pricelabel, and if
the stripped text inside that pricelabel starts with 'Video'"""
pricelabels = td('span', 'pricelabel')
return (len(pricelabels) == 1 and
pricelabels[0].text.strip().startswith("Video"))
def book_info(td):
"""given a BeautifulSoup <td> Tag representing a book,
extract the book's details and return a dict"""
title = td.find("div", "thumbheader").a.text
by_author = td.find('div', 'AuthorName').text
authors = [x.strip() for x in re.sub("^By ", "", by_author).split(",")]
isbn_link = td.find("div", "thumbheader").a.get("href")
isbn = re.match("/product/(.*)\.do", isbn_link).groups()[0]
date = td.find("span", "directorydate").text.strip()
return {
"title" : title,
"authors" : authors,
"isbn" : isbn,
"date" : date
}
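# Hedged illustration (not part of the original file): the ISBN extraction in
# book_info pulls the product id out of the href. With a hypothetical link:
#
#   re.match("/product/(.*)\.do", "/product/0636920033400.do").groups()[0]
#   # -> '0636920033400'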
from time import sleep
def scrape(num_pages=31):
base_url = "http://shop.oreilly.com/category/browse-subjects/" + \
"data.do?sortby=publicationDate&page="
books = []
for page_num in range(1, num_pages + 1):
print "souping page", page_num
url = base_url + str(page_num)
soup = BeautifulSoup(requests.get(url).text, 'html5lib')
for td in soup('td', 'thumbtext'):
if not is_video(td):
books.append(book_info(td))
# now be a good citizen and respect the robots.txt!
sleep(30)
return books
def get_year(book):
"""book["date"] looks like 'November 2014' so we need to
split on the space and then take the second piece"""
return int(book["date"].split()[1])
def plot_years(plt, books):
# 2014 is the last complete year of data (when I ran this)
year_counts = Counter(get_year(book) for book in books
if get_year(book) <= 2014)
years = sorted(year_counts)
    book_counts = [year_counts[year] for year in years]
plt.bar([x - 0.5 for x in years], book_counts)
plt.xlabel("year")
plt.ylabel("# of data books")
plt.title("Data is Big!")
plt.show()
##
#
# APIs
#
##
endpoint = "https://api.github.com/users/joelgrus/repos"
repos = json.loads(requests.get(endpoint).text)
from dateutil.parser import parse
dates = [parse(repo["created_at"]) for repo in repos]
month_counts = Counter(date.month for date in dates)
weekday_counts = Counter(date.weekday() for date in dates)
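# Hedged example (not part of the original file): dateutil's parse() turns
# GitHub's ISO-8601 timestamps into datetime objects, which is what makes the
# month/weekday Counters above work. With a hypothetical timestamp:
#
#   parse("2013-07-05T02:02:28Z").month      # -> 7
#   parse("2013-07-05T02:02:28Z").weekday()  # -> 4 (Friday)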
####
#
# Twitter
#
####
from twython import Twython
# fill these in if you want to use the code
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_TOKEN = ""
ACCESS_TOKEN_SECRET = ""
def call_twitter_search_api():
twitter = Twython(CONSUMER_KEY, CONSUMER_SECRET)
# search for tweets containing the phrase "data science"
for status in twitter.search(q='"data science"')["statuses"]:
user = status["user"]["screen_name"].encode('utf-8')
text = status["text"].encode('utf-8')
print user, ":", text
print
from twython import TwythonStreamer
# appending data to a global variable is pretty poor form
# but it makes the example much simpler
tweets = []
class MyStreamer(TwythonStreamer):
"""our own subclass of TwythonStreamer that specifies
how to interact with the stream"""
def on_success(self, data):
"""what do we do when twitter sends us data?
here data will be a Python object representing a tweet"""
# only want to collect English-language tweets
if data['lang'] == 'en':
tweets.append(data)
# stop when we've collected enough
if len(tweets) >= 1000:
self.disconnect()
def on_error(self, status_code, data):
print status_code, data
self.disconnect()
def call_twitter_streaming_api():
stream = MyStreamer(CONSUMER_KEY, CONSUMER_SECRET,
ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# starts consuming public statuses that contain the keyword 'data'
stream.statuses.filter(track='data')
if __name__ == "__main__":
def process(date, symbol, price):
print date, symbol, price
print "tab delimited stock prices:"
with open('tab_delimited_stock_prices.txt', 'rb') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
date = row[0]
symbol = row[1]
closing_price = float(row[2])
process(date, symbol, closing_price)
print
print "colon delimited stock prices:"
with open('colon_delimited_stock_prices.txt', 'rb') as f:
reader = csv.DictReader(f, delimiter=':')
for row in reader:
date = row["date"]
symbol = row["symbol"]
closing_price = float(row["closing_price"])
process(date, symbol, closing_price)
print
print "writing out comma_delimited_stock_prices.txt"
today_prices = { 'AAPL' : 90.91, 'MSFT' : 41.68, 'FB' : 64.5 }
with open('comma_delimited_stock_prices.txt','wb') as f:
writer = csv.writer(f, delimiter=',')
for stock, price in today_prices.items():
writer.writerow([stock, price])
print "BeautifulSoup"
html = requests.get("http://www.example.com").text
soup = BeautifulSoup(html)
print soup
print
print "parsing json"
serialized = """{ "title" : "Data Science Book",
"author" : "Joel Grus",
"publicationYear" : 2014,
"topics" : [ "data", "science", "data science"] }"""
# parse the JSON to create a Python object
deserialized = json.loads(serialized)
if "data science" in deserialized["topics"]:
print deserialized
print
print "GitHub API"
print "dates", dates
print "month_counts", month_counts
print "weekday_count", weekday_counts
last_5_repositories = sorted(repos,
key=lambda r: r["created_at"],
reverse=True)[:5]
print "last five languages", [repo["language"]
for repo in last_5_repositories]
| unlicense |
Anonymouslemming/ansible | lib/ansible/modules/network/nxos/nxos_vrf_af.py | 37 | 7795 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: nxos_vrf_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VRF AF.
description:
- Manages VRF AF
author: Gabriele Gerbino (@GGabriele)
notes:
    - Default, where supported, restores the parameter's default value.
options:
vrf:
description:
- Name of the VRF.
required: true
afi:
description:
- Address-Family Identifier (AFI).
required: true
choices: ['ipv4', 'ipv6']
default: null
safi:
description:
- Sub Address-Family Identifier (SAFI).
required: true
choices: ['unicast', 'multicast']
default: null
route_target_both_auto_evpn:
description:
- Enable/Disable the EVPN route-target 'auto' setting for both
import and export target communities.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Determines whether the config should be present or
not on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vrf_af:
vrf: ntc
afi: ipv4
safi: unicast
route_target_both_auto_evpn: True
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["vrf context ntc", "address-family ipv4 unicast",
"afi ipv4", "route-target both auto evpn", "vrf ntc",
"safi unicast"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
BOOL_PARAMS = ['route_target_both_auto_evpn']
PARAM_TO_COMMAND_KEYMAP = {
'vrf': 'vrf',
'safi': 'safi',
'afi': 'afi',
'route_target_both_auto_evpn': 'route-target both auto evpn'
}
PARAM_TO_DEFAULT_KEYMAP = {}
def get_value(arg, config, module):
command = PARAM_TO_COMMAND_KEYMAP.get(arg)
if arg in BOOL_PARAMS:
command_re = re.compile(r'\s+{0}\s*$'.format(command), re.M)
value = False
try:
if command_re.search(config):
value = True
except TypeError:
value = False
else:
command_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
value = ''
if command in config:
value = command_re.search(config).group('value')
return value
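# Hedged illustration (not part of the original module): for a boolean
# parameter, get_value() only checks whether the command appears on its own
# (indented) line of the rendered config. For example, the compiled pattern
# r'\s+route-target both auto evpn\s*$' with re.M matches a section
# containing the line '  route-target both auto evpn'.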
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = ['vrf context {0}'.format(module.params['vrf'])]
parents.append('address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
config = netcfg.get_section(parents)
if config:
splitted_config = config.splitlines()
vrf_index = False
for index in range(0, len(splitted_config) - 1):
if 'vrf' in splitted_config[index].strip():
vrf_index = index
break
if vrf_index:
config = '\n'.join(splitted_config[0:vrf_index])
for arg in args:
if arg not in ['afi', 'safi', 'vrf']:
existing[arg] = get_value(arg, config, module)
existing['afi'] = module.params['afi']
existing['safi'] = module.params['safi']
existing['vrf'] = module.params['vrf']
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key in table:
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = table.get(key)
return new_dict
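# Hedged example (not part of the original module): apply_key_map() renames
# keys via the map and silently drops keys the map does not know, e.g.
#
#   apply_key_map({'vrf': 'vrf', 'afi': 'afi'}, {'vrf': 'ntc', 'x': 1})
#   # -> {'vrf': 'ntc'}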
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
parents = ['vrf context {0}'.format(module.params['vrf'])]
parents.append('address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = ['vrf context {0}'.format(module.params['vrf'])]
commands.append('no address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
vrf=dict(required=True, type='str'),
safi=dict(required=True, type='str', choices=['unicast', 'multicast']),
afi=dict(required=True, type='str', choices=['ipv4', 'ipv6']),
route_target_both_auto_evpn=dict(required=False, type='bool'),
m_facts=dict(required=False, default=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present', required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
state = module.params['state']
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args)
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'interface':
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present':
state_present(module, existing, proposed, candidate)
elif state == 'absent' and existing:
state_absent(module, existing, proposed, candidate)
if candidate:
load_config(module, candidate)
result['changed'] = True
result['commands'] = candidate.items_text()
else:
result['commands'] = []
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jmartinm/invenio | modules/bibformat/lib/elements/bfe_webauthorpage_data.py | 18 | 1333 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints authors
"""
from cgi import escape
from invenio.webauthorprofile_config import serialize
def format_element(bfo):
"""
Return list of profile data.
"""
data_dict = {}
year_fields = map(bfo.fields, ['260__c', '269__c', '773__y', '502__d'])
recid = bfo.recID
data_dict['year_fields'] = year_fields
return serialize([recid, data_dict])
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| gpl-2.0 |
cwadding/sensit-python | sensit/api/publication.py | 1 | 2940 | # Publications are stored actions which are taken when a feed is created, updated, deleted, or there is a matching percolator query.
#
# topic_id - The key for the parent topic
# id - The identifier of the publication
class Publication():
def __init__(self, topic_id, id, client):
self.topic_id = topic_id
self.id = id
self.client = client
# Get all publications for the associated Topic. Requires authorization of **read_any_publications**, or **read_application_publications**.
# '/api/topics/:topic_id/publications' GET
#
def list(self, options = {}):
body = options['query'] if 'query' in options else {}
response = self.client.get('/api/topics/' + self.topic_id + '/publications', body, options)
return response
# Retrieve a specific publication on the associated topic by Id. Requires authorization of **read_any_publications**, or **read_application_publications**.
# '/api/topics/:topic_id/publications/:id' GET
#
def find(self, options = {}):
body = options['query'] if 'query' in options else {}
response = self.client.get('/api/topics/' + self.topic_id + '/publications/' + self.id + '', body, options)
return response
# Create a new publication on the associated Topic which can be easily retrieved later using an id. Requires authorization of **manage_any_publications**, or **manage_application_publications**.
# '/api/topics/:topic_id/publications' POST
#
    # publication - A Hash containing `host`: the IP address or host of the connection (required); `protocol`: the protocol to communicate over (http, tcp, udp, mqtt) (required); `port`: the port of the connection.
def create(self, publication, options = {}):
body = options['body'] if 'body' in options else {}
body['publication'] = publication
response = self.client.post('/api/topics/' + self.topic_id + '/publications', body, options)
return response
# Update a publication. Requires authorization of **manage_any_publications**, or **manage_application_publications**.
# '/api/topics/:topic_id/publications/:id' PUT
#
    # publication - A Hash containing `host`: the IP address or host of the connection (required); `protocol`: the protocol to communicate over (http, tcp, udp, mqtt) (required); `port`: the port of the connection.
def update(self, publication, options = {}):
body = options['body'] if 'body' in options else {}
body['publication'] = publication
response = self.client.put('/api/topics/' + self.topic_id + '/publications/' + self.id + '', body, options)
return response
# Remove a saved publication on the associated Topic by Id. Requires authorization of **manage_any_publications**, or **manage_application_publications**.
# '/api/topics/:topic_id/publications/:id' DELETE
#
def delete(self, options = {}):
body = options['body'] if 'body' in options else {}
response = self.client.delete('/api/topics/' + self.topic_id + '/publications/' + self.id + '', body, options)
return response
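# Hedged usage sketch (not part of the original file): `SensitClient` below
# is a hypothetical stand-in for whatever client object this class expects,
# i.e. anything exposing get/post/put/delete(url, body, options) methods.
#
#   client = SensitClient(api_key='...')
#   pub = Publication('my-topic', '42', client)
#   pub.create({'host': 'example.com', 'protocol': 'http', 'port': 80})
#   pub.list()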
| mit |
sysalexis/kbengine | kbe/src/lib/python/Lib/test/sortperf.py | 92 | 4805 | """Sort performance test.
See main() for command line syntax.
See tabulate() for output format.
"""
import sys
import time
import random
import marshal
import tempfile
import os
td = tempfile.gettempdir()
def randfloats(n):
"""Return a list of n random floats in [0, 1)."""
# Generating floats is expensive, so this writes them out to a file in
# a temp directory. If the file already exists, it just reads them
# back in and shuffles them a bit.
fn = os.path.join(td, "rr%06d" % n)
try:
fp = open(fn, "rb")
except OSError:
r = random.random
result = [r() for i in range(n)]
try:
try:
fp = open(fn, "wb")
marshal.dump(result, fp)
fp.close()
fp = None
finally:
if fp:
try:
os.unlink(fn)
except OSError:
pass
except OSError as msg:
print("can't write", fn, ":", msg)
else:
result = marshal.load(fp)
fp.close()
# Shuffle it a bit...
for i in range(10):
i = random.randrange(n)
temp = result[:i]
del result[:i]
temp.reverse()
result.extend(temp)
del temp
assert len(result) == n
return result
def flush():
sys.stdout.flush()
def doit(L):
t0 = time.perf_counter()
L.sort()
t1 = time.perf_counter()
print("%6.2f" % (t1-t0), end=' ')
flush()
def tabulate(r):
"""Tabulate sort speed for lists of various sizes.
The sizes are 2**i for i in r (the argument, a list).
The output displays i, 2**i, and the time to sort arrays of 2**i
floating point numbers with the following properties:
*sort: random data
\sort: descending data
/sort: ascending data
3sort: ascending, then 3 random exchanges
+sort: ascending, then 10 random at the end
%sort: ascending, then randomly replace 1% of the elements w/ random values
~sort: many duplicates
=sort: all equal
!sort: worst case scenario
"""
cases = tuple([ch + "sort" for ch in r"*\/3+%~=!"])
fmt = ("%2s %7s" + " %6s"*len(cases))
print(fmt % (("i", "2**i") + cases))
for i in r:
n = 1 << i
L = randfloats(n)
print("%2d %7d" % (i, n), end=' ')
flush()
doit(L) # *sort
L.reverse()
doit(L) # \sort
doit(L) # /sort
# Do 3 random exchanges.
for dummy in range(3):
i1 = random.randrange(n)
i2 = random.randrange(n)
L[i1], L[i2] = L[i2], L[i1]
doit(L) # 3sort
# Replace the last 10 with random floats.
if n >= 10:
L[-10:] = [random.random() for dummy in range(10)]
doit(L) # +sort
# Replace 1% of the elements at random.
for dummy in range(n // 100):
L[random.randrange(n)] = random.random()
doit(L) # %sort
# Arrange for lots of duplicates.
if n > 4:
del L[4:]
L = L * (n // 4)
# Force the elements to be distinct objects, else timings can be
# artificially low.
L = list(map(lambda x: --x, L))
doit(L) # ~sort
del L
# All equal. Again, force the elements to be distinct objects.
L = list(map(abs, [-0.5] * n))
doit(L) # =sort
del L
# This one looks like [3, 2, 1, 0, 0, 1, 2, 3]. It was a bad case
# for an older implementation of quicksort, which used the median
# of the first, last and middle elements as the pivot.
half = n // 2
L = list(range(half - 1, -1, -1))
L.extend(range(half))
# Force to float, so that the timings are comparable. This is
    # significantly faster if we leave them as ints.
L = list(map(float, L))
doit(L) # !sort
print()
def main():
"""Main program when invoked as a script.
One argument: tabulate a single row.
Two arguments: tabulate a range (inclusive).
Extra arguments are used to seed the random generator.
"""
# default range (inclusive)
k1 = 15
k2 = 20
if sys.argv[1:]:
# one argument: single point
k1 = k2 = int(sys.argv[1])
if sys.argv[2:]:
# two arguments: specify range
k2 = int(sys.argv[2])
if sys.argv[3:]:
# derive random seed from remaining arguments
x = 1
for a in sys.argv[3:]:
x = 69069 * x + hash(a)
random.seed(x)
r = range(k1, k2+1) # include the end point
tabulate(r)
if __name__ == '__main__':
main()
| lgpl-3.0 |
kswiat/django | django/utils/module_loading.py | 18 | 6640 | from __future__ import absolute_import # Avoid importing `importlib` from this package.
import copy
from importlib import import_module
import os
import sys
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
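# Hedged example (not part of the original module): import_string() resolves
# a dotted path to the attribute named by its last segment, e.g.
#
#   import_string('os.path.join')   # -> the os.path.join function
#   import_string('os')             # raises ImportError ("doesn't look like
#                                   #  a module path")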
def import_by_path(dotted_path, error_prefix=''):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImproperlyConfigured if something goes wrong.
"""
warnings.warn(
'import_by_path() has been deprecated. Use import_string() instead.',
RemovedInDjango19Warning, stacklevel=2)
try:
attr = import_string(dotted_path)
except ImportError as e:
msg = '%sError importing module %s: "%s"' % (
error_prefix, dotted_path, e)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
sys.exc_info()[2])
return attr
def autodiscover_modules(*args, **kwargs):
"""
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
"""
from django.apps import apps
register_to = kwargs.get('register_to')
for app_config in apps.get_app_configs():
# Attempt to import the app's module.
try:
if register_to:
before_import_registry = copy.copy(register_to._registry)
for module_to_search in args:
import_module('%s.%s' % (app_config.name, module_to_search))
except:
# Reset the model registry to the state before the last import as
# this import will have to reoccur on the next request and this
# could raise NotRegistered and AlreadyRegistered exceptions
# (see #8245).
if register_to:
register_to._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have an admin module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module_to_search):
raise
if sys.version_info[:2] >= (3, 3):
if sys.version_info[:2] >= (3, 4):
from importlib.util import find_spec as importlib_find
else:
from importlib import find_loader as importlib_find
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
try:
package_name = package.__name__
package_path = package.__path__
except AttributeError:
# package isn't a package.
return False
full_module_name = package_name + '.' + module_name
return importlib_find(full_module_name, package_path) is not None
else:
import imp
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
name = ".".join([package.__name__, module_name])
try:
# None indicates a cached miss; see mark_miss() in Python/import.c.
return sys.modules[name] is not None
except KeyError:
pass
try:
package_path = package.__path__ # No __path__, then not a package.
except AttributeError:
        # The remainder of this function assumes that we're dealing with
        # a package (a module with a __path__), so if it's not, bail here.
return False
for finder in sys.meta_path:
if finder.find_module(name, package_path):
return True
for entry in package_path:
try:
# Try the cached finder.
finder = sys.path_importer_cache[entry]
if finder is None:
# Implicit import machinery should be used.
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
continue
# Else see if the finder knows of a loader.
elif finder.find_module(name):
return True
else:
continue
except KeyError:
# No cached finder, so try and make one.
for hook in sys.path_hooks:
try:
finder = hook(entry)
# XXX Could cache in sys.path_importer_cache
if finder.find_module(name):
return True
else:
# Once a finder is found, stop the search.
break
except ImportError:
# Continue the search for a finder.
continue
else:
# No finder found.
# Try the implicit import machinery if searching a directory.
if os.path.isdir(entry):
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
pass
# XXX Could insert None or NullImporter
else:
# Exhausted the search, so the module cannot be found.
return False
| bsd-3-clause |
paterson/servo | tests/wpt/css-tests/css21_dev/xhtml1/support/fonts/makegsubfonts.py | 820 | 14309 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 |
blacktear23/django | tests/regressiontests/utils/html.py | 86 | 4753 | import unittest
from django.utils import html
class TestUtilsHtml(unittest.TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
            ('&','&amp;'),
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&#39;'),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
        self.check_output(f, '<&', '&lt;&amp;')
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('<f', '<f'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
        entities = ("&#1;", "&#12;", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_fix_ampersands(self):
f = html.fix_ampersands
# Strings without ampersands or with ampersands already encoded.
        values = ("a&#1;", "b&#12;", "&a;", "&amp; &x; ", "asdf")
patterns = (
("%s", "%s"),
            ("&%s", "&amp;%s"),
            ("&%s&", "&amp;%s&amp;"),
)
for value in values:
for in_pattern, out_pattern in patterns:
self.check_output(f, in_pattern % value, out_pattern % value)
# Strings with ampersands that need encoding.
items = (
            ("&#;", "&amp;#;"),
            ("&#875 ;", "&amp;#875 ;"),
            ("&#4abc;", "&amp;#4abc;"),
)
for value, output in items:
self.check_output(f, value, output)
def test_escapejs(self):
f = html.escapejs
items = (
(u'"double quotes" and \'single quotes\'', u'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(ur'\ : backslashes, too', u'\\u005C : backslashes, too'),
(u'and lots of whitespace: \r\n\t\v\f\b', u'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(ur'<script>and this</script>', u'\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
(u'paragraph separator:\u2029and line separator:\u2028', u'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
| bsd-3-clause |
Beyond-Imagination/BlubBlub | ChatbotServer/ChatbotEnv/Lib/site-packages/numpy/polynomial/tests/test_laguerre.py | 58 | 17242 | """Tests for laguerre module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.laguerre as lag
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
L0 = np.array([1])/1
L1 = np.array([1, -1])/1
L2 = np.array([2, -4, 1])/2
L3 = np.array([6, -18, 9, -1])/6
L4 = np.array([24, -96, 72, -16, 1])/24
L5 = np.array([120, -600, 600, -200, 25, -1])/120
L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720
Llist = [L0, L1, L2, L3, L4, L5, L6]
def trim(x):
return lag.lagtrim(x, tol=1e-6)
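# Hedged example (not part of the original tests): trim() drops trailing
# coefficients whose magnitude is at or below tol, so trim([1, 2, 1e-9])
# gives array([1., 2.]), while trim([1, 2, 1e-3]) keeps all three
# coefficients because 1e-3 > 1e-6.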
class TestConstants(TestCase):
def test_lagdomain(self):
assert_equal(lag.lagdomain, [0, 1])
def test_lagzero(self):
assert_equal(lag.lagzero, [0])
def test_lagone(self):
assert_equal(lag.lagone, [1])
def test_lagx(self):
assert_equal(lag.lagx, [1, -1])
class TestArithmetic(TestCase):
x = np.linspace(-3, 3, 100)
def test_lagadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = lag.lagadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_lagsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = lag.lagsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_lagmulx(self):
assert_equal(lag.lagmulx([0]), [0])
assert_equal(lag.lagmulx([1]), [1, -1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)]
assert_almost_equal(lag.lagmulx(ser), tgt)
def test_lagmul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = lag.lagval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = lag.lagval(self.x, pol2)
pol3 = lag.lagmul(pol1, pol2)
val3 = lag.lagval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_lagdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = lag.lagadd(ci, cj)
quo, rem = lag.lagdiv(tgt, ci)
res = lag.lagadd(lag.lagmul(quo, ci), rem)
assert_almost_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([9., -14., 6.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_lagval(self):
#check empty input
assert_equal(lag.lagval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Llist]
for i in range(7):
msg = "At i=%d" % i
tgt = y[i]
res = lag.lagval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(lag.lagval(x, [1]).shape, dims)
assert_equal(lag.lagval(x, [1, 0]).shape, dims)
assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims)
def test_lagval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = lag.lagval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = lag.lagval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_lagval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = lag.lagval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = lag.lagval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_laggrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = lag.laggrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = lag.laggrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_laggrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = lag.laggrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = lag.laggrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_lagint(self):
# check exceptions
assert_raises(ValueError, lag.lagint, [0], .5)
assert_raises(ValueError, lag.lagint, [0], -1)
assert_raises(ValueError, lag.lagint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = lag.lagint([0], m=i, k=k)
assert_almost_equal(res, [1, -1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
lagpol = lag.poly2lag(pol)
lagint = lag.lagint(lagpol, m=1, k=[i])
res = lag.lag2poly(lagint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
lagpol = lag.poly2lag(pol)
lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(lag.lagval(-1, lagint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
lagpol = lag.poly2lag(pol)
lagint = lag.lagint(lagpol, m=1, k=[i], scl=2)
res = lag.lag2poly(lagint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1)
res = lag.lagint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1, k=[k])
res = lag.lagint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1)
res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1, k=[k], scl=2)
res = lag.lagint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_lagint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T
res = lag.lagint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([lag.lagint(c) for c in c2d])
res = lag.lagint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([lag.lagint(c, k=3) for c in c2d])
res = lag.lagint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_lagder(self):
# check exceptions
assert_raises(ValueError, lag.lagder, [0], .5)
assert_raises(ValueError, lag.lagder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = lag.lagder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = lag.lagder(lag.lagint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_lagder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T
res = lag.lagder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([lag.lagder(c) for c in c2d])
res = lag.lagder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_lagvander(self):
# check for 1d x
x = np.arange(3)
v = lag.lagvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], lag.lagval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = lag.lagvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], lag.lagval(x, coef))
def test_lagvander2d(self):
# also tests lagval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = lag.lagvander2d(x1, x2, [1, 2])
tgt = lag.lagval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = lag.lagvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_lagvander3d(self):
# also tests lagval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = lag.lagvander3d(x1, x2, x3, [1, 2, 3])
tgt = lag.lagval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_lagfit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, lag.lagfit, [1], [1], -1)
assert_raises(TypeError, lag.lagfit, [[1]], [1], 0)
assert_raises(TypeError, lag.lagfit, [], [1], 0)
assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0)
assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0)
assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0)
assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, lag.lagfit, [1], [1], [-1,])
assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, lag.lagfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = lag.lagfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(lag.lagval(x, coef3), y)
coef3 = lag.lagfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(lag.lagval(x, coef3), y)
#
coef4 = lag.lagfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(lag.lagval(x, coef4), y)
coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(lag.lagval(x, coef4), y)
#
coef2d = lag.lagfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = lag.lagfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(lag.lagfit(x, x, 1), [1, -1])
assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, lag.lagcompanion, [])
assert_raises(ValueError, lag.lagcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(lag.lagcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
class TestGauss(TestCase):
def test_100(self):
x, w = lag.laggauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = lag.lagvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = 1.0
assert_almost_equal(w.sum(), tgt)
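# A one-line justification of tgt above: Gauss-Laguerre quadrature uses the
# weight exp(-x) on [0, inf), and since the rule is exact for the constant
# polynomial 1, sum(w) = integral of exp(-x) from 0 to inf = 1.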
class TestMisc(TestCase):
def test_lagfromroots(self):
res = lag.lagfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = lag.lagfromroots(roots)
res = lag.lagval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(lag.lag2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_lagroots(self):
assert_almost_equal(lag.lagroots([1]), [])
assert_almost_equal(lag.lagroots([0, 1]), [1])
for i in range(2, 5):
tgt = np.linspace(0, 3, i)
res = lag.lagroots(lag.lagfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_lagtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, lag.lagtrim, coef, -1)
# Test results
assert_equal(lag.lagtrim(coef), coef[:-1])
assert_equal(lag.lagtrim(coef, 1), coef[:-3])
assert_equal(lag.lagtrim(coef, 2), [0])
def test_lagline(self):
assert_equal(lag.lagline(3, 4), [7, -4])
def test_lag2poly(self):
for i in range(7):
assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i])
def test_poly2lag(self):
for i in range(7):
assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(0, 10, 11)
tgt = np.exp(-x)
res = lag.lagweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
alisaifee/limits | limits/_version.py | 1 | 18450 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
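# Illustrative (hypothetical) expansion: inside a git-archive tarball the
# $Format$ placeholders above are replaced by git, e.g.
#   git_refnames = " (HEAD -> master, tag: 1.3)"
#   git_full = "0123456789abcdef0123456789abcdef01234567"
#   git_date = "2016-01-01 12:00:00 +0000"
# git_versions_from_keywords() below recovers the version from these strings.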
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "limits-"
cfg.versionfile_source = "limits/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
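# Sketch of the heuristic above, with a hypothetical path: an sdist such as
# limits-1.4.1.tar.gz unpacks into "limits-1.4.1", so with
# parentdir_prefix = "limits-" this returns {"version": "1.4.1", ...}.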
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601-compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601-like"
# string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
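# Hedged example of the parsing above (hypothetical describe output):
#   describe_out = "1.2-3-gabc1234-dirty"
# yields pieces = {"closest-tag": "1.2", "distance": 3, "short": "abc1234",
# "dirty": True, ...}; a bare "abc1234" (no tag found) instead sets
# "closest-tag" to None and "distance" to the total commit count.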
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
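# Illustrative render_pep440 outputs (pieces values hypothetical):
#   {"closest-tag": "1.2", "distance": 0, "dirty": False} -> "1.2"
#   {"closest-tag": "1.2", "distance": 3, "short": "abc1234", "dirty": True}
#     -> "1.2+3.gabc1234.dirty"
#   {"closest-tag": None, "distance": 7, "short": "abc1234", "dirty": False}
#     -> "0+untagged.7.gabc1234"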
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
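# For comparison (hypothetical pieces): render_pep440_post maps
#   {"closest-tag": "1.2", "distance": 3, "short": "abc1234", "dirty": False}
# to "1.2.post3+gabc1234", and a dirty tree additionally gets the ".dev0"
# marker described in the docstring above.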
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| mit |
Jgarcia-IAS/localizacion | openerp/tools/convert.py | 205 | 41282 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cStringIO
import csv
import logging
import os.path
import pickle
import re
import sys
# for eval context:
import time
import openerp
import openerp.release
import openerp.workflow
from yaml_import import convert_yaml_import
import assertion_report
_logger = logging.getLogger(__name__)
try:
import pytz
except ImportError:
_logger.warning('could not find pytz library, please install it')
class pytzclass(object):
all_timezones=[]
pytz=pytzclass()
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from lxml import etree, builder
import misc
from config import config
from translate import _
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from misc import SKIPPED_ELEMENT_TYPES
from misc import unquote
from openerp import SUPERUSER_ID
# Import of XML records requires the unsafe eval as well,
# almost everywhere, which is ok because it supposedly comes
# from trusted data, but at least we make it obvious now.
unsafe_eval = eval
from safe_eval import safe_eval as eval
class ParseError(Exception):
def __init__(self, msg, text, filename, lineno):
self.msg = msg
self.text = text
self.filename = filename
self.lineno = lineno
def __str__(self):
return '"%s" while parsing %s:%s, near\n%s' \
% (self.msg, self.filename, self.lineno, self.text)
def _ref(self, cr):
return lambda x: self.id_get(cr, x)
def _obj(pool, cr, uid, model_str, context=None):
model = pool[model_str]
return lambda x: model.browse(cr, uid, x, context=context)
def _get_idref(self, cr, uid, model_str, context, idref):
idref2 = dict(idref,
time=time,
DateTime=datetime,
datetime=datetime,
timedelta=timedelta,
relativedelta=relativedelta,
version=openerp.release.major_version,
ref=_ref(self, cr),
pytz=pytz)
if len(model_str):
idref2['obj'] = _obj(self.pool, cr, uid, model_str, context=context)
return idref2
def _fix_multiple_roots(node):
"""
Surround the children of the ``node`` element of an XML field with a
single root "data" element, to prevent having a document with multiple
roots once parsed separately.
XML nodes should have one root only, but we'd like to support
direct multiple roots in our partial documents (like inherited view architectures).
As a convention we'll surround multiple roots with a container "data" element, to be
ignored later when parsing.
"""
real_nodes = [x for x in node if not isinstance(x, SKIPPED_ELEMENT_TYPES)]
if len(real_nodes) > 1:
data_node = etree.Element("data")
for child in node:
data_node.append(child)
node.append(data_node)
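# Minimal sketch of the transformation above (hypothetical field element):
#   <field name="arch" type="xml"><p/><p/></field>
# is turned, before parsing, into
#   <field name="arch" type="xml"><data><p/><p/></data></field>
# (lxml's append() moves the children), leaving a single root element.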
def _eval_xml(self, node, pool, cr, uid, idref, context=None):
if context is None:
context = {}
if node.tag in ('field','value'):
t = node.get('type','char')
f_model = node.get('model', '').encode('utf-8')
if node.get('search'):
f_search = node.get("search",'').encode('utf-8')
f_use = node.get("use",'id').encode('utf-8')
f_name = node.get("name",'').encode('utf-8')
idref2 = {}
if f_search:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
q = unsafe_eval(f_search, idref2)
ids = pool[f_model].search(cr, uid, q)
if f_use != 'id':
ids = map(lambda x: x[f_use], pool[f_model].read(cr, uid, ids, [f_use]))
_cols = pool[f_model]._columns
if (f_name in _cols) and _cols[f_name]._type=='many2many':
return ids
f_val = False
if len(ids):
f_val = ids[0]
if isinstance(f_val, tuple):
f_val = f_val[0]
return f_val
a_eval = node.get('eval','')
if a_eval:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
try:
return unsafe_eval(a_eval, idref2)
except Exception:
logging.getLogger('openerp.tools.convert.init').error(
'Could not eval(%s) for %s in %s', a_eval, node.get('name'), context)
raise
def _process(s, idref):
matches = re.finditer('[^%]%\((.*?)\)[ds]', s)
done = []
for m in matches:
found = m.group()[1:]
if found in done:
continue
done.append(found)
id = m.groups()[0]
if not id in idref:
idref[id] = self.id_get(cr, id)
s = s.replace(found, str(idref[id]))
s = s.replace('%%', '%') # Quite weird, but it's there for (somewhat) backward-compatibility's sake
return s
if t == 'xml':
_fix_multiple_roots(node)
return '<?xml version="1.0"?>\n'\
+_process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
if t == 'html':
return _process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
data = node.text
if node.get('file'):
with openerp.tools.file_open(node.get('file'), 'rb') as f:
data = f.read()
if t == 'file':
from ..modules import module
path = data.strip()
if not module.get_module_resource(self.module, path):
raise IOError("No such file or directory: '%s' in %s" % (
path, self.module))
return '%s,%s' % (self.module, path)
if t == 'char':
return data
if t == 'base64':
return data.encode('base64')
if t == 'int':
d = data.strip()
if d == 'None':
return None
return int(d)
if t == 'float':
return float(data.strip())
if t in ('list','tuple'):
res=[]
for n in node.iterchildren(tag='value'):
res.append(_eval_xml(self,n,pool,cr,uid,idref))
if t=='tuple':
return tuple(res)
return res
elif node.tag == "function":
args = []
a_eval = node.get('eval','')
# FIXME: should probably be exclusive
if a_eval:
idref['ref'] = lambda x: self.id_get(cr, x)
args = unsafe_eval(a_eval, idref)
for n in node:
return_val = _eval_xml(self,n, pool, cr, uid, idref, context)
if return_val is not None:
args.append(return_val)
model = pool[node.get('model', '')]
method = node.get('name')
res = getattr(model, method)(cr, uid, *args)
return res
elif node.tag == "test":
return node.text
escape_re = re.compile(r'(?<!\\)/')
def escape(x):
return x.replace('\\/', '/')
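# Example of the escaping above: menu paths split only on unescaped slashes,
# so escape_re.split("Sales/Config\/Misc") -> ["Sales", "Config\/Misc"],
# and escape() then restores the literal slash: "Config/Misc".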
class xml_import(object):
@staticmethod
def nodeattr2bool(node, attr, default=False):
if not node.get(attr):
return default
val = node.get(attr).strip()
if not val:
return default
return val.lower() not in ('0', 'false', 'off')
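# Illustration: nodeattr2bool(node, 'noupdate') returns the default when the
# attribute is absent or blank, False for "0"/"false"/"off" (any case), and
# True for any other value.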
def isnoupdate(self, data_node=None):
return self.noupdate or (len(data_node) and self.nodeattr2bool(data_node, 'noupdate', False))
def get_context(self, data_node, node, eval_dict):
data_node_context = (len(data_node) and data_node.get('context','').encode('utf8'))
node_context = node.get("context",'').encode('utf8')
context = {}
for ctx in (data_node_context, node_context):
if ctx:
try:
ctx_res = unsafe_eval(ctx, eval_dict)
if isinstance(context, dict):
context.update(ctx_res)
else:
context = ctx_res
except NameError:
# Some contexts contain references that are only valid at runtime at
# client-side, so in that case we keep the original context string
# as it is. We also log it, just in case.
context = ctx
_logger.debug('Context value (%s) for element with id "%s" or its data node does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
ctx, node.get('id','n/a'), exc_info=True)
return context
def get_uid(self, cr, uid, data_node, node):
node_uid = node.get('uid','') or (len(data_node) and data_node.get('uid',''))
if node_uid:
return self.id_get(cr, node_uid)
return uid
def _test_xml_id(self, xml_id):
id = xml_id
if '.' in xml_id:
module, id = xml_id.split('.', 1)
assert '.' not in id, """The ID reference "%s" must contain
at most one dot. Dots are used to refer to another module's ID, in the
form: module.record_id""" % (xml_id,)
if module != self.module:
modcnt = self.pool['ir.module.module'].search_count(self.cr, self.uid, ['&', ('name', '=', module), ('state', 'in', ['installed'])])
assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
if len(id) > 64:
_logger.error('id: %s is too long (max: 64)', id)
def _tag_delete(self, cr, rec, data_node=None, mode=None):
d_model = rec.get("model")
d_search = rec.get("search",'').encode('utf-8')
d_id = rec.get("id")
ids = []
if d_search:
idref = _get_idref(self, cr, self.uid, d_model, context={}, idref={})
try:
ids = self.pool[d_model].search(cr, self.uid, unsafe_eval(d_search, idref))
except ValueError:
_logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)
pass
if d_id:
try:
ids.append(self.id_get(cr, d_id))
except ValueError:
# d_id cannot be found. doesn't matter in this case
_logger.warning('Skipping deletion for missing XML ID `%r`', d_id, exc_info=True)
pass
if ids:
self.pool[d_model].unlink(cr, self.uid, ids)
def _remove_ir_values(self, cr, name, value, model):
ir_values_obj = self.pool['ir.values']
ir_value_ids = ir_values_obj.search(cr, self.uid, [('name','=',name),('value','=',value),('model','=',model)])
if ir_value_ids:
ir_values_obj.unlink(cr, self.uid, ir_value_ids)
return True
def _tag_report(self, cr, rec, data_node=None, mode=None):
res = {}
for dest,f in (('name','string'),('model','model'),('report_name','name')):
res[dest] = rec.get(f,'').encode('utf8')
assert res[dest], "Attribute %s of report is empty!" % (f,)
for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),
('attachment','attachment'),('attachment_use','attachment_use'), ('usage','usage'),
('report_type', 'report_type'), ('parser', 'parser')):
if rec.get(field):
res[dest] = rec.get(field).encode('utf8')
if rec.get('auto'):
res['auto'] = eval(rec.get('auto','False'))
if rec.get('sxw'):
sxw_content = misc.file_open(rec.get('sxw')).read()
res['report_sxw_content'] = sxw_content
if rec.get('header'):
res['header'] = eval(rec.get('header','False'))
res['multi'] = rec.get('multi') and eval(rec.get('multi','False'))
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.report.xml", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if not rec.get('menu') or eval(rec.get('menu','False')):
keyword = str(rec.get('keyword', 'client_print_multi'))
value = 'ir.actions.report.xml,'+str(id)
replace = rec.get('replace', True)
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, res['name'], [res['model']], value, replace=replace, isobject=True, xml_id=xml_id)
elif self.mode=='update' and eval(rec.get('menu','False'))==False:
# Special check for report having attribute menu=False on update
value = 'ir.actions.report.xml,'+str(id)
self._remove_ir_values(cr, res['name'], value, res['model'])
return id
def _tag_function(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
context = self.get_context(data_node, rec, {'ref': _ref(self, cr)})
uid = self.get_uid(cr, self.uid, data_node, rec)
_eval_xml(self,rec, self.pool, cr, uid, self.idref, context=context)
return
def _tag_url(self, cr, rec, data_node=None, mode=None):
url = rec.get("url",'').encode('utf8')
target = rec.get("target",'').encode('utf8')
name = rec.get("name",'').encode('utf8')
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
res = {'name': name, 'url': url, 'target':target}
id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.act_url", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
def _tag_act_window(self, cr, rec, data_node=None, mode=None):
name = rec.get('name','').encode('utf-8')
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
type = rec.get('type','').encode('utf-8') or 'ir.actions.act_window'
view_id = False
if rec.get('view_id'):
view_id = self.id_get(cr, rec.get('view_id','').encode('utf-8'))
domain = rec.get('domain','').encode('utf-8') or '[]'
res_model = rec.get('res_model','').encode('utf-8')
src_model = rec.get('src_model','').encode('utf-8')
view_type = rec.get('view_type','').encode('utf-8') or 'form'
view_mode = rec.get('view_mode','').encode('utf-8') or 'tree,form'
usage = rec.get('usage','').encode('utf-8')
limit = rec.get('limit','').encode('utf-8')
auto_refresh = rec.get('auto_refresh','').encode('utf-8')
uid = self.uid
# Act_window's 'domain' and 'context' contain mostly literals
# but they can also refer to the variables provided below
# in eval_context, so we need to eval() them before storing.
# Among the context variables, 'active_id' refers to
# the currently selected items in a list view, and only
# takes meaning at runtime on the client side. For this
# reason it must remain a bare variable in domain and context,
# even after eval() at server-side. We use the special 'unquote'
# class to achieve this effect: a string which has itself, unquoted,
# as representation.
active_id = unquote("active_id")
active_ids = unquote("active_ids")
active_model = unquote("active_model")
def ref(str_id):
return self.id_get(cr, str_id)
# Include all locals() in eval_context, for backwards compatibility
eval_context = {
'name': name,
'xml_id': xml_id,
'type': type,
'view_id': view_id,
'domain': domain,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
'uid' : uid,
'active_id': active_id,
'active_ids': active_ids,
'active_model': active_model,
'ref' : ref,
}
context = self.get_context(data_node, rec, eval_context)
try:
domain = unsafe_eval(domain, eval_context)
except NameError:
# Some domains contain references that are only valid at runtime at
# client-side, so in that case we keep the original domain string
# as it is. We also log it, just in case.
_logger.debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
res = {
'name': name,
'type': type,
'view_id': view_id,
'domain': domain,
'context': context,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
}
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
if rec.get('target'):
res['target'] = rec.get('target','')
if rec.get('multi'):
res['multi'] = eval(rec.get('multi', 'False'))
id = self.pool['ir.model.data']._update(cr, self.uid, 'ir.actions.act_window', self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if src_model:
#keyword = 'client_action_relate'
keyword = rec.get('key2','').encode('utf-8') or 'client_action_relate'
value = 'ir.actions.act_window,'+str(id)
replace = rec.get('replace','') or True
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, xml_id, [src_model], value, replace=replace, isobject=True, xml_id=xml_id)
# TODO add remove ir.model.data
def _tag_ir_set(self, cr, rec, data_node=None, mode=None):
if self.mode != 'init':
return
res = {}
for field in rec.findall('./field'):
f_name = field.get("name",'').encode('utf-8')
f_val = _eval_xml(self,field,self.pool, cr, self.uid, self.idref)
res[f_name] = f_val
self.pool['ir.model.data'].ir_set(cr, self.uid, res['key'], res['key2'], res['name'], res['models'], res['value'], replace=res.get('replace',True), isobject=res.get('isobject', False), meta=res.get('meta',None))
def _tag_workflow(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
model = rec.get('model').encode('ascii')
w_ref = rec.get('ref')
if w_ref:
id = self.id_get(cr, w_ref)
else:
number_children = len(rec)
assert number_children > 0,\
"You must define a child node if you don't give a ref"
assert number_children == 1,\
'Only one child node is accepted (%d given)' % number_children
id = _eval_xml(self, rec[0], self.pool, cr, self.uid, self.idref)
uid = self.get_uid(cr, self.uid, data_node, rec)
openerp.workflow.trg_validate(
uid, model, id, rec.get('action').encode('ascii'), cr)
#
# Support two types of notation:
# name="Inventory Control/Sending Goods"
# or
# action="action_id"
# parent="parent_id"
#
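# For instance (hypothetical ids):
#   <menuitem id="menu_send" name="Inventory Control/Sending Goods"/>
# resolves (or creates) the "Inventory Control" parent from the menu path,
# while
#   <menuitem id="menu_send" name="Sending Goods"
#             action="action_send" parent="menu_inventory"/>
# attaches the item to an explicitly referenced parent.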
def _tag_menuitem(self, cr, rec, data_node=None, mode=None):
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
m_l = map(escape, escape_re.split(rec.get("name",'').encode('utf8')))
values = {'parent_id': False}
if rec.get('parent', False) is False and len(m_l) > 1:
# No parent attribute specified and the menu name has several menu components,
# try to determine the ID of the parent according to menu path
pid = False
res = None
values['name'] = m_l[-1]
m_l = m_l[:-1] # last part is our name, not a parent
for idx, menu_elem in enumerate(m_l):
if pid:
cr.execute('select id from ir_ui_menu where parent_id=%s and name=%s', (pid, menu_elem))
else:
cr.execute('select id from ir_ui_menu where parent_id is null and name=%s', (menu_elem,))
res = cr.fetchone()
if res:
pid = res[0]
else:
# the menuitem doesn't exist but we are in a branch (not a leaf)
_logger.warning('Warning: no ID for submenu %s of menu %s!', menu_elem, str(m_l))
pid = self.pool['ir.ui.menu'].create(cr, self.uid, {'parent_id' : pid, 'name' : menu_elem})
values['parent_id'] = pid
else:
# The parent attribute was specified, if non-empty determine its ID, otherwise
# explicitly make a top-level menu
if rec.get('parent'):
menu_parent_id = self.id_get(cr, rec.get('parent',''))
else:
# we get here with <menuitem parent="">, explicit clear of parent, or
# if no parent attribute at all but menu name is not a menu path
menu_parent_id = False
values = {'parent_id': menu_parent_id}
if rec.get('name'):
values['name'] = rec.get('name')
try:
res = [ self.id_get(cr, rec.get('id','')) ]
except:
res = None
if rec.get('action'):
a_action = rec.get('action','').encode('utf8')
# determine the type of action
action_type, action_id = self.model_id_get(cr, a_action)
action_type = action_type.split('.')[-1] # keep only type part
if not values.get('name') and action_type in ('act_window', 'wizard', 'url', 'client', 'server'):
a_table = 'ir_act_%s' % action_type.replace('act_', '')
cr.execute('select name from "%s" where id=%%s' % a_table, (int(action_id),))
resw = cr.fetchone()
if resw:
values['name'] = resw[0]
if not values.get('name'):
# ensure menu has a name
values['name'] = rec_id or '?'
if rec.get('sequence'):
values['sequence'] = int(rec.get('sequence'))
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
values['groups_id'] = groups_value
pid = self.pool['ir.model.data']._update(cr, self.uid, 'ir.ui.menu', self.module, values, rec_id, noupdate=self.isnoupdate(data_node), mode=self.mode, res_id=res and res[0] or False)
if rec_id and pid:
self.idref[rec_id] = int(pid)
if rec.get('action') and pid:
action = "ir.actions.%s,%d" % (action_type, action_id)
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', 'tree_but_open', 'Menuitem', [('ir.ui.menu', int(pid))], action, True, True, xml_id=rec_id)
return 'ir.ui.menu', pid
def _assert_equals(self, f1, f2, prec=4):
return not round(f1 - f2, prec)
def _tag_assert(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
rec_model = rec.get("model",'').encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
rec_src = rec.get("search",'').encode('utf8')
rec_src_count = rec.get("count")
rec_string = rec.get("string",'').encode('utf8') or 'unknown'
ids = None
eval_dict = {'ref': _ref(self, cr)}
context = self.get_context(data_node, rec, eval_dict)
uid = self.get_uid(cr, self.uid, data_node, rec)
if rec_id:
ids = [self.id_get(cr, rec_id)]
elif rec_src:
q = unsafe_eval(rec_src, eval_dict)
ids = self.pool[rec_model].search(cr, uid, q, context=context)
if rec_src_count:
count = int(rec_src_count)
if len(ids) != count:
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' Incorrect search count:\n' \
' expected count: %d\n' \
' obtained count: %d\n' \
% (rec_string, count, len(ids))
_logger.error(msg)
return
assert ids is not None,\
'You must give either an id or a search criteria'
ref = _ref(self, cr)
for id in ids:
brrec = model.browse(cr, uid, id, context)
class d(dict):
def __getitem__(self2, key):
if key in brrec:
return brrec[key]
return dict.__getitem__(self2, key)
globals_dict = d()
globals_dict['floatEqual'] = self._assert_equals
globals_dict['ref'] = ref
globals_dict['_ref'] = ref
for test in rec.findall('./test'):
f_expr = test.get("expr",'').encode('utf-8')
expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
expression_value = unsafe_eval(f_expr, globals_dict)
if expression_value != expected_value: # assertion failed
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' xmltag: %s\n' \
' expected value: %r\n' \
' obtained value: %r\n' \
% (rec_string, etree.tostring(test), expected_value, expression_value)
_logger.error(msg)
return
else: # all tests were successful for this assertion tag (no break)
self.assertion_report.record_success()
def _tag_record(self, cr, rec, data_node=None, mode=None):
rec_model = rec.get("model").encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
rec_context = rec.get("context", None)
if rec_context:
rec_context = unsafe_eval(rec_context)
self._test_xml_id(rec_id)
# in update mode, the record won't be updated if the data node explicitly
# opt-out using @noupdate="1". A second check will be performed in
# ir.model.data#_update() using the record's ir.model.data `noupdate` field.
if self.isnoupdate(data_node) and self.mode != 'init':
# check if the xml record has no id, skip
if not rec_id:
return None
if '.' in rec_id:
module,rec_id2 = rec_id.split('.')
else:
module = self.module
rec_id2 = rec_id
id = self.pool['ir.model.data']._update_dummy(cr, self.uid, rec_model, module, rec_id2)
if id:
# if the resource already exists, don't update it but store
# its database id (can be useful)
self.idref[rec_id] = int(id)
return None
elif not self.nodeattr2bool(rec, 'forcecreate', True):
# if it doesn't exist and we shouldn't create it, skip it
return None
# else create it normally
res = {}
for field in rec.findall('./field'):
#TODO: most of this code is duplicated above (in _eval_xml)...
f_name = field.get("name").encode('utf-8')
f_ref = field.get("ref",'').encode('utf-8')
f_search = field.get("search",'').encode('utf-8')
f_model = field.get("model",'').encode('utf-8')
if not f_model and f_name in model._fields:
f_model = model._fields[f_name].comodel_name
f_use = field.get("use",'').encode('utf-8') or 'id'
f_val = False
if f_search:
q = unsafe_eval(f_search, self.idref)
assert f_model, 'Define an attribute model="..." in your XML file!'
f_obj = self.pool[f_model]
# browse the objects searched
s = f_obj.browse(cr, self.uid, f_obj.search(cr, self.uid, q))
# column definitions of the "local" object
_fields = self.pool[rec_model]._fields
# if the current field is many2many
if (f_name in _fields) and _fields[f_name].type == 'many2many':
f_val = [(6, 0, map(lambda x: x[f_use], s))]
elif len(s):
# otherwise (we are probably in a many2one field),
# take the first element of the search
f_val = s[0][f_use]
elif f_ref:
if f_name in model._fields and model._fields[f_name].type == 'reference':
val = self.model_id_get(cr, f_ref)
f_val = val[0] + ',' + str(val[1])
else:
f_val = self.id_get(cr, f_ref)
else:
f_val = _eval_xml(self,field, self.pool, cr, self.uid, self.idref)
if f_name in model._fields:
if model._fields[f_name].type == 'integer':
f_val = int(f_val)
res[f_name] = f_val
id = self.pool['ir.model.data']._update(cr, self.uid, rec_model, self.module, res, rec_id or False, not self.isnoupdate(data_node), noupdate=self.isnoupdate(data_node), mode=self.mode, context=rec_context )
if rec_id:
self.idref[rec_id] = int(id)
if config.get('import_partial'):
cr.commit()
return rec_model, id
def _tag_template(self, cr, el, data_node=None, mode=None):
# This helper transforms a <template> element into a <record> and forwards it
tpl_id = el.get('id', el.get('t-name', '')).encode('ascii')
full_tpl_id = tpl_id
if '.' not in full_tpl_id:
full_tpl_id = '%s.%s' % (self.module, tpl_id)
# set the full template name for qweb <module>.<id>
if not el.get('inherit_id'):
el.set('t-name', full_tpl_id)
el.tag = 't'
else:
el.tag = 'data'
el.attrib.pop('id', None)
record_attrs = {
'id': tpl_id,
'model': 'ir.ui.view',
}
for att in ['forcecreate', 'context']:
if att in el.keys():
record_attrs[att] = el.attrib.pop(att)
Field = builder.E.field
name = el.get('name', tpl_id)
record = etree.Element('record', attrib=record_attrs)
record.append(Field(name, name='name'))
record.append(Field("qweb", name='type'))
record.append(Field(el.get('priority', "16"), name='priority'))
if 'inherit_id' in el.attrib:
record.append(Field(name='inherit_id', ref=el.get('inherit_id')))
if el.get('active') in ("True", "False"):
view_id = self.id_get(cr, tpl_id, raise_if_not_found=False)
if mode != "update" or not view_id:
record.append(Field(name='active', eval=el.get('active')))
if el.get('customize_show') in ("True", "False"):
record.append(Field(name='customize_show', eval=el.get('customize_show')))
groups = el.attrib.pop('groups', None)
if groups:
grp_lst = map(lambda x: "ref('%s')" % x, groups.split(','))
record.append(Field(name="groups_id", eval="[(6, 0, ["+', '.join(grp_lst)+"])]"))
if el.attrib.pop('page', None) == 'True':
record.append(Field(name="page", eval="True"))
if el.get('primary') == 'True':
# Pseudo clone mode, we'll set the t-name to the full canonical xmlid
el.append(
builder.E.xpath(
builder.E.attribute(full_tpl_id, name='t-name'),
expr=".",
position="attributes",
)
)
record.append(Field('primary', name='mode'))
# inject complete <template> element (after changing node name) into
# the ``arch`` field
record.append(Field(el, name="arch", type="xml"))
return self._tag_record(cr, record, data_node)
def id_get(self, cr, id_str, raise_if_not_found=True):
if id_str in self.idref:
return self.idref[id_str]
res = self.model_id_get(cr, id_str, raise_if_not_found)
if res and len(res)>1: res = res[1]
return res
def model_id_get(self, cr, id_str, raise_if_not_found=True):
model_data_obj = self.pool['ir.model.data']
mod = self.module
if '.' not in id_str:
id_str = '%s.%s' % (mod, id_str)
return model_data_obj.xmlid_to_res_model_res_id(
cr, self.uid, id_str,
raise_if_not_found=raise_if_not_found)
def parse(self, de, mode=None):
if de.tag != 'openerp':
raise Exception("Mismatched XML format: the root tag must be `openerp`.")
for n in de.findall('./data'):
for rec in n:
if rec.tag in self._tags:
try:
self._tags[rec.tag](self.cr, rec, n, mode=mode)
except Exception, e:
self.cr.rollback()
exc_info = sys.exc_info()
raise ParseError, (misc.ustr(e), etree.tostring(rec).rstrip(), rec.getroottree().docinfo.URL, rec.sourceline), exc_info[2]
return True
def __init__(self, cr, module, idref, mode, report=None, noupdate=False):
self.mode = mode
self.module = module
self.cr = cr
self.idref = idref
self.pool = openerp.registry(cr.dbname)
self.uid = 1
if report is None:
report = assertion_report.assertion_report()
self.assertion_report = report
self.noupdate = noupdate
self._tags = {
'record': self._tag_record,
'delete': self._tag_delete,
'function': self._tag_function,
'menuitem': self._tag_menuitem,
'template': self._tag_template,
'workflow': self._tag_workflow,
'report': self._tag_report,
'ir_set': self._tag_ir_set,
'act_window': self._tag_act_window,
'url': self._tag_url,
'assert': self._tag_assert,
}
def convert_file(cr, module, filename, idref, mode='update', noupdate=False, kind=None, report=None, pathname=None):
if pathname is None:
pathname = os.path.join(module, filename)
fp = misc.file_open(pathname)
ext = os.path.splitext(filename)[1].lower()
try:
if ext == '.csv':
convert_csv_import(cr, module, pathname, fp.read(), idref, mode, noupdate)
elif ext == '.sql':
convert_sql_import(cr, fp)
elif ext == '.yml':
convert_yaml_import(cr, module, fp, kind, idref, mode, noupdate, report)
elif ext == '.xml':
convert_xml_import(cr, module, fp, idref, mode, noupdate, report)
elif ext == '.js':
pass # .js files are valid but ignored here.
else:
_logger.warning("Can't load unknown file type %s.", filename)
finally:
fp.close()
def convert_sql_import(cr, fp):
queries = fp.read().split(';')
for query in queries:
new_query = ' '.join(query.split())
if new_query:
cr.execute(new_query)
def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
noupdate=False):
'''Import csv file :
quote: "
delimiter: ,
encoding: utf-8'''
if not idref:
idref={}
model = ('.'.join(fname.split('.')[:-1]).split('-'))[0]
#remove folder path from model
head, model = os.path.split(model)
input = cStringIO.StringIO(csvcontent) #FIXME
reader = csv.reader(input, quotechar='"', delimiter=',')
fields = reader.next()
fname_partial = ""
if config.get('import_partial'):
fname_partial = module + '/'+ fname
if not os.path.isfile(config.get('import_partial')):
pickle.dump({}, file(config.get('import_partial'),'w+'))
else:
data = pickle.load(file(config.get('import_partial')))
if fname_partial in data:
if not data[fname_partial]:
return
else:
for i in range(data[fname_partial]):
reader.next()
if not (mode == 'init' or 'id' in fields):
_logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
return
uid = 1
datas = []
for line in reader:
if not (line and any(line)):
continue
try:
datas.append(map(misc.ustr, line))
except:
_logger.error("Cannot import the line: %s", line)
registry = openerp.registry(cr.dbname)
result, rows, warning_msg, dummy = registry[model].import_data(cr, uid, fields, datas,mode, module, noupdate, filename=fname_partial)
if result < 0:
# Report failed import and abort module install
raise Exception(_('Module loading %s failed: file %s could not be processed:\n %s') % (module, fname, warning_msg))
if config.get('import_partial'):
data = pickle.load(file(config.get('import_partial')))
data[fname_partial] = 0
pickle.dump(data, file(config.get('import_partial'),'wb'))
cr.commit()
#
# xml import/export
#
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
doc = etree.parse(xmlfile)
relaxng = etree.RelaxNG(
etree.parse(os.path.join(config['root_path'],'import_xml.rng' )))
try:
relaxng.assert_(doc)
except Exception:
_logger.error('The XML file does not fit the required schema !')
_logger.error(misc.ustr(relaxng.error_log.last_error))
raise
if idref is None:
idref={}
obj = xml_import(cr, module, idref, mode, report=report, noupdate=noupdate)
obj.parse(doc.getroot(), mode=mode)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jgonthier/psi4 | tests/pytests/test_qcvars.py | 10 | 8520 | import copy
import pytest
from .utils import *
import numpy as np
import psi4
pytestmark = pytest.mark.quick
_vars_entered = {
'VAR A': 4.0,
'VaR B': -4.0,
'MATVAR A': psi4.core.Matrix.from_array(np.arange(6).reshape(2, 3)),
'MatvaR B': psi4.core.Matrix.from_array(np.arange(3).reshape(1, 3)),
'NPVAR A': np.arange(8).reshape(2, 4),
'NpvaR B': np.arange(4).reshape(1, 4),
}
_vars_stored = {
'VAR A': 4.0,
'VAR B': -4.0,
'MATVAR A': psi4.core.Matrix.from_array(np.arange(6).reshape(2, 3)),
'MATVAR B': psi4.core.Matrix.from_array(np.arange(3).reshape(1, 3)),
'NPVAR A': psi4.core.Matrix.from_array(np.arange(8).reshape(2, 4)),
'NPVAR B': psi4.core.Matrix.from_array(np.arange(4).reshape(1, 4)),
}
@pytest.fixture
def pe_wfn_qcvars():
psi4.core.clean_variables()
he = psi4.geometry('He')
wfn = psi4.core.Wavefunction.build(he, 'cc-pvdz')
for pv, pvv in _vars_entered.items():
psi4.core.set_variable(pv, pvv)
wfn.set_variable(pv, pvv)
return wfn
# can't use compare_dicts with symmetry psi4.Matrix
def _compare_qcvars(ref, expected, decimal, label):
assert set(ref.keys()) == set(expected.keys())
for k, v in ref.items():
if isinstance(v, psi4.core.Matrix):
assert compare_matrices(v, expected[k], decimal, label)
else:
assert compare_values(v, expected[k], decimal, label)
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variables(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
subject = obj.variables()
_compare_qcvars(_vars_stored, subject, 8, '')
obj.set_variable('npvar A', np.zeros(3).reshape(1, 3))
_compare_qcvars(_vars_stored, subject, 8, '')
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_set_variable_overwrite(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
# fine to overwrite keys
key = 'var D'
val = 3.3
val2 = 4.4
obj.set_variable(key, val)
assert compare_values(val, obj.variable(key), 8, tnm())
obj.set_variable(key, val2)
assert compare_values(val2, obj.variable(key), 8, tnm())
# fine to overwrite array keys
key = 'matvar D'
mat = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
mat2 = psi4.core.Matrix.from_array(np.arange(6).reshape(3, 2))
obj.set_variable(key, mat)
assert compare_matrices(mat, obj.variable(key), 8, tnm())
obj.set_variable(key, mat2)
assert compare_matrices(mat2, obj.variable(key), 8, tnm())
# not fine to shadow keys with both types
with pytest.raises(psi4.ValidationError) as err:
obj.set_variable('vAr D', mat)
assert 'already a scalar variable' in str(err.value)
with pytest.raises(psi4.ValidationError) as err:
obj.set_variable('matvAr D', val)
assert 'already an array variable' in str(err.value)
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variable_none(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
with pytest.raises(KeyError):
obj.variable('var f')
@pytest.mark.parametrize("mode,key", [
pytest.param('globals', 'vAR B', id='globals scal'),
pytest.param('globals', 'MatvAR B', id='globals mat'),
pytest.param('globals', 'NpvAR B', id='globals np'),
pytest.param('wfn', 'vAR B', id='wfn scal'),
pytest.param('wfn', 'MatvAR B', id='wfn mat'),
pytest.param('wfn', 'NpvAR B', id='wfn np'),
])
def test_variable(mode, key, pe_wfn_qcvars, request):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
if 'scal' in request.node.name:
compare = compare_values
else:
compare = compare_matrices
assert compare(_vars_stored[key.upper()], obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variable_mem_scal(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
key = 'VaR C'
ref = 3.3
val = 3.3
obj.set_variable(key, val)
assert compare_values(ref, val, 8, tnm())
assert compare_values(ref, obj.variable(key), 8, tnm())
val *= 2
assert compare_values(ref, obj.variable(key), 8, tnm())
accessed = obj.variable(key)
accessed *= 3
assert compare_values(ref, obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variable_mem_mat(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
key = 'MaTvAr C'
ref = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
val = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
obj.set_variable(key, val)
assert compare_matrices(ref, val, 8, tnm())
assert compare_matrices(ref, obj.variable(key), 8, tnm())
val.scale(2)
assert compare_matrices(ref, obj.variable(key), 8, tnm())
accessed = obj.variable(key)
accessed.scale(3)
assert compare_matrices(ref, obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variable_mem_np(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
key = 'npVaR C'
ref = np.arange(4).reshape(2, 2)
val = np.arange(4).reshape(2, 2)
obj.set_variable(key, val)
assert compare_arrays(ref, val, 8, tnm())
ref = psi4.core.Matrix.from_array(ref)
assert compare_matrices(ref, obj.variable(key), 8, tnm())
val *= 2
assert compare_matrices(ref, obj.variable(key), 8, tnm())
accessed = obj.variable(key)
accessed.scale(3)
assert compare_matrices(ref, obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode,tkey,fkey", [
pytest.param('globals', 'var A', 'var C', id='globals scal'),
pytest.param('globals', 'matvar A', 'var C', id='globals mat'),
pytest.param('globals', 'npvar A', 'var C', id='globals np'),
pytest.param('wfn', 'var A', 'var C', id='wfn scal'),
pytest.param('wfn', 'matvar A', 'var C', id='wfn mat'),
pytest.param('wfn', 'npvar A', 'var C', id='wfn np'),
])
def test_has_del_variable_scal(mode, tkey, fkey, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
assert obj.has_variable(tkey)
assert not obj.has_variable(fkey)
obj.del_variable(tkey)
assert not obj.has_variable(tkey)
obj.del_variable(fkey)
# <<< TODO Deprecated! Delete in Psi4 v1.4 >>>
def test_deprecated_core_get_variable(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = psi4.core.get_variable('vAR B')
assert compare_values(_vars_stored['VAR B'], subject, 8, tnm())
def test_deprecated_core_get_variables(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = psi4.core.get_variables()
scals = {k: v for k, v in _vars_stored.items() if k.startswith('VAR ')}
_compare_qcvars(scals, subject, 8, tnm())
def test_deprecated_core_get_array_variable(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = psi4.core.get_array_variable('MatvAR B')
assert compare_matrices(_vars_stored['MATVAR B'], subject, 8, tnm())
def test_deprecated_core_get_array_variables(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = psi4.core.get_array_variables()
arrs = {k: v for k, v in _vars_stored.items() if not k.startswith('VAR ')}
_compare_qcvars(arrs, subject, 8, tnm())
def test_deprecated_wfn_get_variable(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = pe_wfn_qcvars.get_variable('vAR B')
assert compare_values(_vars_stored['VAR B'], subject, 8, tnm())
def test_deprecated_wfn_get_array(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = pe_wfn_qcvars.get_array('MatvAR B')
assert compare_matrices(_vars_stored['MATVAR B'], subject, 8, tnm())
def test_deprecated_wfn_set_array(pe_wfn_qcvars):
mat = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
with pytest.warns(FutureWarning) as err:
pe_wfn_qcvars.set_array('matvar D', mat)
assert compare_matrices(mat, pe_wfn_qcvars.variable('MATvar D'), 8, tnm())
def test_deprecated_wfn_arrays(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = pe_wfn_qcvars.arrays()
arrs = {k: v for k, v in _vars_stored.items() if not k.startswith('VAR ')}
_compare_qcvars(arrs, subject, 8, tnm())
| lgpl-3.0 |
vktr/CouchPotatoServer | couchpotato/core/media/movie/providers/automation/crowdai.py | 12 | 3025 | import re
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'CrowdAI'
class CrowdAI(Automation, RSS):
interval = 1800
def getIMDBids(self):
movies = []
urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))
for url in urls:
if not urls[url]:
continue
rss_movies = self.getRSSData(url)
for movie in rss_movies:
description = self.getTextElement(movie, 'description')
grabs = 0
for item in movie:
if item.attrib.get('name') == 'grabs':
grabs = item.attrib.get('value')
break
if int(grabs) > tryInt(self.conf('number_grabs')):
title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
year = re.match(r'.*Year: (\d{4}).*', description).group(1)
imdb = self.search(title, year)
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies
config = [{
'name': 'crowdai',
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'crowdai_automation',
'label': 'CrowdAI',
            'description': 'Imports from the RSS feed of any newznab-powered NZB provider, depending on the number of grabs per movie. Go to your newznab site, find the RSS section, then copy the link under "Movies > x264 feed" and paste it here.',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_urls_use',
'label': 'Use',
'default': '1',
},
{
'name': 'automation_urls',
'label': 'url',
'type': 'combined',
'combine': ['automation_urls_use', 'automation_urls'],
'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100',
},
{
'name': 'number_grabs',
'default': '500',
'label': 'Grab threshold',
'description': 'Number of grabs required',
},
],
},
],
}]
| gpl-3.0 |
openmips/stbgui | lib/python/Screens/TimeDateInput.py | 25 | 2498 | from Screen import Screen
from Components.config import ConfigClock, ConfigDateTime, getConfigListEntry
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigListScreen
from Components.Label import Label
from Components.Pixmap import Pixmap
import time
import datetime
class TimeDateInput(Screen, ConfigListScreen):
def __init__(self, session, config_time=None, config_date=None):
Screen.__init__(self, session)
self.setTitle(_("Date/time input"))
self["oktext"] = Label(_("OK"))
self["canceltext"] = Label(_("Cancel"))
self["ok"] = Pixmap()
self["cancel"] = Pixmap()
self.createConfig(config_date, config_time)
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.keySelect,
"save": self.keyGo,
"cancel": self.keyCancel,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list)
self.createSetup(self["config"])
def createConfig(self, conf_date, conf_time):
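		# save_mask bits: 1 = caller supplied a time config, 2 = caller
		# supplied a date config; only caller-supplied configs are
		# saved/cancelled when the screen closes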
self.save_mask = 0
if conf_time:
self.save_mask |= 1
else:
			conf_time = ConfigClock(default = time.time())
if conf_date:
self.save_mask |= 2
else:
conf_date = ConfigDateTime(default = time.time(), formatstring = _("%d.%B %Y"), increment = 86400)
self.timeinput_date = conf_date
self.timeinput_time = conf_time
def createSetup(self, configlist):
self.list = [
getConfigListEntry(_("Date"), self.timeinput_date),
getConfigListEntry(_("Time"), self.timeinput_time)
]
configlist.list = self.list
configlist.l.setList(self.list)
def keyPageDown(self):
sel = self["config"].getCurrent()
if sel and sel[1] == self.timeinput_time:
self.timeinput_time.decrement()
self["config"].invalidateCurrent()
def keyPageUp(self):
sel = self["config"].getCurrent()
if sel and sel[1] == self.timeinput_time:
self.timeinput_time.increment()
self["config"].invalidateCurrent()
def keySelect(self):
self.keyGo()
def getTimestamp(self, date, mytime):
d = time.localtime(date)
dt = datetime.datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
return int(time.mktime(dt.timetuple()))
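	# e.g. (values assumed): for a date timestamp falling on 25 Nov 2012 and
	# mytime = [13, 45], getTimestamp() returns the epoch seconds for
	# 25 Nov 2012 13:45 local time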
	def keyGo(self):
		timestamp = self.getTimestamp(self.timeinput_date.value, self.timeinput_time.value)
		if self.save_mask & 1:
			self.timeinput_time.save()
		if self.save_mask & 2:
			self.timeinput_date.save()
		self.close((True, timestamp))
def keyCancel(self):
if self.save_mask & 1:
self.timeinput_time.cancel()
if self.save_mask & 2:
self.timeinput_date.cancel()
self.close((False,))
| gpl-2.0 |
seaotterman/tensorflow | tensorflow/contrib/learn/python/learn/metric_spec_test.py | 136 | 14891 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MetricSpec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# pylint: disable=g-bad-todo,g-import-not-at-top
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.python.platform import test
class MetricSpecTest(test.TestCase):
def test_named_args_with_weights(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = {"l1": "l1_value", "l2": "l2_value"}
predictions_ = {"p1": "p1_value", "p2": "p2_value"}
def _fn0(predictions, labels, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", labels)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
def _fn1(predictions, targets, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", targets)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
def _fn2(prediction, label, weight=None):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", label)
self.assertEqual("f2_value", weight)
return "metric_fn_result"
def _fn3(prediction, target, weight=None):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", target)
self.assertEqual("f2_value", weight)
return "metric_fn_result"
for fn in (_fn0, _fn1, _fn2, _fn3):
spec = MetricSpec(
metric_fn=fn, prediction_key="p1", label_key="l1", weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_args(self):
def _fn():
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(
{"f1": "f1_value"}, "labels_value", "predictions_value")
def test_kwargs(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(**kwargs):
self.assertEqual({}, kwargs)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_named_labels_no_predictions(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(labels):
self.assertEqual(labels_, labels)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_named_labels_no_predictions_with_kwargs(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(labels, **kwargs):
self.assertEqual(labels_, labels)
self.assertEqual({}, kwargs)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_no_named_predictions_named_labels_first_arg(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(labels, predictions_by_another_name):
self.assertEqual(predictions_, predictions_by_another_name)
self.assertEqual(labels_, labels)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_predictions_named_labels_second_arg(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(predictions_by_another_name, labels):
self.assertEqual(predictions_, predictions_by_another_name)
self.assertEqual(labels_, labels)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_labels(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(predictions):
self.assertEqual(predictions_, predictions)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_labels_or_predictions_1arg(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(a):
self.assertEqual(predictions_, a)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_labels_or_predictions_2args(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(a, b):
del a, b
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_named_args_no_weights(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = {"l1": "l1_value", "l2": "l2_value"}
predictions_ = {"p1": "p1_value", "p2": "p2_value"}
def _fn0(predictions, labels):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", labels)
return "metric_fn_result"
def _fn1(predictions, targets):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", targets)
return "metric_fn_result"
def _fn2(prediction, label):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", label)
return "metric_fn_result"
def _fn3(prediction, target):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", target)
return "metric_fn_result"
for fn in (_fn0, _fn1, _fn2, _fn3):
spec = MetricSpec(metric_fn=fn, prediction_key="p1", label_key="l1")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_predictions_dict_no_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value", "l2": "l2_value"}
predictions = {"p1": "p1_value", "p2": "p2_value"}
def _fn(predictions, labels, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn, label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError,
"MetricSpec without specified prediction_key requires predictions"
" tensor or single element dict"):
spec.create_metric_ops(features, labels, predictions)
def test_labels_dict_no_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value", "l2": "l2_value"}
predictions = {"p1": "p1_value", "p2": "p2_value"}
def _fn(labels, predictions, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn, prediction_key="p1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError,
"MetricSpec without specified label_key requires labels tensor or"
" single element dict"):
spec.create_metric_ops(features, labels, predictions)
def test_single_prediction(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = {"l1": "l1_value", "l2": "l2_value"}
predictions_ = "p1_value"
def _fn(predictions, labels, weights=None):
self.assertEqual(predictions_, predictions)
self.assertEqual("l1_value", labels)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn, label_key="l1", weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_single_label(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = "l1_value"
predictions_ = {"p1": "p1_value", "p2": "p2_value"}
def _fn(predictions, labels, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual(labels_, labels)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn, prediction_key="p1", weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_single_predictions_with_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value", "l2": "l2_value"}
predictions = "p1_value"
def _fn(predictions, labels, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(
metric_fn=_fn, prediction_key="p1", label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError,
"MetricSpec with prediction_key specified requires predictions dict"):
spec.create_metric_ops(features, labels, predictions)
def test_single_labels_with_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = "l1_value"
predictions = {"p1": "p1_value", "p2": "p2_value"}
def _fn(predictions, labels, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(
metric_fn=_fn, prediction_key="p1", label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError, "MetricSpec with label_key specified requires labels dict"):
spec.create_metric_ops(features, labels, predictions)
def test_str(self):
def _metric_fn(labels, predictions, weights=None):
return predictions, labels, weights
string = str(MetricSpec(
metric_fn=_metric_fn,
label_key="my_label",
prediction_key="my_prediction",
weight_key="my_weight"))
self.assertIn("_metric_fn", string)
self.assertIn("my_label", string)
self.assertIn("my_prediction", string)
self.assertIn("my_weight", string)
def test_partial_str(self):
def custom_metric(predictions, labels, stuff, weights=None):
return predictions, labels, weights, stuff
string = str(MetricSpec(
metric_fn=functools.partial(custom_metric, stuff=5),
label_key="my_label",
prediction_key="my_prediction",
weight_key="my_weight"))
self.assertIn("custom_metric", string)
self.assertIn("my_label", string)
self.assertIn("my_prediction", string)
self.assertIn("my_weight", string)
def test_partial(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value"}
predictions = {"p1": "p1_value", "p2": "p2_value"}
def custom_metric(predictions, labels, stuff, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", labels)
self.assertEqual("f2_value", weights)
if stuff:
return "metric_fn_result"
raise ValueError("No stuff.")
spec = MetricSpec(
metric_fn=functools.partial(custom_metric, stuff=5),
label_key="l1",
prediction_key="p1",
weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels, predictions))
spec = MetricSpec(
metric_fn=functools.partial(custom_metric, stuff=None),
prediction_key="p1", label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(ValueError, "No stuff."):
spec.create_metric_ops(features, labels, predictions)
def test_label_key_without_label_arg(self):
def _fn0(predictions, weights=None):
del predictions, weights
self.fail("Expected failure before metric_fn.")
def _fn1(prediction, weight=None):
del prediction, weight
self.fail("Expected failure before metric_fn.")
for fn in (_fn0, _fn1):
with self.assertRaisesRegexp(ValueError, "label.*missing"):
MetricSpec(metric_fn=fn, label_key="l1")
def test_weight_key_without_weight_arg(self):
def _fn0(predictions, labels):
del predictions, labels
self.fail("Expected failure before metric_fn.")
def _fn1(prediction, label):
del prediction, label
self.fail("Expected failure before metric_fn.")
def _fn2(predictions, targets):
del predictions, targets
self.fail("Expected failure before metric_fn.")
def _fn3(prediction, target):
del prediction, target
self.fail("Expected failure before metric_fn.")
for fn in (_fn0, _fn1, _fn2, _fn3):
with self.assertRaisesRegexp(ValueError, "weight.*missing"):
MetricSpec(metric_fn=fn, weight_key="f2")
def test_multiple_label_args(self):
def _fn0(predictions, labels, targets):
del predictions, labels, targets
self.fail("Expected failure before metric_fn.")
def _fn1(prediction, label, target):
del prediction, label, target
self.fail("Expected failure before metric_fn.")
for fn in (_fn0, _fn1):
with self.assertRaisesRegexp(ValueError, "provide only one of.*label"):
MetricSpec(metric_fn=fn)
def test_multiple_prediction_args(self):
def _fn(predictions, prediction, labels):
del predictions, prediction, labels
self.fail("Expected failure before metric_fn.")
with self.assertRaisesRegexp(ValueError, "provide only one of.*prediction"):
MetricSpec(metric_fn=_fn)
def test_multiple_weight_args(self):
def _fn(predictions, labels, weights=None, weight=None):
del predictions, labels, weights, weight
self.fail("Expected failure before metric_fn.")
with self.assertRaisesRegexp(ValueError, "provide only one of.*weight"):
MetricSpec(metric_fn=_fn)
if __name__ == "__main__":
test.main()
| apache-2.0 |
KiChjang/servo | tests/wpt/web-platform-tests/tools/third_party/pywebsocket3/test/testdata/handlers/origin_check_wsh.py | 21 | 1934 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
if request.ws_origin == 'http://example.com':
return
raise ValueError('Unacceptable origin: %r' % request.ws_origin)
def web_socket_transfer_data(request):
message = 'origin_check_wsh.py is called for %s, %s' % (
request.ws_resource, request.ws_protocol)
request.connection.write(message.encode('UTF-8'))
# vi:sts=4 sw=4 et
| mpl-2.0 |
lgp171188/cookiecutter | tests/test_generate_copy_without_render.py | 25 | 2238 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_copy_without_render
---------------------------------
"""
from __future__ import unicode_literals
import os
import pytest
from cookiecutter import generate
from cookiecutter import utils
@pytest.fixture(scope='function')
def remove_test_dir(request):
"""
Remove the folder that is created by the test.
"""
def fin_remove_test_dir():
if os.path.exists('test_copy_without_render'):
utils.rmtree('test_copy_without_render')
request.addfinalizer(fin_remove_test_dir)
@pytest.mark.usefixtures('clean_system', 'remove_test_dir')
def test_generate_copy_without_render_extensions():
generate.generate_files(
context={
'cookiecutter': {
'repo_name': 'test_copy_without_render',
'render_test': 'I have been rendered!',
'_copy_without_render': [
'*not-rendered',
'rendered/not_rendered.yml',
'*.txt',
]}
},
repo_dir='tests/test-generate-copy-without-render'
)
dir_contents = os.listdir('test_copy_without_render')
assert '{{cookiecutter.repo_name}}-not-rendered' in dir_contents
assert 'test_copy_without_render-rendered' in dir_contents
with open('test_copy_without_render/README.txt') as f:
assert '{{cookiecutter.render_test}}' in f.read()
with open('test_copy_without_render/README.rst') as f:
assert 'I have been rendered!' in f.read()
with open('test_copy_without_render/'
'test_copy_without_render-rendered/'
'README.txt') as f:
assert '{{cookiecutter.render_test}}' in f.read()
with open('test_copy_without_render/'
'test_copy_without_render-rendered/'
'README.rst') as f:
assert 'I have been rendered' in f.read()
with open('test_copy_without_render/'
'{{cookiecutter.repo_name}}-not-rendered/'
'README.rst') as f:
assert '{{cookiecutter.render_test}}' in f.read()
with open('test_copy_without_render/rendered/not_rendered.yml') as f:
assert '{{cookiecutter.render_test}}' in f.read()
| bsd-3-clause |
h00dy/Diamond | src/collectors/slony/test/testslony.py | 23 | 3624 | #!/usr/bin/python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import run_only
from mock import patch
from slony import SlonyCollector
def run_only_if_psycopg2_is_available(func):
try:
import psycopg2
except ImportError:
psycopg2 = None
pred = lambda: psycopg2 is not None
return run_only(func, pred)
class TestSlonyCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SlonyCollector', {})
self.collector = SlonyCollector(config, None)
def test_import(self):
self.assertTrue(SlonyCollector)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
@patch.object(SlonyCollector, 'publish')
def test_default(self, publish, _get_stats_by_database):
_get_stats_by_database.return_value = [('foo', 7)]
self.collector.collect()
_get_stats_by_database.assert_called_with(
'localhost',
5432,
'postgres',
'postgres',
'postgres',
'_postgres',
'Node [0-9]+ - postgres@localhost',
)
self.assertPublished(publish, 'foo', 7)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
@patch.object(SlonyCollector, 'publish')
def test_instances(self, publish, _get_stats_by_database):
def side_effect(host, port, user, pwd, slony_db, slony_schema, node):
if (slony_db, slony_schema) == ('postgres', '_postgres'):
return [('foo', 7)]
elif (slony_db, slony_schema) == ('data', '_data'):
return [('bar', 14)]
_get_stats_by_database.side_effect = side_effect
config = get_collector_config('SlonyCollector', {
'instances': {
'alpha': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
},
'beta': {
'slony_db': 'data',
'slony_schema': '_data',
},
}
})
collector = SlonyCollector(config, None)
collector.collect()
self.assertPublished(publish, 'foo', 7)
self.assertPublished(publish, 'bar', 14)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
def test_override_user_password_nodestr(self, _get_stats_by_database):
config = get_collector_config('SlonyCollector', {
'instances': {
'alpha': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
'user': 'postgres',
'password': 'postgres',
'slony_node_string': '(.*)',
},
'beta': {
'slony_db': 'data',
'slony_schema': '_data',
'user': 'data',
'password': 'data',
'slony_node_string': 'Node (.*)',
},
}
})
collector = SlonyCollector(config, None)
collector.collect()
_get_stats_by_database.assert_any_call(
'localhost', 5432, 'postgres', 'postgres',
'postgres', '_postgres', '(.*)'
)
_get_stats_by_database.assert_any_call(
'localhost', 5432, 'data', 'data',
'data', '_data', 'Node (.*)'
)
| mit |
openstack/rally | tests/unit/test_resources.py | 1 | 1715 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import difflib
import os
import rally
from rally.cli import cliutils
from rally.utils import encodeutils
from tests.unit import test
RES_PATH = os.path.join(os.path.dirname(rally.__file__), os.pardir, "etc")
class BashCompletionTestCase(test.TestCase):
def test_bash_completion(self):
with open(os.path.join(RES_PATH, "rally.bash_completion"), "r") as f:
old = f.read().splitlines()
new = cliutils._generate_bash_completion_script().splitlines()
if old != new:
for line in difflib.unified_diff(old, new):
print(line)
new_filename = "/tmp/rally.bash.new"
with open(new_filename, "wb") as new_file:
new_file.write(encodeutils.safe_encode("\n".join(new)))
self.fail("bash completion script is outdated. "
"New script is located at %s "
"You may fix this by executing "
"`mv %s etc/rally.bash_completion`" % (new_filename,
new_filename))
| apache-2.0 |
mat650/metagoofil | discovery/googlesearch.py | 15 | 1388 | import string
import httplib, sys
import myparser
import re
import time
class search_google:
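	"""Builds paginated Google 'filetype:<ext> site:<domain>' queries and
	accumulates the raw result pages so myparser can extract file URLs,
	emails and hostnames."""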
def __init__(self,word,limit,start,filetype):
self.word=word
self.results=""
self.totalresults=""
self.filetype=filetype
self.server="www.google.com"
self.hostname="www.google.com"
self.userAgent="(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
self.quantity="100"
self.limit=limit
self.counter=start
def do_search_files(self):
h = httplib.HTTP(self.server)
h.putrequest('GET', "/search?num="+self.quantity+"&start=" + str(self.counter) + "&hl=en&meta=&q=filetype:"+self.filetype+"%20site:" + self.word)
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
self.totalresults+= self.results
def get_emails(self):
rawres=myparser.parser(self.totalresults,self.word)
return rawres.emails()
def get_hostnames(self):
rawres=myparser.parser(self.totalresults,self.word)
return rawres.hostnames()
def get_files(self):
rawres=myparser.parser(self.totalresults,self.word)
return rawres.fileurls()
def process_files(self):
while self.counter < self.limit:
self.do_search_files()
time.sleep(1)
self.counter+=100
print "\tSearching "+ str(self.counter) + " results..."
| gpl-2.0 |
fceller/arangodb | 3rdParty/boost/1.62.0/libs/mpi/test/python/scatter_test.py | 64 | 1300 | # Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
# Use, modification and distribution is subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test scatter() collective.
import boost.parallel.mpi as mpi
from generators import *
def scatter_test(comm, generator, kind, root):
if comm.rank == root:
print ("Scattering %s from root %d..." % (kind, root)),
if comm.rank == root:
values = list()
for p in range(0, comm.size):
values.append(generator(p))
result = mpi.scatter(comm, values, root = root)
else:
result = mpi.scatter(comm, root = root);
assert result == generator(comm.rank)
if comm.rank == root: print "OK."
return
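# Run under an MPI launcher so mpi.world has several ranks (exact command
# is environment-dependent), e.g.: mpirun -np 4 python scatter_test.py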
scatter_test(mpi.world, int_generator, "integers", 0)
scatter_test(mpi.world, int_generator, "integers", 1)
scatter_test(mpi.world, gps_generator, "GPS positions", 0)
scatter_test(mpi.world, gps_generator, "GPS positions", 1)
scatter_test(mpi.world, string_generator, "strings", 0)
scatter_test(mpi.world, string_generator, "strings", 1)
scatter_test(mpi.world, string_list_generator, "list of strings", 0)
scatter_test(mpi.world, string_list_generator, "list of strings", 1)
| apache-2.0 |
drawks/ansible | test/units/module_utils/xenserver/test_xenserverobject.py | 14 | 2121 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, Bojan Vitnik <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from .FakeAnsibleModule import FakeAnsibleModule, ExitJsonException, FailJsonException
from .common import fake_xenapi_ref
def test_xenserverobject_xenapi_lib_detection(mocker, fake_ansible_module, xenserver):
"""Tests XenAPI lib detection code."""
mocker.patch('ansible.module_utils.xenserver.HAS_XENAPI', new=False)
with pytest.raises(FailJsonException) as exc_info:
xenserver.XenServerObject(fake_ansible_module)
assert exc_info.value.kwargs['msg'] == ("XenAPI Python library is required for this module! "
"Please download XenServer SDK and copy XenAPI.py to your Python site-packages. "
"Check Notes section in module documentation for more info.")
def test_xenserverobject_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver):
"""Tests catching of XenAPI failures."""
with pytest.raises(FailJsonException) as exc_info:
xenserver.XenServerObject(fake_ansible_module)
assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
def test_xenserverobject(mocker, fake_ansible_module, XenAPI, xenserver):
"""Tests successful creation of XenServerObject."""
mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
mocked_returns = {
"pool.get_all.return_value": [fake_xenapi_ref('pool')],
"pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
"session.get_this_host.return_value": fake_xenapi_ref('host'),
"host.get_software_version.return_value": {"product_version": "7.2.0"},
}
mocked_xenapi.configure_mock(**mocked_returns)
xso = xenserver.XenServerObject(fake_ansible_module)
assert xso.pool_ref == fake_xenapi_ref('pool')
assert xso.xenserver_version == [7, 2, 0]
| gpl-3.0 |
ahachete/gpdb | gpMgmt/bin/gppylib/commands/test/regress/test_regress_pg.py | 54 | 1711 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
# Unit Testing of pg commands
#
import os
import unittest
import tempfile
from gppylib.db import dbconn
from gppylib.db.test import skipIfDatabaseDown
from gppylib import gplog
from gppylib.commands import pg
from gppylib.gparray import GpArray
logger = gplog.get_default_logger()
gplog.enable_verbose_logging()
@skipIfDatabaseDown()
class PgCommandsTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testReadPostmasterTempFile(self):
logger.info("testReadPostmasterTempFile")
url = dbconn.DbURL()
gpdb = GpArray.initFromCatalog(url)
logger.info("Search for valid master port: %s" % gpdb.master.port)
cmd = pg.ReadPostmasterTempFile.local('test pg tempfile read',gpdb.master.port)
(exists,PID,datadir)=cmd.getResults()
logger.info("exists:=%s PID=%d datadir='%s'" % (exists,PID,datadir))
self.assertTrue(exists)
self.assertTrue(PID > 0)
self.assertEquals(datadir,gpdb.master.datadir)
gpdb.master.port=4000
logger.info("Search for bogus master port: %s" % gpdb.master.port)
cmd = pg.ReadPostmasterTempFile.local('test pg tempfile read',gpdb.master.port)
(exists,PID,datadir)=cmd.getResults()
logger.info("exists:=%s PID=%d datadir='%s'" % (exists,PID,datadir))
self.assertFalse(exists)
#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
FrancisLab/bing_image_archiver | iorise_image_extractor.py | 1 | 4682 | import re
import urllib
from HTMLParser import HTMLParser
class BlogAttachmentPageParser(HTMLParser):
"""HTMLParser used to extract the url of Bing images from a Blog Post Attachment Page from www.iorise.com
(e.g.: http://www.iorise.com/blog/?attachment_id=44)"""
def __init__(self, result_list):
""" Constructor: Initialize parser. """
HTMLParser.__init__(self)
self.result_list = result_list
# Regex used to validate the href attribute of a tags
self.href_chk = re.compile('^http://www[.]iorise[.]com/blog/wp-content/uploads/20[0-9]{2}/[01][0-9]/.+[.](jpg|jpeg)$')
def handle_starttag(self, tag, attrs):
""" Method called when the parser encounter a start tag. """
# The url to the image will be in an achor tag
if tag == 'a':
# Check if we are currently at the right a tag
if self.validate_a_tag(attrs):
for attr_name, attr_value in attrs:
if attr_name == 'href':
self.result_list.append(attr_value)
def validate_a_tag(self, attrs):
""" Method called to check if a <a> tag and its attributes correspond to what we're looking for. """
href_ok = False
        for attribute_name, value in attrs:
            # Check the href
            if attribute_name == 'href':
                if self.href_chk.match(value):
                    href_ok = True
            # Direct image links carry no 'rel' attribute, so any tag
            # that has one is not the link we are looking for
            elif attribute_name == 'rel':
                return False
# The tag should not contain any more attributes
else:
return False
return href_ok
class BlogDayPageParser(HTMLParser):
"""HTMLParser used to extract the url of attachment page containing the Bing images from a Day Page from
www.iorise.com (e.g.: http://www.iorise.com/blog/?m=20121125)"""
def __init__(self, result_list):
""" Constructor: Initialize parser. """
HTMLParser.__init__(self)
self.result_list = result_list
# Regex used to validate the href attribute of a tags
self.href_chk = re.compile('^http://www[.]iorise[.]com/(blog/)?[?]attachment_id=[0-9]+$')
self.rel_chk = re.compile('^attachment wp-att-[0-9]+$')
def handle_starttag(self, tag, attrs):
""" Method called when the parser encounter a start tag. """
# The url we are looking for will be in an <a> tag
if tag == 'a':
# Check if we are currently at the right a tag
if self.validate_a_tag(attrs):
for attr_name, attr_value in attrs:
if attr_name == 'href':
self.result_list.append(attr_value)
def validate_a_tag(self, attrs):
""" Method called to check if a <a> tag and its attributes correspond to what we're looking for. """
href_ok = False
rel_ok = False
for attribute_name, value in attrs:
# Check the href
if attribute_name == 'href':
if self.href_chk.match(value):
href_ok = True
# Check the rel
elif attribute_name == 'rel':
if self.rel_chk.match(value):
rel_ok = True
# The tag should not contain any more attributes
else:
return False
return href_ok and rel_ok
def extract_all_image_urls(date_to_extract):
""" Function used to extract all Bing images of the day published on iorise between the two provided dates. """
url = "http://www.iorise.com/blog/?m={year}{month:02}{day:02}".format(year=date_to_extract.year,
month=date_to_extract.month,
day=date_to_extract.day)
try:
page = urllib.urlopen(url)
except:
return []
# Extract attachment pages from day page
attachment_pages_url = []
day_page_parser = BlogDayPageParser(attachment_pages_url)
day_page_parser.feed(page.read().decode('UTF-8'))
all_image_urls = []
# For each attachment page, extract the image urls
for page_url in attachment_pages_url:
try:
attachment_page = urllib.urlopen(page_url)
except:
continue
image_urls = []
parser = BlogAttachmentPageParser(image_urls)
parser.feed(attachment_page.read().decode('UTF-8'))
all_image_urls += image_urls
return all_image_urls
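# A minimal usage sketch (date value is illustrative):
#
#   import datetime
#   for url in extract_all_image_urls(datetime.date(2012, 11, 25)):
#       print url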
| mit |
Distrotech/intellij-community | python/testData/inspections/PyTypeCheckerInspection/Generator.py | 25 | 3013 | def test():
def gen(n):
for x in xrange(n):
yield str(x)
def f_1(xs):
"""
:type xs: list of int
"""
return xs
def f_2(xs):
"""
:type xs: collections.Sequence of int
"""
return xs
def f_3(xs):
"""
:type xs: collections.Container of int
"""
return xs
def f_4(xs):
"""
:type xs: collections.Iterator of int
"""
return xs
def f_5(xs):
"""
:type xs: collections.Iterable of int
"""
return xs
def f_6(xs):
"""
:type xs: list
"""
return xs
def f_7(xs):
"""
:type xs: collections.Sequence
"""
return xs
def f_8(xs):
"""
:type xs: collections.Container
"""
return xs
def f_9(xs):
"""
:type xs: collections.Iterator
"""
return xs
def f_10(xs):
"""
:type xs: collections.Iterable
"""
return xs
def f_11(xs):
"""
:type xs: list of string
"""
return xs
def f_12(xs):
"""
:type xs: collections.Sequence of string
"""
return xs
def f_13(xs):
"""
:type xs: collections.Container of string
"""
return xs
def f_14(xs):
"""
:type xs: collections.Iterator of string
"""
return xs
def f_15(xs):
"""
:type xs: collections.Iterable of string
"""
return xs
return [
''.join(gen(10)),
f_1(<warning descr="Expected type 'list[int]', got '__generator[str]' instead">gen(11)</warning>),
f_2(<warning descr="Expected type 'Sequence[int]', got '__generator[str]' instead">gen(11)</warning>),
f_3(<warning descr="Expected type 'Container[int]', got '__generator[str]' instead">gen(11)</warning>),
f_4(<warning descr="Expected type 'Iterator[int]', got '__generator[str]' instead">gen(11)</warning>),
f_5(<warning descr="Expected type 'Iterable[int]', got '__generator[str]' instead">gen(11)</warning>),
f_6(<warning descr="Expected type 'list', got '__generator[str]' instead">gen(11)</warning>),
f_7(<warning descr="Expected type 'Sequence', got '__generator[str]' instead">gen(11)</warning>),
f_8(<warning descr="Expected type 'Container', got '__generator[str]' instead">gen(11)</warning>),
f_9(gen(11)),
f_10(gen(11)),
f_11(<warning descr="Expected type 'list[Union[str, unicode]]', got '__generator[str]' instead">gen(11)</warning>),
f_12(<warning descr="Expected type 'Sequence[Union[str, unicode]]', got '__generator[str]' instead">gen(11)</warning>),
f_13(<warning descr="Expected type 'Container[Union[str, unicode]]', got '__generator[str]' instead">gen(11)</warning>),
f_14(gen(11)),
f_15(gen(11)),
f_15('foo'.split('o')),
]
| apache-2.0 |
gbourdin/charlas | ml-notebook-to-prod/examples/mnistExample/mnistapi/mnistapi/__main__.py | 1 | 1833 | import click
from os import cpu_count, environ, execvp
from sys import prefix
from .app import create_app
@click.group()
def cli():
pass
@cli.command()
def run_uwsgi():
"""
Run API through uwsgi server.
"""
# avoid fork problems with tensorflow (and other non-serializable objects)
environ['UWSGI_LAZY_APPS'] = '1'
# explicit http timeout
environ['UWSGI_HTTP_TIMEOUT'] = '60'
# how many uwsgi workers (processes)
environ['UWSGI_WORKERS'] = '{}'.format(cpu_count())
# create one thread for every process, since you're probably cpu-bound
environ['UWSGI_THREADS'] = '1'
# load the mmanager WSGI handler
environ['UWSGI_MODULE'] = 'mnistapi.wsgi:app'
# bind the http server, DO NOT USE UWSGI_HTTP_SOCKET!!!
environ['UWSGI_HTTP'] = ':8081'
# remove sockets/pidfile at exit
environ['UWSGI_VACUUM'] = '1'
# retrieve/set the PythonHome
environ['UWSGI_PYHOME'] = prefix
# increase buffer size a bit
environ['UWSGI_BUFFER_SIZE'] = '8192'
# enable the master process
environ['UWSGI_MASTER'] = '1'
# disable ping logging
environ['UWSGI_ROUTE'] = '^/ping donotlog:'
# keep connection from balancer alive
environ['UWSGI_HTTP_KEEPALIVE'] = '1'
# slower but safe
environ['UWSGI_THUNDER_LOCK'] = '1'
# do not log every request, it's slow and verbose
environ['UWSGI_DISABLE_LOGGING'] = '1'
# close uwsgi if something goes wrong, otherwise uwsgi starts with no app
environ['UWSGI_NEED_APP'] = '1'
# exec the uwsgi binary
execvp('uwsgi', ('uwsgi',))
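# Hypothetical invocation (the command name depends on the installed click
# version; older click keeps underscores):
#   python -m mnistapi run_uwsgi
# execvp replaces this process with the uwsgi binary, configured entirely
# through the UWSGI_* environment variables set above.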
@cli.command()
@click.option('-h', '--host', 'host', default='localhost')
def run_server(host):
app = create_app()
app.run(debug=True, host=host, port=8080, threaded=False, processes=1, use_reloader=False)
if __name__ == '__main__':
cli()
| gpl-2.0 |
Walesson/angular2-movie-app | node_modules/node-forge/tests/forge_ssl/forge/ssl.py | 169 | 16598 | # Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
The following constants identify various SSL session caching modes:
SESS_CACHE_OFF
SESS_CACHE_CLIENT
SESS_CACHE_SERVER
SESS_CACHE_BOTH
"""
import textwrap
import _forge_ssl # if we can't import it, let the error propagate
from _forge_ssl import SSLError
from _forge_ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _forge_ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
from _forge_ssl import SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH
from _forge_ssl import RAND_status, RAND_egd, RAND_add
from _forge_ssl import \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
from socket import socket, _fileobject, _delegate_methods
from socket import error as socket_error
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
import errno
class SSLSocket(socket):
"""This class implements a subtype of socket.socket that wraps
the underlying OS socket in an SSL context when necessary, and
provides read and write methods over that channel."""
def __init__(self, parent_socket, sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23,
sess_cache_mode=SESS_CACHE_SERVER,
sess_id_ctx=None,
ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True):
socket.__init__(self, _sock=sock._sock)
# The initializer for socket overrides the methods send(), recv(), etc.
        # in the instance, which we don't need -- but we want to provide the
# methods defined in SSLSocket.
for attr in _delegate_methods:
try:
delattr(self, attr)
except AttributeError:
pass
if certfile and not keyfile:
keyfile = certfile
create = True
connected = False
if not server_side:
# see if it's connected
try:
socket.getpeername(self)
connected = True
except socket_error, e:
if e.errno != errno.ENOTCONN:
raise
# no, no connection yet
self._sslobj = None
create = False
if create:
# yes, create the SSL object
if parent_socket == None:
self._sslobj = _forge_ssl.sslwrap(
self._sock,
server_side,
keyfile, certfile,
cert_reqs, ssl_version,
sess_cache_mode, sess_id_ctx,
ca_certs)
else:
self._sslobj = parent_socket._sslobj.wrap_accepted(self._sock)
if connected and do_handshake_on_connect:
self.do_handshake()
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.sess_cache_mode = sess_cache_mode
self.sess_id_ctx = sess_id_ctx
self.ca_certs = ca_certs
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def read(self, len=1024):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
try:
return self._sslobj.read(len)
except SSLError, x:
if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
return ''
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
return self._sslobj.peer_certificate(binary_form)
def cipher(self):
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def send(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
while True:
try:
v = self._sslobj.write(data)
except SSLError, x:
if x.args[0] == SSL_ERROR_WANT_READ:
return 0
elif x.args[0] == SSL_ERROR_WANT_WRITE:
return 0
else:
raise
else:
return v
else:
return socket.send(self, data, flags)
def sendto(self, data, addr, flags=0):
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
else:
return socket.sendto(self, data, addr, flags)
def sendall(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
return amount
else:
return socket.sendall(self, data, flags)
def recv(self, buflen=1024, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
return self.read(buflen)
else:
return socket.recv(self, buflen, flags)
def recv_into(self, buffer, nbytes=None, flags=0):
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv_into() on %s" %
self.__class__)
tmp_buffer = self.read(nbytes)
v = len(tmp_buffer)
buffer[:v] = tmp_buffer
return v
else:
return socket.recv_into(self, buffer, nbytes, flags)
def recvfrom(self, addr, buflen=1024, flags=0):
if self._sslobj:
raise ValueError("recvfrom not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom(self, addr, buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
if self._sslobj:
raise ValueError("recvfrom_into not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom_into(self, buffer, nbytes, flags)
def pending(self):
if self._sslobj:
return self._sslobj.pending()
else:
return 0
def unwrap(self):
if self._sslobj:
try:
# if connected then shutdown
self.getpeername()
s = self._sslobj.shutdown()
except:
s = self._sock
self._sslobj = None
return s
else:
raise ValueError("No SSL wrapper around " + str(self))
def shutdown(self, how):
self._sslobj = None
socket.shutdown(self, how)
def close(self):
if self._makefile_refs < 1:
if self._sslobj:
self.unwrap()
socket.close(self)
else:
self._makefile_refs -= 1
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
self._sslobj.do_handshake()
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
# Here we assume that the socket is client-side, and not
# connected at the time of the call. We connect it, then wrap it.
if self._sslobj:
raise ValueError("attempt to connect already-connected SSLSocket!")
socket.connect(self, addr)
self._sslobj = _forge_ssl.sslwrap(self._sock, False,
self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.sess_cache_mode,
self.sess_id_ctx,
self.ca_certs)
if self.do_handshake_on_connect:
self.do_handshake()
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
return (SSLSocket(self,
newsock,
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True,
cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version,
sess_cache_mode=self.sess_cache_mode,
sess_id_ctx=self.sess_id_ctx,
ca_certs=self.ca_certs,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs),
addr)
def makefile(self, mode='r', bufsize=-1):
"""Make and return a file-like object that
works with the SSL connection. Just use the code
from the socket module."""
self._makefile_refs += 1
# close=True so as to decrement the reference count when done with
# the file-like object.
return _fileobject(self, mode, bufsize, close=True)
def wrap_socket(sock, parent_socket=None, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23,
sess_cache_mode=SESS_CACHE_SERVER,
sess_id_ctx=None,
ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True):
return SSLSocket(parent_socket,
sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version,
sess_cache_mode=sess_cache_mode,
sess_id_ctx=sess_id_ctx,
ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs)
# some utility functions
def cert_time_to_seconds(cert_time):
"""Takes a date-time string in standard ASN1_print form
("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
a Python time value in seconds past the epoch."""
import time
return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
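# For example (illustrative value):
#   cert_time_to_seconds("May  9 00:00:00 2007 GMT")
# parses the string with time.strptime and converts it with time.mktime,
# so the result is interpreted in the local timezone.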
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
"""Takes a certificate in binary DER format and returns the
PEM version of it as a string."""
if hasattr(base64, 'standard_b64encode'):
# preferred because older API gets line-length wrong
f = base64.standard_b64encode(der_cert_bytes)
return (PEM_HEADER + '\n' +
textwrap.fill(f, 64) + '\n' +
PEM_FOOTER + '\n')
else:
return (PEM_HEADER + '\n' +
base64.encodestring(der_cert_bytes) +
PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
"""Takes a certificate in ASCII PEM format and returns the
DER-encoded version of it as a byte sequence"""
if not pem_cert_string.startswith(PEM_HEADER):
raise ValueError("Invalid PEM encoding; must start with %s"
% PEM_HEADER)
if not pem_cert_string.strip().endswith(PEM_FOOTER):
raise ValueError("Invalid PEM encoding; must end with %s"
% PEM_FOOTER)
d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
return base64.decodestring(d)
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
"""Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt."""
host, port = addr
if (ca_certs is not None):
cert_reqs = CERT_REQUIRED
else:
cert_reqs = CERT_NONE
s = wrap_socket(socket(), ssl_version=ssl_version,
cert_reqs=cert_reqs, ca_certs=ca_certs)
s.connect(addr)
dercert = s.getpeercert(True)
s.close()
return DER_cert_to_PEM_cert(dercert)
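# A quick sketch (address is an assumption):
#   pem = get_server_certificate(('www.example.com', 443))
# omitting ca_certs skips validation and simply returns the peer's
# certificate as a PEM string.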
def get_protocol_name(protocol_code):
if protocol_code == PROTOCOL_TLSv1:
return "TLSv1"
elif protocol_code == PROTOCOL_SSLv23:
return "SSLv23"
elif protocol_code == PROTOCOL_SSLv2:
return "SSLv2"
elif protocol_code == PROTOCOL_SSLv3:
return "SSLv3"
else:
return "<unknown>"
# a replacement for the old socket.ssl function
def sslwrap_simple(sock, keyfile=None, certfile=None):
"""A replacement for the old socket.ssl function. Designed
    for compatibility with Python 2.5 and earlier. Will disappear in
Python 3.0."""
if hasattr(sock, "_sock"):
sock = sock._sock
ssl_sock = _forge_ssl.sslwrap(sock, 0, keyfile, certfile,
CERT_NONE, PROTOCOL_SSLv23,
SESS_CACHE_SERVER, None, None)
try:
sock.getpeername()
except:
# no, no connection yet
pass
else:
# yes, do the handshake
ssl_sock.do_handshake()
return ssl_sock
| gpl-3.0 |
aristotle-tek/cuny-bdif | AWS/ec2/lib/boto-2.34.0/tests/unit/vpc/test_routetable.py | 64 | 20068 | from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, RouteTable
class TestDescribeRouteTables(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeRouteTablesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>6f570b0b-9c18-4b07-bdec-73740dcf861a</requestId>
<routeTableSet>
<item>
<routeTableId>rtb-13ad487a</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
<origin>CreateRouteTable</origin>
</item>
</routeSet>
<associationSet>
<item>
<routeTableAssociationId>rtbassoc-12ad487b</routeTableAssociationId>
<routeTableId>rtb-13ad487a</routeTableId>
<main>true</main>
</item>
</associationSet>
<tagSet/>
</item>
<item>
<routeTableId>rtb-f9ad4890</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
<origin>CreateRouteTable</origin>
</item>
<item>
<destinationCidrBlock>0.0.0.0/0</destinationCidrBlock>
<gatewayId>igw-eaad4883</gatewayId>
<state>active</state>
</item>
<item>
<destinationCidrBlock>10.0.0.0/21</destinationCidrBlock>
<networkInterfaceId>eni-884ec1d1</networkInterfaceId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
<item>
<destinationCidrBlock>11.0.0.0/22</destinationCidrBlock>
<vpcPeeringConnectionId>pcx-efc52b86</vpcPeeringConnectionId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
</routeSet>
<associationSet>
<item>
<routeTableAssociationId>rtbassoc-faad4893</routeTableAssociationId>
<routeTableId>rtb-f9ad4890</routeTableId>
<subnetId>subnet-15ad487c</subnetId>
</item>
</associationSet>
<tagSet/>
</item>
</routeTableSet>
</DescribeRouteTablesResponse>
"""
def test_get_all_route_tables(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_route_tables(
['rtb-13ad487a', 'rtb-f9ad4890'], filters=[('route.state', 'active')])
self.assert_request_parameters({
'Action': 'DescribeRouteTables',
'RouteTableId.1': 'rtb-13ad487a',
'RouteTableId.2': 'rtb-f9ad4890',
'Filter.1.Name': 'route.state',
'Filter.1.Value.1': 'active'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 2)
self.assertIsInstance(api_response[0], RouteTable)
self.assertEquals(api_response[0].id, 'rtb-13ad487a')
self.assertEquals(len(api_response[0].routes), 1)
self.assertEquals(api_response[0].routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response[0].routes[0].gateway_id, 'local')
self.assertEquals(api_response[0].routes[0].state, 'active')
self.assertEquals(len(api_response[0].associations), 1)
self.assertEquals(api_response[0].associations[0].id, 'rtbassoc-12ad487b')
self.assertEquals(api_response[0].associations[0].route_table_id, 'rtb-13ad487a')
self.assertIsNone(api_response[0].associations[0].subnet_id)
self.assertEquals(api_response[0].associations[0].main, True)
self.assertEquals(api_response[1].id, 'rtb-f9ad4890')
self.assertEquals(len(api_response[1].routes), 4)
self.assertEquals(api_response[1].routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response[1].routes[0].gateway_id, 'local')
self.assertEquals(api_response[1].routes[0].state, 'active')
self.assertEquals(api_response[1].routes[1].destination_cidr_block, '0.0.0.0/0')
self.assertEquals(api_response[1].routes[1].gateway_id, 'igw-eaad4883')
self.assertEquals(api_response[1].routes[1].state, 'active')
self.assertEquals(api_response[1].routes[2].destination_cidr_block, '10.0.0.0/21')
self.assertEquals(api_response[1].routes[2].interface_id, 'eni-884ec1d1')
self.assertEquals(api_response[1].routes[2].state, 'blackhole')
self.assertEquals(api_response[1].routes[3].destination_cidr_block, '11.0.0.0/22')
self.assertEquals(api_response[1].routes[3].vpc_peering_connection_id, 'pcx-efc52b86')
self.assertEquals(api_response[1].routes[3].state, 'blackhole')
self.assertEquals(len(api_response[1].associations), 1)
self.assertEquals(api_response[1].associations[0].id, 'rtbassoc-faad4893')
self.assertEquals(api_response[1].associations[0].route_table_id, 'rtb-f9ad4890')
self.assertEquals(api_response[1].associations[0].subnet_id, 'subnet-15ad487c')
self.assertEquals(api_response[1].associations[0].main, False)
class TestAssociateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<AssociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<associationId>rtbassoc-f8ad4891</associationId>
</AssociateRouteTableResponse>
"""
def test_associate_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.associate_route_table(
'rtb-e4ad488d', 'subnet-15ad487c')
self.assert_request_parameters({
'Action': 'AssociateRouteTable',
'RouteTableId': 'rtb-e4ad488d',
'SubnetId': 'subnet-15ad487c'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, 'rtbassoc-f8ad4891')
class TestDisassociateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DisassociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateRouteTableResponse>
"""
def test_disassociate_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.disassociate_route_table('rtbassoc-fdad4894')
self.assert_request_parameters({
'Action': 'DisassociateRouteTable',
'AssociationId': 'rtbassoc-fdad4894'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestCreateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<routeTable>
<routeTableId>rtb-f9ad4890</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
</item>
</routeSet>
<associationSet/>
<tagSet/>
</routeTable>
</CreateRouteTableResponse>
"""
def test_create_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route_table('vpc-11ad4878')
self.assert_request_parameters({
'Action': 'CreateRouteTable',
'VpcId': 'vpc-11ad4878'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, RouteTable)
self.assertEquals(api_response.id, 'rtb-f9ad4890')
self.assertEquals(len(api_response.routes), 1)
self.assertEquals(api_response.routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response.routes[0].gateway_id, 'local')
self.assertEquals(api_response.routes[0].state, 'active')
class TestDeleteRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteTableResponse>
"""
def test_delete_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_route_table('rtb-e4ad488d')
self.assert_request_parameters({
'Action': 'DeleteRouteTable',
'RouteTableId': 'rtb-e4ad488d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestReplaceRouteTableAssociation(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<ReplaceRouteTableAssociationResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<newAssociationId>rtbassoc-faad4893</newAssociationId>
</ReplaceRouteTableAssociationResponse>
"""
def test_replace_route_table_assocation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route_table_assocation(
'rtbassoc-faad4893', 'rtb-f9ad4890')
self.assert_request_parameters({
'Action': 'ReplaceRouteTableAssociation',
'AssociationId': 'rtbassoc-faad4893',
'RouteTableId': 'rtb-f9ad4890'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_table_association_with_assoc(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route_table_association_with_assoc(
'rtbassoc-faad4893', 'rtb-f9ad4890')
self.assert_request_parameters({
'Action': 'ReplaceRouteTableAssociation',
'AssociationId': 'rtbassoc-faad4893',
'RouteTableId': 'rtb-f9ad4890'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, 'rtbassoc-faad4893')
class TestCreateRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateRouteResponse>
"""
def test_create_route_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '0.0.0.0/0',
'GatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_instance(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'InstanceId': 'i-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_interface(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'NetworkInterfaceId': 'eni-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_vpc_peering_connection(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'VpcPeeringConnectionId': 'pcx-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestReplaceRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateRouteResponse>
"""
def test_replace_route_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '0.0.0.0/0',
'GatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_instance(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'InstanceId': 'i-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_interface(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'NetworkInterfaceId': 'eni-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_vpc_peering_connection(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'VpcPeeringConnectionId': 'pcx-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestDeleteRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteTableResponse>
"""
def test_delete_route(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_route('rtb-e4ad488d', '172.16.1.0/24')
self.assert_request_parameters({
'Action': 'DeleteRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '172.16.1.0/24'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main()
| mit |
rc0r/afl-utils | afl_utils/afl_stats.py | 1 | 15342 | """
Copyright 2015-2016 @_rc0r <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
try:
import simplejson as json
except ImportError:
import json
import os
import sys
import socket
import twitter
from urllib.error import URLError
import afl_utils
from afl_utils.AflPrettyPrint import clr, print_ok, print_warn, print_err
from db_connectors import con_sqlite
db_table_spec = """`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, `last_update` INTEGER NOT NULL, `start_time`INTEGER NOT NULL,
`fuzzer_pid` INTEGER NOT NULL, `cycles_done` INTEGER NOT NULL, `execs_done` INTEGER NOT NULL,
`execs_per_sec` REAL NOT NULL, `paths_total` INTEGER NOT NULL, `paths_favored` INTEGER NOT NULL,
`paths_found` INTEGER NOT NULL, `paths_imported` INTEGER NOT NULL, `max_depth` INTEGER NOT NULL,
`cur_path` INTEGER NOT NULL, `pending_favs` INTEGER NOT NULL, `pending_total` INTEGER NOT NULL,
`variable_paths` INTEGER NOT NULL, `stability` REAL, `bitmap_cvg` REAL NOT NULL,
`unique_crashes` INTEGER NOT NULL, `unique_hangs` INTEGER NOT NULL, `last_path` INTEGER NOT NULL,
`last_crash` INTEGER NOT NULL, `last_hang` INTEGER NOT NULL, `execs_since_crash` INTEGER NOT NULL,
`exec_timeout` INTEGER NOT NULL, `afl_banner` VARCHAR(200) NOT NULL, `afl_version` VARCHAR(10) NOT NULL,
`command_line` VARCHAR(1000)"""
def show_info():
print(clr.CYA + "afl-stats " + clr.BRI + "%s" % afl_utils.__version__ + clr.RST + " by %s" % afl_utils.__author__)
print("Send stats of afl-fuzz jobs to Twitter.")
print("")
def read_config(config_file):
config_file = os.path.abspath(os.path.expanduser(config_file))
if not os.path.isfile(config_file):
print_err("Config file not found!")
sys.exit(1)
with open(config_file, 'r') as raw_config:
config = json.load(raw_config)
return config
def twitter_init(config):
try:
config['twitter_creds_file'] = os.path.abspath(os.path.expanduser(config['twitter_creds_file']))
if not os.path.exists(config['twitter_creds_file']):
twitter.oauth_dance("fuzzer_stats", config['twitter_consumer_key'],
config['twitter_consumer_secret'], config['twitter_creds_file'])
oauth_token, oauth_secret = twitter.read_token_file(config['twitter_creds_file'])
twitter_instance = twitter.Twitter(auth=twitter.OAuth(oauth_token, oauth_secret,
config['twitter_consumer_key'],
config['twitter_consumer_secret']))
return twitter_instance
except (twitter.TwitterHTTPError, URLError):
print_err("Network error, twitter login failed! Check your connection!")
sys.exit(1)
def shorten_tweet(tweet):
if len(tweet) > 140:
print_ok("Status too long, will be shortened to 140 chars!")
short_tweet = tweet[:137] + "..."
else:
short_tweet = tweet
return short_tweet
def fuzzer_alive(pid):
try:
os.kill(pid, 0)
except (OSError, ProcessLookupError):
return 0
return 1
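# Note added for orientation (the exact layout is an assumption about afl's
# fuzzer_stats file format): each line looks roughly like
#     execs_done        : 1234567
# with the key padded to a fixed width, which is why parse_stat_file() below
# slices the value out with l[19:] and strips ": " from it.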
def parse_stat_file(stat_file, summary=True):
try:
f = open(stat_file, "r")
lines = f.readlines()
f.close()
summary_stats = {
'fuzzer_pid': None,
'execs_done': None,
'execs_per_sec': None,
'paths_total': None,
'paths_favored': None,
'pending_favs': None,
'pending_total': None,
'unique_crashes': None,
'unique_hangs': None,
'afl_banner': None
}
complete_stats = {
'last_update': '',
'start_time': '',
'fuzzer_pid': '',
'cycles_done': '',
'execs_done': '',
'execs_per_sec': '',
'paths_total': '',
'paths_favored': '',
'paths_found': '',
'paths_imported': '',
'max_depth': '',
'cur_path': '',
'pending_favs': '',
'pending_total': '',
'variable_paths': '',
'stability': '',
'bitmap_cvg': '',
'unique_crashes': '',
'unique_hangs': '',
'last_path': '',
'last_crash': '',
'last_hang': '',
'execs_since_crash': '',
'exec_timeout': '',
'afl_banner': '',
'afl_version': '',
'command_line': ''
}
for l in lines:
if summary:
stats = summary_stats
for k in stats.keys():
if k != "fuzzer_pid":
if k in l:
stats[k] = l[19:].strip(": \r\n")
else:
if k in l:
stats[k] = fuzzer_alive(int(l[19:].strip(": \r\n")))
else:
stats = complete_stats
for k in stats.keys():
if k in l:
stats[k] = l[19:].strip(": %\r\n")
return stats
except FileNotFoundError as e:
print_warn("Stat file " + clr.GRA + "%s" % e.filename + clr.RST + " not found!")
return None
def load_stats(fuzzer_dir, summary=True):
fuzzer_dir = os.path.abspath(os.path.expanduser(fuzzer_dir))
if not os.path.isdir(fuzzer_dir):
print_warn("Invalid fuzzing directory specified: " + clr.GRA + "%s" % fuzzer_dir + clr.RST)
return None
fuzzer_stats = []
if os.path.isfile(os.path.join(fuzzer_dir, "fuzzer_stats")):
# single afl-fuzz job
stats = parse_stat_file(os.path.join(fuzzer_dir, "fuzzer_stats"), summary)
if stats:
fuzzer_stats.append(stats)
else:
fuzzer_inst = []
for fdir in os.listdir(fuzzer_dir):
if os.path.isdir(os.path.join(fuzzer_dir, fdir)):
fuzzer_inst.append(os.path.join(fuzzer_dir, fdir, "fuzzer_stats"))
for stat_file in fuzzer_inst:
stats = parse_stat_file(stat_file, summary)
if stats:
fuzzer_stats.append(stats)
return fuzzer_stats
def summarize_stats(stats):
sum_stat = {
'fuzzers': len(stats),
'fuzzer_pid': 0,
'execs_done': 0,
'execs_per_sec': 0,
'paths_total': 0,
'paths_favored': 0,
'pending_favs': 0,
'pending_total': 0,
'unique_crashes': 0,
'unique_hangs': 0,
'afl_banner': 0,
'host': socket.gethostname()[:10]
}
for s in stats:
for k in sum_stat.keys():
if k in s.keys():
if k != "afl_banner":
sum_stat[k] += float(s[k])
else:
sum_stat[k] = s[k][:10]
return sum_stat
def diff_stats(sum_stats, old_stats):
if len(sum_stats) != len(old_stats):
print_warn("Stats corrupted for '" + clr.GRA + "%s" % sum_stats['afl_banner'] + clr.RST + "'!")
return None
diff_stat = {
'fuzzers': len(sum_stats),
'fuzzer_pid': 0,
'execs_done': 0,
'execs_per_sec': 0,
'paths_total': 0,
'paths_favored': 0,
'pending_favs': 0,
'pending_total': 0,
'unique_crashes': 0,
'unique_hangs': 0,
'afl_banner': 0,
'host': socket.gethostname()[:10]
}
for k in sum_stats.keys():
if k not in ['afl_banner', 'host']:
diff_stat[k] = sum_stats[k] - old_stats[k]
else:
diff_stat[k] = sum_stats[k]
return diff_stat
def prettify_stat(stat, dstat, console=True):
_stat = stat.copy()
_dstat = dstat.copy()
_stat['execs_done'] /= 1e6
_dstat['execs_done'] /= 1e6
if _dstat['fuzzer_pid'] == _dstat['fuzzers'] == 0:
ds_alive = ""
else:
ds_alive = " (%+d/%+d)" % (_dstat['fuzzer_pid'], _dstat['fuzzers'])
# if int(_dstat['execs_done']) == 0:
if _dstat['execs_done'] == 0:
ds_exec = " "
else:
ds_exec = " (%+d) " % _dstat['execs_done']
if _dstat['execs_per_sec'] == 0:
ds_speed = " "
else:
ds_speed = " (%+1.f) " % _dstat['execs_per_sec']
if _dstat['pending_total'] == _dstat['pending_favs'] == 0:
ds_pend = ""
else:
ds_pend = " (%+d/%+d)" % (_dstat['pending_total'], _dstat['pending_favs'])
if _dstat['unique_crashes'] == 0:
ds_crash = ""
else:
ds_crash = " (%+d)" % _dstat['unique_crashes']
if console:
# colorize stats
_stat['afl_banner'] = clr.BLU + _stat['afl_banner'] + clr.RST
_stat['host'] = clr.LBL + _stat['host'] + clr.RST
lbl = clr.GRA
if _stat['fuzzer_pid'] == 0:
alc = clr.LRD
slc = clr.GRA
else:
alc = clr.LGN if _stat['fuzzer_pid'] == _stat['fuzzers'] else clr.YEL
slc = ""
clc = clr.MGN if _stat['unique_crashes'] == 0 else clr.LRD
rst = clr.RST
# colorize diffs
if _dstat['fuzzer_pid'] < 0 or _dstat['fuzzers'] < 0:
ds_alive = clr.RED + ds_alive + clr.RST
else:
ds_alive = clr.GRN + ds_alive + clr.RST
# if int(_dstat['execs_done']) < 0:
if _dstat['execs_done'] < 0:
ds_exec = clr.RED + ds_exec + clr.RST
else:
ds_exec = clr.GRN + ds_exec + clr.RST
if _dstat['execs_per_sec'] < 0:
ds_speed = clr.RED + ds_speed + clr.RST
else:
ds_speed = clr.GRN + ds_speed + clr.RST
if _dstat['unique_crashes'] < 0:
ds_crash = clr.RED + ds_crash + clr.RST
else:
ds_crash = clr.GRN + ds_crash + clr.RST
ds_pend = clr.GRA + ds_pend + clr.RST
pretty_stat =\
"[%s on %s]\n %sAlive:%s %s%d/%d%s%s\n %sExecs:%s %d%sm\n %sSpeed:%s %s%.1f%sx/s%s\n %sPend:%s %d/%d%s\n" \
" %sCrashes:%s %s%d%s%s" % (_stat['afl_banner'], _stat['host'], lbl, rst, alc, _stat['fuzzer_pid'],
_stat['fuzzers'], rst, ds_alive, lbl, rst, _stat['execs_done'], ds_exec, lbl, rst, slc,
_stat['execs_per_sec'], ds_speed, rst, lbl, rst, _stat['pending_total'],
_stat['pending_favs'], ds_pend, lbl, rst, clc, _stat['unique_crashes'], rst, ds_crash)
else:
pretty_stat = "[%s #%s]\nAlive: %d/%d%s\nExecs: %d%sm\nSpeed: %.1f%sx/s\n" \
"Pend: %d/%d%s\nCrashes: %d%s" %\
(_stat['afl_banner'], _stat['host'], _stat['fuzzer_pid'], _stat['fuzzers'], ds_alive,
_stat['execs_done'], ds_exec, _stat['execs_per_sec'], ds_speed,
_stat['pending_total'], _stat['pending_favs'], ds_pend, _stat['unique_crashes'], ds_crash)
return pretty_stat
def dump_stats(config_settings, database):
for sync_dir in config_settings['fuzz_dirs']:
fuzzer_stats = load_stats(sync_dir, summary=False)
for fuzzer in fuzzer_stats:
# create different table for every afl instance
# table = 'fuzzer_stats_{}'.format(fuzzer['afl_banner'])
#
# django compatible: put everything into one table (according
# to django plots app model)
# Differentiate data based on afl_banner, so don't override
# it manually! afl-multicore will create a unique banner for
# every fuzzer!
table = 'aflutils_fuzzerstats'
database.init_database(table, db_table_spec)
if not database.dataset_exists(table, fuzzer, ['last_update', 'afl_banner']):
database.insert_dataset(table, fuzzer)
def fetch_stats(config_settings, twitter_inst):
stat_dict = dict()
for fuzzer in config_settings['fuzz_dirs']:
stats = load_stats(fuzzer)
if not stats:
continue
sum_stats = summarize_stats(stats)
try:
with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'r') as f:
old_stats = json.load(f)
except FileNotFoundError:
old_stats = sum_stats.copy()
# initialize/update stat_dict
stat_dict[fuzzer] = (sum_stats, old_stats)
stat_change = diff_stats(sum_stats, old_stats)
with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'w') as f:
json.dump(sum_stats, f)
print(prettify_stat(sum_stats, stat_change, True))
tweet = prettify_stat(sum_stats, stat_change, False)
l = len(tweet)
c = clr.LRD if l > 140 else clr.LGN
if twitter_inst:
print_ok("Tweeting status (%s%d" % (c, l) + clr.RST + " chars)...")
try:
twitter_inst.statuses.update(status=shorten_tweet(tweet))
except (twitter.TwitterHTTPError, URLError):
print_warn("Problem connecting to Twitter! Tweet not sent!")
except Exception as e:
print_err("Sending tweet failed (Reason: " + clr.GRA + "%s" % e.__cause__ + clr.RST + ")")
def main(argv):
parser = argparse.ArgumentParser(description="Post selected contents of fuzzer_stats to Twitter.",
usage="afl-stats [-h] [-c config] [-d database] [-t]\n")
parser.add_argument("-c", "--config", dest="config_file",
help="afl-stats config file (Default: afl-stats.conf)!", default="afl-stats.conf")
parser.add_argument("-d", "--database", dest="database_file",
help="Dump stats history into database.")
parser.add_argument('-t', '--twitter', dest='twitter', action='store_const', const=True,
help='Post stats to twitter (Default: off).', default=False)
parser.add_argument('-q', '--quiet', dest='quiet', action='store_const', const=True,
help='Suppress any output (Default: off).', default=False)
args = parser.parse_args(argv[1:])
if not args.quiet:
show_info()
if args.database_file:
db_file = os.path.abspath(os.path.expanduser(args.database_file))
else:
db_file = None
if db_file:
lite_db = con_sqlite.sqliteConnector(db_file, verbose=False)
else:
lite_db = None
config_settings = read_config(args.config_file)
if lite_db:
dump_stats(config_settings, lite_db)
lite_db.commit_close()
if args.twitter:
twitter_inst = twitter_init(config_settings)
else:
twitter_inst = None
fetch_stats(config_settings, twitter_inst)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 |
jmdejong/Asciifarm | asciifarm/server/entity.py | 1 | 4171 |
from . import serialize
from .eventtarget import EventTarget
class Entity:
""" Attempt to implement an entity component system
This is the base object
Components are given on construction.
    Once a component is added to the object, the attach method will be called on the component (if it has one).
The attach method is used to pass the entity and room events to the component.
When the entity is removed, all components will have their remove method called if they have one.
Remove methods are for cleanup, like unsubscribing from events.
"""
def __init__(self, sprite=' ', height=0, name=None, components=None, flags=None):
if components is None:
components = {}
if flags is None:
flags = set()
self.sprite = sprite # the name of the image to display for this entity
self.height = height # if multiple objects are on a square, the tallest one is drawn
self.name = name if name else sprite # human readable name/description
self.components = components
self.observable = EventTarget()
self.flags = set(flags)
self.ground = None
self.roomData = None
for component in self.components.values():
component.attach(self)
def construct(self, roomData, preserve=False, stamp=None):
self.roomData = roomData
if preserve:
roomData.preserveObject(self)
self._preserve()
if stamp is None:
stamp = roomData.getStamp()
self.trigger("roomjoin", roomData, stamp)
def hasComponent(self, name):
return name in self.components
def getComponent(self, name):
return self.components.get(name, None)
def place(self, ground):
if self.ground:
self.ground.removeObj(self)
self.ground = ground
ground.addObj(self)
def remove(self):
if self.ground:
self.ground.removeObj(self)
self.ground = None
if self.isPreserved():
self.roomData.removePreserved(self)
for component in self.components.values():
component.remove()
self.trigger("remove")
self.roomData = None
def addListener(self, event, callback, key=None):
self.observable.addListener(event, callback, key)
def removeListener(self, event, key):
self.observable.removeListener(event, key)
def trigger(self, event, *args, **kwargs):
self.observable.trigger(event, self, *args, **kwargs)
def getSprite(self):
return self.sprite
def getName(self):
return self.name
def getHeight(self):
return self.height
def inRoom(self):
return self.ground is not None
def getGround(self):
return self.ground
def getNearObjects(self):
return [obj for obj in self.ground.getObjs() if obj != self]
def getFlags(self):
return self.flags
def _preserve(self):
self.flags.add("preserve")
def isPreserved(self):
return "preserve" in self.flags
def toJSON(self):
return {
"sprite": self.sprite,
"name": self.name,
"height": self.height,
"flags": list(self.flags),
"components": {
name: serialize.serialize(comp)
for name, comp in self.components.items()
}
}
def serialize(self):
if "serialize" not in self.components:
return self.toJSON()
return self.components["serialize"].serialize()
@classmethod
def fromJSON(cls, data):
if data is None:
return None
return cls(
sprite = data["sprite"],
name = data["name"],
height = data["height"],
flags = data["flags"],
components = {
name: serialize.unserialize(comp)
for name, comp in data["components"].items()
}
)
def getRoomData(self):
return self.roomData
| gpl-3.0 |
jsgage/android_kernel_ba2x | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
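# A raw trace line the regex above targets looks roughly like this
# (illustrative sample, shaped after the function tracer's output):
#     bash-1234  [001]  3618.254426: do_sys_open <-sys_open
# for which parseLine() returns the (time, callee, caller) triple
# ("3618.254426", "do_sys_open", "sys_open").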
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
yashrastogi16/python_koans | python2/runner/sensei.py | 3 | 9891 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os
import glob
import helper
from mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
from libs.colorama import init, Fore, Style
init() # init colorama
class Sensei(MockableTestResult):
def __init__(self, stream):
unittest.TestResult.__init__(self)
self.stream = stream
self.prevTestClassName = None
self.tests = path_to_enlightenment.koans()
self.pass_count = 0
self.lesson_pass_count = 0
self.all_lessons = None
def startTest(self, test):
MockableTestResult.startTest(self, test)
if helper.cls_name(test) != self.prevTestClassName:
self.prevTestClassName = helper.cls_name(test)
if not self.failures:
self.stream.writeln()
self.stream.writeln("{0}{1}Thinking {2}".format(
Fore.RESET, Style.NORMAL, helper.cls_name(test)))
if helper.cls_name(test) not in ['AboutAsserts', 'AboutExtraCredit']:
self.lesson_pass_count += 1
def addSuccess(self, test):
if self.passesCount():
MockableTestResult.addSuccess(self, test)
self.stream.writeln( \
" {0}{1}{2} has expanded your awareness.{3}{4}" \
.format(Fore.GREEN, Style.BRIGHT, test._testMethodName, \
Fore.RESET, Style.NORMAL))
self.pass_count += 1
def addError(self, test, err):
# Having 1 list for errors and 1 list for failures would mess with
# the error sequence
self.addFailure(test, err)
def passesCount(self):
return not (self.failures and helper.cls_name(self.failures[0][0]) !=
self.prevTestClassName)
def addFailure(self, test, err):
MockableTestResult.addFailure(self, test, err)
def sortFailures(self, testClassName):
table = list()
for test, err in self.failures:
if helper.cls_name(test) == testClassName:
m = re.search("(?<= line )\d+" ,err)
if m:
tup = (int(m.group(0)), test, err)
table.append(tup)
if table:
return sorted(table)
else:
return None
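    # (Illustrative note: the "(?<= line )\d+" lookbehind above pulls the
    # first "line NNN" number out of a traceback string such as
    #   File "koans/about_asserts.py", line 12, in test_...
    # so failures can be sorted by their position in the koan file.)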
def firstFailure(self):
if not self.failures: return None
table = self.sortFailures(helper.cls_name(self.failures[0][0]))
if table:
return (table[0][1], table[0][2])
else:
return None
def learn(self):
self.errorReport()
self.stream.writeln("")
self.stream.writeln("")
self.stream.writeln(self.report_progress())
if self.failures:
self.stream.writeln(self.report_remaining())
self.stream.writeln("")
self.stream.writeln(self.say_something_zenlike())
if self.failures: sys.exit(-1)
self.stream.writeln(
"\n{0}**************************************************" \
.format(Fore.RESET))
self.stream.writeln("\n{0}That was the last one, well done!" \
.format(Fore.MAGENTA))
self.stream.writeln(
"\nIf you want more, take a look at about_extra_credit_task.py")
def errorReport(self):
problem = self.firstFailure()
if not problem: return
test, err = problem
self.stream.writeln(" {0}{1}{2} has damaged your "
"karma.".format(Fore.RED, Style.BRIGHT, test._testMethodName))
self.stream.writeln("\n{0}{1}You have not yet reached enlightenment ..." \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}".format(Fore.RED, \
Style.BRIGHT, self.scrapeAssertionError(err)))
self.stream.writeln("")
self.stream.writeln("{0}{1}Please meditate on the following code:" \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}{3}{4}".format(Fore.YELLOW, Style.BRIGHT, \
self.scrapeInterestingStackDump(err), Fore.RESET, Style.NORMAL))
def scrapeAssertionError(self, err):
if not err: return ""
error_text = ""
count = 0
for line in err.splitlines():
m = re.search("^[^^ ].*$",line)
if m and m.group(0):
count+=1
if count>1:
error_text += (" " + line.strip()).rstrip() + '\n'
return error_text.strip('\n')
def scrapeInterestingStackDump(self, err):
if not err:
return ""
lines = err.splitlines()
sep = '@@@@@SEP@@@@@'
stack_text = ""
for line in lines:
m = re.search("^ File .*$",line)
if m and m.group(0):
stack_text += '\n' + line
m = re.search("^ \w(\w)+.*$",line)
if m and m.group(0):
stack_text += sep + line
lines = stack_text.splitlines()
stack_text = ""
for line in lines:
m = re.search("^.*[/\\\\]koans[/\\\\].*$",line)
if m and m.group(0):
stack_text += line + '\n'
stack_text = stack_text.replace(sep, '\n').strip('\n')
stack_text = re.sub(r'(about_\w+.py)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
stack_text = re.sub(r'(line \d+)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
return stack_text
def report_progress(self):
return "You have completed {0} koans and " \
"{1} lessons.".format(
self.pass_count,
self.lesson_pass_count)
def report_remaining(self):
koans_remaining = self.total_koans() - self.pass_count
lessons_remaining = self.total_lessons() - self.lesson_pass_count
return "You are now {0} koans and {1} lessons away from " \
"reaching enlightenment.".format(
koans_remaining,
lessons_remaining)
# Hat's tip to Tim Peters for the zen statements from The 'Zen
# of Python' (http://www.python.org/dev/peps/pep-0020/)
#
# Also a hat's tip to Ara T. Howard for the zen statements from his
# metakoans Ruby Quiz (http://rubyquiz.com/quiz67.html) and
# Edgecase's later permutation in the Ruby Koans
def say_something_zenlike(self):
if self.failures:
turn = self.pass_count % 37
zenness = "";
if turn == 0:
zenness = "Beautiful is better than ugly."
elif turn == 1 or turn == 2:
zenness = "Explicit is better than implicit."
elif turn == 3 or turn == 4:
zenness = "Simple is better than complex."
elif turn == 5 or turn == 6:
zenness = "Complex is better than complicated."
elif turn == 7 or turn == 8:
zenness = "Flat is better than nested."
elif turn == 9 or turn == 10:
zenness = "Sparse is better than dense."
elif turn == 11 or turn == 12:
zenness = "Readability counts."
elif turn == 13 or turn == 14:
zenness = "Special cases aren't special enough to " \
"break the rules."
elif turn == 15 or turn == 16:
zenness = "Although practicality beats purity."
elif turn == 17 or turn == 18:
zenness = "Errors should never pass silently."
elif turn == 19 or turn == 20:
zenness = "Unless explicitly silenced."
elif turn == 21 or turn == 22:
zenness = "In the face of ambiguity, refuse the " \
"temptation to guess."
elif turn == 23 or turn == 24:
zenness = "There should be one-- and preferably only " \
"one --obvious way to do it."
elif turn == 25 or turn == 26:
zenness = "Although that way may not be obvious at " \
"first unless you're Dutch."
elif turn == 27 or turn == 28:
zenness = "Now is better than never."
elif turn == 29 or turn == 30:
zenness = "Although never is often better than right " \
"now."
elif turn == 31 or turn == 32:
zenness = "If the implementation is hard to explain, " \
"it's a bad idea."
elif turn == 33 or turn == 34:
zenness = "If the implementation is easy to explain, " \
"it may be a good idea."
else:
zenness = "Namespaces are one honking great idea -- " \
"let's do more of those!"
return "{0}{1}{2}{3}".format(Fore.CYAN, zenness, Fore.RESET, Style.NORMAL);
else:
return "{0}Nobody ever expects the Spanish Inquisition." \
.format(Fore.CYAN)
# Hopefully this will never ever happen!
return "The temple is collapsing! Run!!!"
def total_lessons(self):
all_lessons = self.filter_all_lessons()
if all_lessons:
return len(all_lessons)
else:
return 0
def total_koans(self):
return self.tests.countTestCases()
def filter_all_lessons(self):
cur_dir = os.path.split(os.path.realpath(__file__))[0]
if not self.all_lessons:
self.all_lessons = glob.glob('{0}/../koans/about*.py'.format(cur_dir))
self.all_lessons = filter(lambda filename:
"about_extra_credit" not in filename,
self.all_lessons)
return self.all_lessons
| mit |
quole/gensim | gensim/corpora/wikicorpus.py | 2 | 13419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Copyright (C) 2012 Lars Buitinck <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Construct a corpus from a Wikipedia (or other MediaWiki-based) database dump.
If you have the `pattern` package installed, this module will use a fancy
lemmatization to get a lemma of each token (instead of the plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
See scripts/process_wiki.py for a canned (example) script based on this
module.
"""
import bz2
import logging
import re
from xml.etree.cElementTree import iterparse # LXML isn't faster, so let's go with the built-in solution
import multiprocessing
from gensim import utils
# cannot import whole gensim.corpora, because that imports wikicorpus...
from gensim.corpora.dictionary import Dictionary
from gensim.corpora.textcorpus import TextCorpus
logger = logging.getLogger(__name__)
# ignore articles shorter than ARTICLE_MIN_WORDS characters (after full preprocessing)
ARTICLE_MIN_WORDS = 50
RE_P0 = re.compile('<!--.*?-->', re.DOTALL | re.UNICODE) # comments
RE_P1 = re.compile('<ref([> ].*?)(</ref>|/>)', re.DOTALL | re.UNICODE) # footnotes
RE_P2 = re.compile("(\n\[\[[a-z][a-z][\w-]*:[^:\]]+\]\])+$", re.UNICODE) # links to languages
RE_P3 = re.compile("{{([^}{]*)}}", re.DOTALL | re.UNICODE) # template
RE_P4 = re.compile("{{([^}]*)}}", re.DOTALL | re.UNICODE) # template
RE_P5 = re.compile('\[(\w+):\/\/(.*?)(( (.*?))|())\]', re.UNICODE) # remove URL, keep description
RE_P6 = re.compile("\[([^][]*)\|([^][]*)\]", re.DOTALL | re.UNICODE) # simplify links, keep description
RE_P7 = re.compile('\n\[\[[iI]mage(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) # keep description of images
RE_P8 = re.compile('\n\[\[[fF]ile(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) # keep description of files
RE_P9 = re.compile('<nowiki([> ].*?)(</nowiki>|/>)', re.DOTALL | re.UNICODE) # outside links
RE_P10 = re.compile('<math([> ].*?)(</math>|/>)', re.DOTALL | re.UNICODE) # math content
RE_P11 = re.compile('<(.*?)>', re.DOTALL | re.UNICODE) # all other tags
RE_P12 = re.compile('\n(({\|)|(\|-)|(\|}))(.*?)(?=\n)', re.UNICODE) # table formatting
RE_P13 = re.compile('\n(\||\!)(.*?\|)*([^|]*?)', re.UNICODE) # table cell formatting
RE_P14 = re.compile('\[\[Category:[^][]*\]\]', re.UNICODE) # categories
# Remove File and Image template
RE_P15 = re.compile('\[\[([fF]ile:|[iI]mage)[^]]*(\]\])', re.UNICODE)
# MediaWiki namespaces (https://www.mediawiki.org/wiki/Manual:Namespace) that
# ought to be ignored
IGNORED_NAMESPACES = ['Wikipedia', 'Category', 'File', 'Portal', 'Template',
'MediaWiki', 'User', 'Help', 'Book', 'Draft',
'WikiProject', 'Special', 'Talk']
def filter_wiki(raw):
"""
Filter out wiki mark-up from `raw`, leaving only text. `raw` is either unicode
or utf-8 encoded string.
"""
# parsing of the wiki markup is not perfect, but sufficient for our purposes
# contributions to improving this code are welcome :)
text = utils.to_unicode(raw, 'utf8', errors='ignore')
text = utils.decode_htmlentities(text) # '&nbsp;' --> '\xa0'
return remove_markup(text)
def remove_markup(text):
text = re.sub(RE_P2, "", text) # remove the last list (=languages)
# the wiki markup is recursive (markup inside markup etc)
# instead of writing a recursive grammar, here we deal with that by removing
# markup in a loop, starting with inner-most expressions and working outwards,
# for as long as something changes.
text = remove_template(text)
text = remove_file(text)
iters = 0
while True:
old, iters = text, iters + 1
text = re.sub(RE_P0, "", text) # remove comments
text = re.sub(RE_P1, '', text) # remove footnotes
text = re.sub(RE_P9, "", text) # remove outside links
text = re.sub(RE_P10, "", text) # remove math content
text = re.sub(RE_P11, "", text) # remove all remaining tags
text = re.sub(RE_P14, '', text) # remove categories
text = re.sub(RE_P5, '\\3', text) # remove urls, keep description
text = re.sub(RE_P6, '\\2', text) # simplify links, keep description only
# remove table markup
text = text.replace('||', '\n|') # each table cell on a separate line
text = re.sub(RE_P12, '\n', text) # remove formatting lines
text = re.sub(RE_P13, '\n\\3', text) # leave only cell content
# remove empty mark-up
text = text.replace('[]', '')
if old == text or iters > 2: # stop if nothing changed between two iterations or after a fixed number of iterations
break
# the following is needed to make the tokenizer see '[[socialist]]s' as a single word 'socialists'
# TODO is this really desirable?
text = text.replace('[', '').replace(']', '') # promote all remaining markup to plain text
return text
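# Illustrative end-to-end example of remove_markup (assumed, for orientation
# only; the bold quotes survive because only link/template/table markup is
# handled above):
#   remove_markup("'''Kafka''' is a [[stream processing|streaming]] platform")
#   -> "'''Kafka''' is a streaming platform"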
def remove_template(s):
"""Remove template wikimedia markup.
Return a copy of `s` with all the wikimedia markup template removed. See
http://meta.wikimedia.org/wiki/Help:Template for wikimedia templates
details.
Note: Since template can be nested, it is difficult remove them using
regular expresssions.
"""
# Find the start and end position of each template by finding the opening
# '{{' and closing '}}'
n_open, n_close = 0, 0
starts, ends = [], []
in_template = False
prev_c = None
for i, c in enumerate(iter(s)):
if not in_template:
if c == '{' and c == prev_c:
starts.append(i - 1)
in_template = True
n_open = 1
if in_template:
if c == '{':
n_open += 1
elif c == '}':
n_close += 1
if n_open == n_close:
ends.append(i)
in_template = False
n_open, n_close = 0, 0
prev_c = c
# Remove all the templates
s = ''.join([s[end + 1:start] for start, end in
zip(starts + [None], [-1] + ends)])
return s
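# Illustrative behaviour of remove_template (assumed, not from the original
# source); nested templates are stripped in one pass:
#   remove_template("Text {{cite web|url=x}} and {{a {{b}} c}} end")
#   -> "Text  and  end"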
def remove_file(s):
"""Remove the 'File:' and 'Image:' markup, keeping the file caption.
Return a copy of `s` with all the 'File:' and 'Image:' markup replaced by
their corresponding captions. See http://www.mediawiki.org/wiki/Help:Images
for the markup details.
"""
# The regex RE_P15 match a File: or Image: markup
for match in re.finditer(RE_P15, s):
m = match.group(0)
caption = m[:-2].split('|')[-1]
s = s.replace(m, caption, 1)
return s
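# Illustrative behaviour of remove_file (assumed): the markup is replaced by
# its caption, e.g.
#   remove_file("A [[File:photo.jpg|thumb|A red fox]] appears")
#   -> "A A red fox appears"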
def tokenize(content):
"""
Tokenize a piece of text from wikipedia. The input string `content` is assumed
to be mark-up free (see `filter_wiki()`).
    Return list of tokens as utf8 bytestrings. Ignore words shorter than 2 or
    longer than 15 characters (not bytes!).
"""
# TODO maybe ignore tokens with non-latin characters? (no chinese, arabic, russian etc.)
return [
utils.to_unicode(token) for token in utils.tokenize(content, lower=True, errors='ignore')
if 2 <= len(token) <= 15 and not token.startswith('_')
]
def get_namespace(tag):
"""Returns the namespace of tag."""
m = re.match("^{(.*?)}", tag)
namespace = m.group(1) if m else ""
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("%s not recognized as MediaWiki dump namespace"
% namespace)
return namespace
_get_namespace = get_namespace
def extract_pages(f, filter_namespaces=False):
"""
Extract pages from a MediaWiki database dump = open file-like object `f`.
Return an iterable over (str, str, str) which generates (title, content, pageid) triplets.
"""
elems = (elem for _, elem in iterparse(f, events=("end",)))
# We can't rely on the namespace for database dumps, since it's changed
# it every time a small modification to the format is made. So, determine
# those from the first element we find, which will be part of the metadata,
# and construct element paths.
elem = next(elems)
namespace = get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
page_tag = "{%(ns)s}page" % ns_mapping
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
ns_path = "./{%(ns)s}ns" % ns_mapping
pageid_path = "./{%(ns)s}id" % ns_mapping
for elem in elems:
if elem.tag == page_tag:
title = elem.find(title_path).text
text = elem.find(text_path).text
if filter_namespaces:
ns = elem.find(ns_path).text
if ns not in filter_namespaces:
text = None
pageid = elem.find(pageid_path).text
yield title, text or "", pageid # empty page will yield None
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# except that we don't need to prune backlinks from the parent
# because we don't use LXML.
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. The pages comprise the bulk of the
# file, so in practice we prune away enough.
elem.clear()
_extract_pages = extract_pages # for backward compatibility
def process_article(args):
"""
Parse a wikipedia article, returning its content as a list of tokens
(utf8-encoded strings).
"""
text, lemmatize, title, pageid = args
text = filter_wiki(text)
if lemmatize:
result = utils.lemmatize(text)
else:
result = tokenize(text)
return result, title, pageid
class WikiCorpus(TextCorpus):
"""
Treat a wikipedia articles dump (\*articles.xml.bz2) as a (read-only) corpus.
The documents are extracted on-the-fly, so that the whole (massive) dump
can stay compressed on disk.
>>> wiki = WikiCorpus('enwiki-20100622-pages-articles.xml.bz2') # create word->word_id mapping, takes almost 8h
>>> MmCorpus.serialize('wiki_en_vocab200k.mm', wiki) # another 8h, creates a file in MatrixMarket format plus file with id->word
"""
def __init__(self, fname, processes=None, lemmatize=utils.has_pattern(), dictionary=None, filter_namespaces=('0',)):
"""
Initialize the corpus. Unless a dictionary is provided, this scans the
corpus once, to determine its vocabulary.
If `pattern` package is installed, use fancier shallow parsing to get
token lemmas. Otherwise, use simple regexp tokenization. You can override
this automatic logic by forcing the `lemmatize` parameter explicitly.
    If self.metadata is set to True, serialize will write out article titles to a pickle file.
"""
self.fname = fname
self.filter_namespaces = filter_namespaces
self.metadata = False
if processes is None:
processes = max(1, multiprocessing.cpu_count() - 1)
self.processes = processes
self.lemmatize = lemmatize
if dictionary is None:
self.dictionary = Dictionary(self.get_texts())
else:
self.dictionary = dictionary
def get_texts(self):
"""
Iterate over the dump, returning text version of each article as a list
of tokens.
Only articles of sufficient length are returned (short articles & redirects
etc are ignored).
Note that this iterates over the **texts**; if you want vectors, just use
the standard corpus interface instead of this function::
>>> for vec in wiki_corpus:
>>> print(vec)
"""
articles, articles_all = 0, 0
positions, positions_all = 0, 0
texts = ((text, self.lemmatize, title, pageid) for title, text, pageid in extract_pages(bz2.BZ2File(self.fname), self.filter_namespaces))
pool = multiprocessing.Pool(self.processes)
# process the corpus in smaller chunks of docs, because multiprocessing.Pool
# is dumb and would load the entire input into RAM at once...
for group in utils.chunkize(texts, chunksize=10 * self.processes, maxsize=1):
for tokens, title, pageid in pool.imap(process_article, group): # chunksize=10):
articles_all += 1
positions_all += len(tokens)
# article redirects and short stubs are pruned here
if len(tokens) < ARTICLE_MIN_WORDS or any(title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES):
continue
articles += 1
positions += len(tokens)
if self.metadata:
yield (tokens, (pageid, title))
else:
yield tokens
pool.terminate()
logger.info(
"finished iterating over Wikipedia corpus of %i documents with %i positions"
" (total %i articles, %i positions before pruning articles shorter than %i words)",
articles, positions, articles_all, positions_all, ARTICLE_MIN_WORDS)
self.length = articles # cache corpus length
# endclass WikiCorpus
| lgpl-2.1 |
vkroz/kafka | system_test/utils/system_test_utils.py | 88 | 23697 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# system_test_utils.py
# ===================================
import copy
import difflib
import inspect
import json
import logging
import os
import re
import signal
import socket
import subprocess
import sys
import time
logger = logging.getLogger("namedLogger")
aLogger = logging.getLogger("anonymousLogger")
thisClassName = '(system_test_utils)'
d = {'name_of_class': thisClassName}
def get_current_unix_timestamp():
ts = time.time()
return "{0:.6f}".format(ts)
def get_local_hostname():
return socket.gethostname()
def sys_call(cmdStr):
output = ""
#logger.info("executing command [" + cmdStr + "]", extra=d)
p = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in p.stdout.readlines():
output += line
return output
def remote_async_sys_call(host, cmd):
cmdStr = "ssh " + host + " \"" + cmd + "\""
logger.info("executing command [" + cmdStr + "]", extra=d)
async_sys_call(cmdStr)
def remote_sys_call(host, cmd):
cmdStr = "ssh " + host + " \"" + cmd + "\""
logger.info("executing command [" + cmdStr + "]", extra=d)
sys_call(cmdStr)
def get_dir_paths_with_prefix(fullPath, dirNamePrefix):
dirsList = []
for dirName in os.listdir(fullPath):
if not os.path.isfile(dirName) and dirName.startswith(dirNamePrefix):
dirsList.append(os.path.abspath(fullPath + "/" + dirName))
return dirsList
def get_testcase_prop_json_pathname(testcasePathName):
testcaseDirName = os.path.basename(testcasePathName)
return testcasePathName + "/" + testcaseDirName + "_properties.json"
def get_json_list_data(infile):
json_file_str = open(infile, "r").read()
json_data = json.loads(json_file_str)
data_list = []
for key,settings in json_data.items():
if type(settings) == list:
for setting in settings:
if type(setting) == dict:
kv_dict = {}
for k,v in setting.items():
kv_dict[k] = v
data_list.append(kv_dict)
return data_list
def get_dict_from_list_of_dicts(listOfDicts, lookupKey, lookupVal):
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'}
#
# Usage:
#
# 1. get_data_from_list_of_dicts(self.clusterConfigsList, "entity_id", "0", "role")
# returns:
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
#
# 2. get_data_from_list_of_dicts(self.clusterConfigsList, None, None, "role")
# returns:
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'}
retList = []
if ( lookupVal is None or lookupKey is None ):
        for dict in listOfDicts:
            # no lookup key/val given: every dict matches. (The original code
            # compared keys against an undefined 'fieldToRetrieve' here, which
            # raised a NameError whenever this code path was taken.)
            retList.append( dict )
else:
for dict in listOfDicts:
for k,v in dict.items():
if ( k == lookupKey and v == lookupVal ): # match with lookupKey and lookupVal
retList.append( dict )
return retList
def get_data_from_list_of_dicts(listOfDicts, lookupKey, lookupVal, fieldToRetrieve):
# Sample List of Dicts:
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'}
#
# Usage:
# 1. get_data_from_list_of_dicts(self.clusterConfigsList, "entity_id", "0", "role")
# => returns ['zookeeper']
# 2. get_data_from_list_of_dicts(self.clusterConfigsList, None, None, "role")
# => returns ['zookeeper', 'broker']
retList = []
if ( lookupVal is None or lookupKey is None ):
for dict in listOfDicts:
for k,v in dict.items():
if ( k == fieldToRetrieve ): # match with fieldToRetrieve ONLY
try:
retList.append( dict[fieldToRetrieve] )
except:
logger.debug("field not found: " + fieldToRetrieve, extra=d)
else:
for dict in listOfDicts:
for k,v in dict.items():
if ( k == lookupKey and v == lookupVal ): # match with lookupKey and lookupVal
try:
retList.append( dict[fieldToRetrieve] )
except:
logger.debug("field not found: " + fieldToRetrieve, extra=d)
return retList
def get_data_by_lookup_keyval(listOfDict, lookupKey, lookupVal, fieldToRetrieve):
returnValue = ""
returnValuesList = get_data_from_list_of_dicts(listOfDict, lookupKey, lookupVal, fieldToRetrieve)
if len(returnValuesList) > 0:
returnValue = returnValuesList[0]
return returnValue
def get_json_dict_data(infile):
json_file_str = open(infile, "r").read()
json_data = json.loads(json_file_str)
data_dict = {}
for key,val in json_data.items():
if ( type(val) != list ):
data_dict[key] = val
return data_dict
def get_remote_child_processes(hostname, pid):
pidStack = []
cmdList = ['''ssh ''' + hostname,
''''pid=''' + pid + '''; prev_pid=""; echo $pid;''',
'''while [[ "x$pid" != "x" ]];''',
'''do prev_pid=$pid;''',
''' for child in $(ps -o pid,ppid ax | awk "{ if ( \$2 == $pid ) { print \$1 }}");''',
''' do echo $child; pid=$child;''',
''' done;''',
''' if [ $prev_pid == $pid ]; then''',
''' break;''',
''' fi;''',
'''done' 2> /dev/null''']
cmdStr = " ".join(cmdList)
logger.debug("executing command [" + cmdStr, extra=d)
subproc = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE)
for line in subproc.stdout.readlines():
procId = line.rstrip('\n')
pidStack.append(procId)
return pidStack
def get_child_processes(pid):
pidStack = []
currentPid = pid
parentPid = ""
pidStack.append(pid)
while ( len(currentPid) > 0 ):
psCommand = subprocess.Popen("ps -o pid --ppid %s --noheaders" % currentPid, shell=True, stdout=subprocess.PIPE)
psOutput = psCommand.stdout.read()
outputLine = psOutput.rstrip('\n')
childPid = outputLine.lstrip()
if ( len(childPid) > 0 ):
pidStack.append(childPid)
currentPid = childPid
else:
break
return pidStack
def sigterm_remote_process(hostname, pidStack):
while ( len(pidStack) > 0 ):
pid = pidStack.pop()
cmdStr = "ssh " + hostname + " 'kill -15 " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
def sigkill_remote_process(hostname, pidStack):
while ( len(pidStack) > 0 ):
pid = pidStack.pop()
cmdStr = "ssh " + hostname + " 'kill -9 " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
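# Note added for clarity: the "garbage collection pause" below is only
# simulated - every process in the tree is frozen with SIGSTOP, the caller
# sleeps for pauseTimeInSeconds, and the processes are then resumed with
# SIGCONT. No real garbage collection takes place.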
def simulate_garbage_collection_pause_in_remote_process(hostname, pidStack, pauseTimeInSeconds):
pausedPidStack = []
# pause the processes
while len(pidStack) > 0:
pid = pidStack.pop()
pausedPidStack.append(pid)
cmdStr = "ssh " + hostname + " 'kill -SIGSTOP " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
time.sleep(int(pauseTimeInSeconds))
# resume execution of the processes
while len(pausedPidStack) > 0:
pid = pausedPidStack.pop()
cmdStr = "ssh " + hostname + " 'kill -SIGCONT " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
def terminate_process(pidStack):
while ( len(pidStack) > 0 ):
pid = pidStack.pop()
try:
os.kill(int(pid), signal.SIGTERM)
except:
print "WARN - pid:",pid,"not found"
raise
def convert_keyval_to_cmd_args(configFilePathname):
cmdArg = ""
inlines = open(configFilePathname, "r").readlines()
for inline in inlines:
line = inline.rstrip()
tokens = line.split('=', 1)
if (len(tokens) == 2):
cmdArg = cmdArg + " --" + tokens[0] + " " + tokens[1]
elif (len(tokens) == 1):
cmdArg = cmdArg + " --" + tokens[0]
else:
print "ERROR: unexpected arguments list", line
return cmdArg
def async_sys_call(cmd_str):
subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def sys_call_return_subproc(cmd_str):
p = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p
def remote_host_file_exists(hostname, pathname):
cmdStr = "ssh " + hostname + " 'ls " + pathname + "'"
logger.debug("executing command: [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
if "No such file or directory" in line:
return False
return True
def remote_host_directory_exists(hostname, path):
cmdStr = "ssh " + hostname + " 'ls -d " + path + "'"
logger.debug("executing command: [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
if "No such file or directory" in line:
return False
return True
def remote_host_processes_stopped(hostname):
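    # return True only when no kafka-related processes (java, run-*,
    # producer, consumer, jmxtool) remain running on the remote host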
cmdStr = "ssh " + hostname + \
" \"ps auxw | grep -v grep | grep -v Bootstrap | grep -i 'java\|run\-\|producer\|consumer\|jmxtool\|kafka' | wc -l\" 2> /dev/null"
logger.info("executing command: [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.info("no. of running processes found : [" + line + "]", extra=d)
if line == '0':
return True
return False
def setup_remote_hosts(systemTestEnv):
# sanity check on remote hosts to make sure:
    # - all directories (eg. java_home) specified in cluster_config.json exist in all hosts
# - no conflicting running processes in remote hosts
aLogger.info("=================================================")
aLogger.info("setting up remote hosts ...")
aLogger.info("=================================================")
clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList
localKafkaHome = os.path.abspath(systemTestEnv.SYSTEM_TEST_BASE_DIR + "/..")
    # when configuring "default" java_home, use the JAVA_HOME environment
    # variable if it exists; otherwise, use the directory containing the java binary
localJavaHome = os.environ.get('JAVA_HOME')
if localJavaHome is not None:
localJavaBin = localJavaHome + '/bin/java'
else:
subproc = sys_call_return_subproc("which java")
for line in subproc.stdout.readlines():
if line.startswith("which: no "):
logger.error("No Java binary found in local host", extra=d)
return False
else:
line = line.rstrip('\n')
localJavaBin = line
matchObj = re.match("(.*)\/bin\/java$", line)
localJavaHome = matchObj.group(1)
listIndex = -1
for clusterEntityConfigDict in clusterEntityConfigDictList:
listIndex += 1
hostname = clusterEntityConfigDict["hostname"]
kafkaHome = clusterEntityConfigDict["kafka_home"]
javaHome = clusterEntityConfigDict["java_home"]
if hostname == "localhost" and javaHome == "default":
clusterEntityConfigDictList[listIndex]["java_home"] = localJavaHome
if hostname == "localhost" and kafkaHome == "default":
clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome
if hostname == "localhost" and kafkaHome == "system_test/migration_tool_testsuite/0.7":
clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome + "/system_test/migration_tool_testsuite/0.7"
kafkaHome = clusterEntityConfigDict["kafka_home"]
javaHome = clusterEntityConfigDict["java_home"]
logger.debug("checking java binary [" + localJavaBin + "] in host [" + hostname + "]", extra=d)
if not remote_host_directory_exists(hostname, javaHome):
logger.error("Directory not found: [" + javaHome + "] in host [" + hostname + "]", extra=d)
return False
logger.debug("checking directory [" + kafkaHome + "] in host [" + hostname + "]", extra=d)
if not remote_host_directory_exists(hostname, kafkaHome):
logger.info("Directory not found: [" + kafkaHome + "] in host [" + hostname + "]", extra=d)
if hostname == "localhost":
return False
else:
localKafkaSourcePath = systemTestEnv.SYSTEM_TEST_BASE_DIR + "/.."
logger.debug("copying local copy of [" + localKafkaSourcePath + "] to " + hostname + ":" + kafkaHome, extra=d)
copy_source_to_remote_hosts(hostname, localKafkaSourcePath, kafkaHome)
return True
def copy_source_to_remote_hosts(hostname, sourceDir, destDir):
cmdStr = "rsync -avz --delete-before " + sourceDir + "/ " + hostname + ":" + destDir
logger.info("executing command [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
dummyVar = 1
def remove_kafka_home_dir_at_remote_hosts(hostname, kafkaHome):
    # only remove a directory that looks like a kafka home (ie. one that
    # contains bin/kafka-run-class.sh); otherwise abort the test run
    cmdStr = "ssh " + hostname + " 'rm -rf " + kafkaHome + "'"
    if remote_host_file_exists(hostname, kafkaHome + "/bin/kafka-run-class.sh"):
        chmodCmdStr = "ssh " + hostname + " 'chmod -R 777 " + kafkaHome + "'"
        logger.info("executing command [" + chmodCmdStr + "]", extra=d)
        sys_call(chmodCmdStr)
        logger.info("executing command [" + cmdStr + "]", extra=d)
        #sys_call(cmdStr)
    else:
        logger.warn("possible destructive command [" + cmdStr + "]", extra=d)
        logger.warn("check config file: system_test/cluster_config.properties", extra=d)
        logger.warn("aborting test...", extra=d)
        sys.exit(1)
def get_md5_for_file(filePathName, blockSize=8192):
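    # hash the file in blockSize chunks so large files do not have to be
    # read into memory at once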
    md5 = hashlib.md5()
    with open(filePathName, 'rb') as f:
        while True:
            data = f.read(blockSize)
            if not data:
                break
            md5.update(data)
    return md5.digest()
def load_cluster_config(clusterConfigPathName, clusterEntityConfigDictList):
# empty the list
clusterEntityConfigDictList[:] = []
# retrieve each entity's data from cluster config json file
# as "dict" and enter them into a "list"
jsonFileContent = open(clusterConfigPathName, "r").read()
jsonData = json.loads(jsonFileContent)
for key, cfgList in jsonData.items():
if key == "cluster_config":
for cfg in cfgList:
clusterEntityConfigDictList.append(cfg)
def setup_remote_hosts_with_testcase_level_cluster_config(systemTestEnv, testCasePathName):
# =======================================================================
# starting a new testcase, check for local cluster_config.json
# =======================================================================
# 1. if there is a xxxx_testsuite/testcase_xxxx/cluster_config.json
# => load it into systemTestEnv.clusterEntityConfigDictList
# 2. if there is NO testcase_xxxx/cluster_config.json but has a xxxx_testsuite/cluster_config.json
    # => restore systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite
# 3. if there is NO testcase_xxxx/cluster_config.json NOR xxxx_testsuite/cluster_config.json
# => restore system_test/cluster_config.json
testCaseLevelClusterConfigPathName = testCasePathName + "/cluster_config.json"
if os.path.isfile(testCaseLevelClusterConfigPathName):
# if there is a cluster_config.json in this directory, load it and use it for this testsuite
logger.info("found a new cluster_config : " + testCaseLevelClusterConfigPathName, extra=d)
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# load the cluster config for this testcase level
load_cluster_config(testCaseLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList)
# back up this testcase level cluster config
systemTestEnv.clusterEntityConfigDictListLastFoundInTestCase = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList)
elif len(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite) > 0:
# if there is NO testcase_xxxx/cluster_config.json, but has a xxxx_testsuite/cluster_config.json
# => restore the config in xxxx_testsuite/cluster_config.json
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# restore the system_test/cluster_config.json
systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite)
else:
# if there is NONE, restore the config in system_test/cluster_config.json
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# restore the system_test/cluster_config.json
systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel)
# set up remote hosts
if not setup_remote_hosts(systemTestEnv):
logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d)
print
sys.exit(1)
print
def setup_remote_hosts_with_testsuite_level_cluster_config(systemTestEnv, testModulePathName):
# =======================================================================
# starting a new testsuite, check for local cluster_config.json:
# =======================================================================
    # 1. if there is a xxxx_testsuite/cluster_config.json
# => load it into systemTestEnv.clusterEntityConfigDictList
    # 2. if there is NO xxxx_testsuite/cluster_config.json
# => restore system_test/cluster_config.json
testSuiteLevelClusterConfigPathName = testModulePathName + "/cluster_config.json"
if os.path.isfile(testSuiteLevelClusterConfigPathName):
# if there is a cluster_config.json in this directory, load it and use it for this testsuite
logger.info("found a new cluster_config : " + testSuiteLevelClusterConfigPathName, extra=d)
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# load the cluster config for this testsuite level
load_cluster_config(testSuiteLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList)
# back up this testsuite level cluster config
systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList)
else:
# if there is NONE, restore the config in system_test/cluster_config.json
# empty the last testsuite level cluster config list
systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite[:] = []
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# restore the system_test/cluster_config.json
systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel)
# set up remote hosts
if not setup_remote_hosts(systemTestEnv):
logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d)
print
sys.exit(1)
print
# =================================================
# lists_diff_count
# - find the no. of different items in both lists
# - both lists need not be sorted
# - input lists won't be changed
# =================================================
def lists_diff_count(a, b):
c = list(b)
d = []
for item in a:
try:
c.remove(item)
except:
d.append(item)
if len(d) > 0:
print "#### Mismatch MessageID"
print d
return len(c) + len(d)
# =================================================
# subtract_list
# - subtract items in listToSubtract from mainList
# and return the resulting list
# - both lists need not be sorted
# - input lists won't be changed
# =================================================
def subtract_list(mainList, listToSubtract):
remainingList = list(mainList)
for item in listToSubtract:
try:
remainingList.remove(item)
except:
pass
return remainingList
# =================================================
# diff_lists
# - find the diff of 2 lists and return the
# total no. of mismatch from both lists
# - diff of both lists includes:
# - no. of items mismatch
# - ordering of the items
#
# sample lists:
# a = ['8','4','3','2','1']
# b = ['8','3','4','2','1']
#
# difflib will return the following:
# 8
# + 3
# 4
# - 3
# 2
# 1
#
# diff_lists(a,b) returns 2 and prints the following:
# #### only in seq 2 : + 3
# #### only in seq 1 : - 3
# =================================================
def diff_lists(a, b):
    mismatchCount = 0
    # use a distinct name for the Differ instance: 'd' is the module-level
    # extra dict passed to the logger calls below
    differ = difflib.Differ()
    diff = differ.compare(a, b)
    for item in diff:
        result = item[0:1].strip()
        if len(result) > 0:
            mismatchCount += 1
            if '-' in result:
                logger.debug("#### only in seq 1 : " + item, extra=d)
            elif '+' in result:
                logger.debug("#### only in seq 2 : " + item, extra=d)
    return mismatchCount
| apache-2.0 |
EmmanuelJohnson/ssquiz | flask/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py | 451 | 4833 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
from ctypes import windll
from ctypes import wintypes
except ImportError:
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort, POINTER
)
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", wintypes._COORD),
("dwCursorPosition", wintypes._COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", wintypes._COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
wintypes._COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position):
position = wintypes._COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = wintypes._COORD(position.Y - 1, position.X - 1)
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
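    # write 'char' repeated 'length' times starting at coordinate 'start'
    # and return how many characters were actually written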
handle = handles[stream_id]
char = c_char(char)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
| bsd-3-clause |
elkingtonmcb/pattern | pattern/web/pdf/encodingdb.py | 56 | 1548 | #!/usr/bin/env python2
import re
from psparser import PSLiteral
from glyphlist import glyphname2unicode
from latin_enc import ENCODING
## name2unicode
##
STRIP_NAME = re.compile(r'[0-9]+')
def name2unicode(name):
"""Converts Adobe glyph names to Unicode numbers."""
if name in glyphname2unicode:
return glyphname2unicode[name]
m = STRIP_NAME.search(name)
if not m: raise KeyError(name)
return unichr(int(m.group(0)))
## EncodingDB
##
class EncodingDB(object):
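    # build code-point to unicode lookup tables for the four standard
    # encodings listed in latin_enc.ENCODING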
std2unicode = {}
mac2unicode = {}
win2unicode = {}
pdf2unicode = {}
for (name,std,mac,win,pdf) in ENCODING:
c = name2unicode(name)
if std: std2unicode[std] = c
if mac: mac2unicode[mac] = c
if win: win2unicode[win] = c
if pdf: pdf2unicode[pdf] = c
encodings = {
'StandardEncoding': std2unicode,
'MacRomanEncoding': mac2unicode,
'WinAnsiEncoding': win2unicode,
'PDFDocEncoding': pdf2unicode,
}
@classmethod
def get_encoding(klass, name, diff=None):
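        # 'diff' follows the PDF /Differences array convention: an integer
        # sets the current character code, and each following glyph name is
        # assigned to consecutive codes from there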
cid2unicode = klass.encodings.get(name, klass.std2unicode)
if diff:
cid2unicode = cid2unicode.copy()
cid = 0
for x in diff:
if isinstance(x, int):
cid = x
elif isinstance(x, PSLiteral):
try:
cid2unicode[cid] = name2unicode(x.name)
except KeyError:
pass
cid += 1
return cid2unicode
| bsd-3-clause |
dudymas/python-openstacksdk | openstack/tests/unit/block_store/test_block_store_service.py | 4 | 1110 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.block_store import block_store_service
class TestBlockStoreService(testtools.TestCase):
def test_service(self):
sot = block_store_service.BlockStoreService()
self.assertEqual("volume", sot.service_type)
self.assertEqual("public", sot.interface)
self.assertIsNone(sot.region)
self.assertIsNone(sot.service_name)
self.assertEqual(1, len(sot.valid_versions))
self.assertEqual("v2", sot.valid_versions[0].module)
self.assertEqual("v2", sot.valid_versions[0].path)
| apache-2.0 |
Serg09/socorro | socorro/external/postgresql/skiplist.py | 8 | 3829 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import psycopg2
from socorro.external import DatabaseError, MissingArgumentError
from socorro.external.postgresql.base import PostgreSQLBase
from socorro.lib import external_common
logger = logging.getLogger("webapi")
class SkipList(PostgreSQLBase):
filters = [
("category", None, ["str"]),
("rule", None, ["str"]),
]
def get(self, **kwargs):
params = external_common.parse_arguments(self.filters, kwargs)
sql_params = []
sql = """
/* socorro.external.postgresql.skiplist.SkipList.get */
SELECT category,
rule
FROM skiplist
WHERE 1=1
"""
        if params.category:
            sql += ' AND category=%s'
            sql_params.append(params.category)
        if params.rule:
            sql += ' AND rule=%s'
            sql_params.append(params.rule)
# Use `UPPER()` to make the sort case insensitive
# which makes it more user-friendly on the UI later
sql += """
ORDER BY UPPER(category), UPPER(rule)
"""
error_message = "Failed to retrieve skip list data from PostgreSQL"
sql_results = self.query(sql, sql_params, error_message=error_message)
results = [dict(zip(("category", "rule"), x)) for x in sql_results]
return {'hits': results, 'total': len(results)}
def post(self, **kwargs):
params = external_common.parse_arguments(self.filters, kwargs)
if not params.category:
raise MissingArgumentError('category')
if not params.rule:
raise MissingArgumentError('rule')
sql = """
/* socorro.external.postgresql.skiplist.SkipList.post */
INSERT INTO skiplist (category, rule)
VALUES (%s, %s);
"""
sql_params = [params.category, params.rule]
connection = self.database.connection()
try:
with connection.cursor() as cur:
cur.execute(sql, sql_params)
connection.commit()
except psycopg2.Error:
connection.rollback()
error_message = "Failed updating skip list in PostgreSQL"
logger.error(error_message)
raise DatabaseError(error_message)
finally:
connection.close()
return True
def delete(self, **kwargs):
params = external_common.parse_arguments(self.filters, kwargs)
if not params.category:
raise MissingArgumentError('category')
if not params.rule:
raise MissingArgumentError('rule')
sql_params = [params.category, params.rule]
count_sql = """
/* socorro.external.postgresql.skiplist.SkipList.delete */
SELECT COUNT(*) FROM skiplist
WHERE category=%s AND rule=%s
"""
sql = """
/* socorro.external.postgresql.skiplist.SkipList.delete */
DELETE FROM skiplist
WHERE category=%s AND rule=%s
"""
connection = self.database.connection()
try:
cur = connection.cursor()
count = self.count(count_sql, sql_params, connection=connection)
if not count:
return False
cur.execute(sql, sql_params)
connection.commit()
except psycopg2.Error:
connection.rollback()
error_message = "Failed delete skip list in PostgreSQL"
logger.error(error_message)
raise DatabaseError(error_message)
finally:
connection.close()
return True
| mpl-2.0 |
fuhongliang/odoo | addons/crm/wizard/crm_partner_binding.py | 257 | 4570 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_partner_binding(osv.osv_memory):
"""
Handle the partner binding or generation in any CRM wizard that requires
such feature, like the lead2opportunity wizard, or the
phonecall2opportunity wizard. Try to find a matching partner from the
CRM model's information (name, email, phone number, etc) or create a new
one on the fly.
Use it like a mixin with the wizard of your choice.
"""
_name = 'crm.partner.binding'
_description = 'Handle partner binding or generation in CRM wizards.'
_columns = {
'action': fields.selection([
('exist', 'Link to an existing customer'),
('create', 'Create a new customer'),
('nothing', 'Do not link to a customer')
], 'Related Customer', required=True),
'partner_id': fields.many2one('res.partner', 'Customer'),
}
def _find_matching_partner(self, cr, uid, context=None):
"""
Try to find a matching partner regarding the active model data, like
the customer's name, email, phone number, etc.
:return int partner_id if any, False otherwise
"""
if context is None:
context = {}
        partner_id = False
        # default to False so the check below does not fail when the active
        # model is neither a lead nor a phonecall
        active_model = False
        partner_obj = self.pool.get('res.partner')
# The active model has to be a lead or a phonecall
if (context.get('active_model') == 'crm.lead') and context.get('active_id'):
active_model = self.pool.get('crm.lead').browse(cr, uid, context.get('active_id'), context=context)
elif (context.get('active_model') == 'crm.phonecall') and context.get('active_id'):
active_model = self.pool.get('crm.phonecall').browse(cr, uid, context.get('active_id'), context=context)
# Find the best matching partner for the active model
if (active_model):
partner_obj = self.pool.get('res.partner')
# A partner is set already
if active_model.partner_id:
partner_id = active_model.partner_id.id
# Search through the existing partners based on the lead's email
elif active_model.email_from:
partner_ids = partner_obj.search(cr, uid, [('email', '=', active_model.email_from)], context=context)
if partner_ids:
partner_id = partner_ids[0]
# Search through the existing partners based on the lead's partner or contact name
elif active_model.partner_name:
partner_ids = partner_obj.search(cr, uid, [('name', 'ilike', '%'+active_model.partner_name+'%')], context=context)
if partner_ids:
partner_id = partner_ids[0]
elif active_model.contact_name:
partner_ids = partner_obj.search(cr, uid, [
('name', 'ilike', '%'+active_model.contact_name+'%')], context=context)
if partner_ids:
partner_id = partner_ids[0]
return partner_id
def default_get(self, cr, uid, fields, context=None):
res = super(crm_partner_binding, self).default_get(cr, uid, fields, context=context)
partner_id = self._find_matching_partner(cr, uid, context=context)
if 'action' in fields and not res.get('action'):
res['action'] = partner_id and 'exist' or 'create'
if 'partner_id' in fields:
res['partner_id'] = partner_id
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |