blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f2cfdac0bc96e46353d775d7f021fc5354bcd22e | 3b94cf1bb34ceff22b20812ce9b246cb47f894c0 | /training_test.py | 9bc4caf125ae9ddad4f3718b65bf39a90d713f8b | [] | no_license | phinanix/alphamini | 99c6fa601fdef56c5d1e8e6f4c9abe6632e0c3bd | 6276fe6a701cdd4d1afb9c6b969d289c5067c3f1 | refs/heads/master | 2020-04-18T03:49:46.656180 | 2019-02-06T00:58:22 | 2019-02-06T00:58:22 | 167,214,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | import training
import params as p
class TestTraining():
    """Smoke tests exercising the Training class end to end.

    These are integration-style checks: they build a real Training
    instance and run short training loops, writing their artifacts to
    the file names passed in.
    """
    def ze_setup(self, board_size):
        """Construct and return a Training instance for the given board size."""
        return training.Training(board_size)
    def test_init(self):
        """Constructing a Training instance must not raise."""
        self.ze_setup(9)
    def test_train_loop(self):
        """Run a short serial training loop and return its result."""
        session = self.ze_setup(p.board_size)
        return session.training_loop(
            'test_exp_rp', 'test_network', 'test_train_log_file',
            rounds=2, games_per_round=4, positions_per_round=1024)
    def test_parallel_loop(self):
        """Run a short multi-process training loop and return its result."""
        session = self.ze_setup(p.board_size)
        return session.parallel_loop(
            'para_exp_rp', 'para_network', "para_log_file",
            rounds=2, games_per_round=10, positions_per_round=64,
            processes=4)
    def test_loading(self):
        """Resume training from previously saved network / replay files."""
        session = training.Training(p.board_size,
                                    network_filename="test_load_network",
                                    exp_rp_filename="test_load_exp_rp.npz")
        session.training_loop('ER_load_test', 'network_load_test',
                              "train_log_file",
                              rounds=5, games_per_round=5,
                              positions_per_round=512)
| [
"[email protected]"
] | |
961cf1c0c565b1c877863c1523434f4b65b3092f | b90a58b68d1fd0793e211ed2d2132a78b937a524 | /3828.py | fdd55858571fb644a17f875aa261bef4f3a73ca1 | [] | no_license | NikitaChevt/TSIS_3 | cb720e880ba60b4739c92010ac9881c3e256e932 | 646eab6f0859f78b7a2cb01c9721731ce3f61d42 | refs/heads/master | 2023-03-02T10:41:18.649306 | 2021-02-16T18:21:00 | 2021-02-16T18:21:00 | 339,490,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | a = input().split()
# `a` holds the whitespace-separated tokens read by the input().split() call
# above; print every other token (indices 0, 2, 4, ...) separated by spaces.
# NOTE(review): this leaves a trailing space after the last token — presumably
# acceptable for the judge this was written for.
for i in range(0, len(a), 2):
    print(a[i], end=" ")
"[email protected]"
] | |
313d4d0d626c87adfc0d1422c55a88b2e775e0af | 885b87e985bc41b64bc56a8d75d893358408f878 | /day3.py | 9421b67d28ae63c60b6377fc034cc59cfc83df24 | [] | no_license | Hollings/Advent-of-Code-2016 | b4e921f636030d969ab4c6c7b17edc4905bfd887 | 823847cfca2c2950dbb10a12e437af22e5ccd572 | refs/heads/master | 2020-06-10T21:45:40.291539 | 2017-07-23T06:38:44 | 2017-07-23T06:38:44 | 75,864,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,095 | py | input = """810 679 10
783 255 616
545 626 626
84 910 149
607 425 901
556 616 883
938 900 621
638 749 188
981 415 634
680 557 571
523 604 270
910 954 484
464 392 514
458 52 687
696 438 832
213 583 966
572 571 922
451 42 686
177 390 688
151 136 705
92 413 191
789 676 377
486 262 600
450 708 472
556 9 481
157 85 94
574 93 549
539 165 487
815 742 73
353 773 428
526 152 680
433 711 557
168 632 306
848 992 757
885 786 890
469 475 146
899 833 137
864 202 688
101 902 620
529 937 826
41 381 521
562 883 804
468 197 272
451 8 420
561 193 630
597 951 383
171 845 251
541 810 157
268 46 712
332 2 397
100 47 436
194 665 205
325 277 21
170 652 205
765 165 506
15 257 144
762 124 401
662 543 531
29 425 308
667 785 299
935 758 405
504 998 367
771 947 630
490 933 978
441 498 896
862 896 607
655 935 194
286 240 324
368 723 311
419 762 600
316 903 529
197 215 215
551 461 77
855 318 7
894 690 86
451 648 416
608 132 385
420 761 112
560 711 195
371 750 506
188 307 584
26 377 622
304 701 292
286 630 642
883 880 379
774 564 597
300 692 701
529 595 27
740 76 445
567 648 422
340 163 901
374 775 902
308 827 882
529 371 374
996 587 162
534 360 516
924 160 276
724 896 687
929 971 578
798 252 761
512 991 812
465 758 49
724 446 571
482 196 544
553 247 86
624 552 778
73 143 127
556 471 749
224 927 383
133 636 847
174 985 569
572 819 881
282 818 383
535 429 780
953 540 815
577 302 494
530 654 370
670 739 168
700 695 806
196 48 928
255 805 749
65 96 969
292 860 929
556 269 297
43 832 407
542 723 438
919 139 407
709 194 955
847 237 933
321 41 216
778 749 374
782 745 529
716 572 251
90 49 976
639 557 740
148 125 784
143 819 382
71 729 563
309 500 806
25 412 594
296 600 237
681 187 142
758 913 288
163 972 266
197 352 190
383 190 562
206 214 393
566 307 294
2 284 335
564 472 394
635 928 589
169 744 574
710 386 589
970 386 827
943 424 134
846 269 712
266 765 615
344 824 685
250 222 554
377 586 859
398 526 275
317 996 937
503 364 389
212 782 533
584 539 589
731 200 584
773 389 578
43 482 104
432 140 339
193 758 673
612 882 582
314 920 130
522 40 26
695 939 149
955 121 552
728 850 661
524 766 433
817 221 992
753 580 543
72 392 873
445 897 3
144 508 567
354 990 566
477 392 687
602 846 520
321 577 677
716 518 55
367 77 545
361 473 504
98 893 887
854 920 887
860 174 30
389 857 797
686 968 907
613 275 595
855 440 906
749 494 735
527 895 550
767 971 488
118 814 148
854 193 480
847 425 378
697 159 357
282 476 48
96 314 176
949 597 903
956 478 885
714 754 278
757 547 210
53 223 170
355 725 928
930 780 762
924 581 266
570 132 283
625 674 529
159 719 325
316 670 929
55 655 542
344 19 791
437 805 312
327 867 647
521 405 496
383 58 117
638 36 175
924 59 112
401 66 353
740 785 823
713 725 622
821 702 246
378 24 958
690 718 924
486 788 537
377 214 670
514 720 427
451 927 877
808 868 872
554 94 2
534 516 715
735 318 125
880 496 755
724 115 567
23 105 89
725 55 561
599 44 581
378 661 173
628 640 632
747 817 448
557 248 338
743 833 776
309 895 759
18 696 851
328 775 356
220 37 499
865 390 651
736 397 205
645 949 170
638 860 143
23 262 98
822 46 842
663 687 860
941 700 745
762 304 509
154 275 369
728 155 324
99 113 485
245 82 62
294 76 484
215 664 398
146 336 461
102 591 503
535 814 749
250 410 892
672 467 212
304 108 285
300 246 11
4 304 284
115 132 112
460 334 739
453 281 792
505 591 6
482 413 975
26 763 980
226 377 727
406 59 39
570 325 691
333 438 966
267 792 229
130 384 854
375 165 187
37 498 403
357 509 242
710 796 296
708 187 265
46 762 279
84 589 760
578 38 226
624 558 570
338 517 276
547 498 648
626 265 677
144 662 193
581 820 407
477 567 232
582 890 926
167 458 502
635 841 607
505 346 239
522 970 506
608 830 686
100 89 353
95 159 652
24 163 786
328 313 534
793 52 249
750 274 683
885 463 247
534 326 391
938 726 199
893 620 120
899 410 508
226 896 459
677 694 780
880 15 831
909 683 903
55 7 541
294 221 109
286 216 507
239 652 380
948 760 431
772 258 275
562 226 631
503 264 765
690 42 369
761 541 373
232 596 75
925 60 402
550 181 16
600 579 701
92 419 696
26 117 290
4 487 157
21 474 308
99 827 835
279 216 451
267 739 749
309 456 262
320 91 282
52 431 304
773 784 932
474 483 932
703 975 257
851 227 584
17 224 365
845 96 536
258 150 905
797 119 876
862 196 220
954 964 355
534 979 302
905 509 628
153 185 273
169 538 509
43 477 356
702 357 940
340 403 284
638 86 744
329 426 903
222 720 682
127 624 253
28 849 485
555 158 599
553 690 443
598 926 185
611 934 868
986 8 983
166 396 946
500 822 662
507 715 828
294 790 587
661 779 235
549 594 657
771 918 800
923 896 983
866 203 437
723 465 852
589 717 731
332 331 710
984 484 794
750 479 886
857 5 286
400 841 63
665 513 508
841 739 513
331 586 669
420 561 690
346 104 22
847 758 149
570 211 816
524 868 962
483 229 317
408 555 325
682 650 285
646 987 974
467 368 779
442 640 968
644 131 184
903 916 162
565 890 91
474 763 351
569 178 709
520 618 666
437 75 213
509 471 758
298 486 904
364 416 429
513 971 271
169 863 202
15 206 565
163 69 713
167 186 542
908 550 89
936 764 451
118 467 464
89 385 375
179 165 545
143 514 187
313 47 636
477 830 550
769 808 577
74 756 630
698 799 654
721 387 36
993 763 945
707 746 7
955 113 948
723 532 526
174 795 204
671 968 575
523 256 109
570 186 296
350 351 215
141 251 22
532 217 695
460 37 719
695 69 516
36 597 350
670 552 556
287 143 35
400 801 45
133 921 71
637 169 646
108 721 890
655 681 311
885 393 603
375 388 113
976 522 534
15 516 627
685 602 535
669 390 781
845 950 348
388 30 379
825 955 46
360 579 898
363 573 660
33 30 864
905 723 916
968 648 655
178 181 363
754 262 268
883 837 45
216 687 222
520 973 909
808 968 943
335 3 202
211 605 517
32 298 358
184 488 173
741 23 328
400 482 144
626 491 451
920 546 219
363 734 861
739 417 685
954 470 541
598 679 950
550 372 450
980 459 213
353 374 293
720 220 256
173 29 571
289 769 833
372 793 345
578 298 332
763 225 167
258 519 307
504 7 649
186 319 883
358 322 918
293 60 330
373 562 550
310 532 573
741 129 533
701 614 869
54 736 587
451 131 817
499 784 651
931 681 193
674 311 500
900 312 197
553 94 331
9 715 572
590 97 275
579 713 299
20 345 741
817 738 534
819 963 497
168 303 997
462 599 698
400 772 485
755 922 928
591 847 180
500 135 977
946 940 751
658 368 790
720 714 141
850 261 594
615 116 476
660 156 488
485 895 378
797 992 614
847 652 838
842 516 364
745 444 329
175 362 84
684 223 578
43 291 394
702 222 862
208 247 494
601 236 234
780 53 675
754 135 126
26 776 52
735 716 136
591 829 171
606 373 824
51 926 766
273 161 558
215 557 149
393 703 653
318 208 207
891 54 570
790 153 689
521 693 423
559 986 542
58 611 404
178 509 602
684 120 975
791 407 811
94 321 66
14 317 266
108 14 271
580 454 391
781 82 849
419 406 775
396 298 237
448 375 330
747 301 322
103 835 120
138 897 630
127 102 546
518 552 412
398 442 43
586 972 380
30 535 91
42 384 962
61 414 942
610 147 65
945 155 418
667 54 375
473 251 187
440 222 124
886 158 163
862 493 149
805 451 536
59 108 458
663 613 719
264 525 574
755 176 168
390 6 783
50 561 233
401 568 582
121 979 769
94 77 830
195 938 201
124 626 161
668 633 35
662 29 164
394 658 768
203 918 850
466 425 399
353 804 714
323 851 640
152 939 642
29 309 484
579 529 822
608 262 731
38 756 450
433 828 740
431 895 693
392 477 399
25 925 513
368 969 491
671 736 911
307 198 660
662 859 311
853 596 526
917 24 461
677 574 960
697 220 90
203 458 102
499 284 29
400 79 582
484 195 597
575 276 912
493 269 347
23 593 223
476 802 358
33 944 255
715 117 460
739 885 586
748 954 527
734 773 643
542 202 117
15 976 460
309 830 331
319 208 557
458 822 461
545 784 690
878 372 858
57 295 470
268 537 822
271 301 699
806 909 878
744 182 571
106 895 468
121 778 28
641 202 593
710 724 592
125 784 603
654 771 83
721 87 543
585 724 89
381 739 524
623 28 494
869 729 292
228 736 298
803 10 95
700 224 786
738 512 9
708 407 775
558 645 863
45 209 466
540 809 587
372 512 717
416 203 974
272 496 928
816 141 903
675 894 84
567 900 957
827 122 189
882 860 56
98 792 196
861 461 209
685 339 87
585 464 235
640 156 703
817 596 321
893 462 996
679 536 208
199 455 365
873 260 492
528 179 563
689 563 849
887 417 507
64 270 198
595 214 166
566 232 242
921 102 212
187 202 335
992 169 475
736 754 200
655 374 127
84 492 193
21 709 972
199 208 236
216 683 926
479 669 604
437 872 293
789 256 515
341 948 637
142 933 536
207 82 218
702 249 779
253 369 874
508 255 254
91 536 541
212 813 28
144 406 563
180 513 277
421 842 639
570 520 522
224 830 592
153 582 606
81 415 239
160 553 735
525 348 778
454 352 626
609 460 169
559 57 334
784 428 242
706 867 289
637 914 281
620 407 83
152 446 90
260 331 799
301 677 725
708 254 328
418 147 798
732 344 963
627 626 302
670 241 76
220 383 376
733 124 50
795 673 466
136 637 423
823 258 700
204 936 878
730 976 981
272 310 894
333 201 863
90 122 621
90 811 209
275 904 283
193 125 189
127 961 283
347 529 829
352 738 734
878 726 411
942 54 34
429 750 426
367 938 424
501 447 757
566 773 648
382 140 899
462 353 90
230 493 945
425 290 415
894 360 21
897 529 431
914 124 338
78 766 876
858 664 764
598 664 317
630 548 772
30 483 604
642 331 545
518 702 474
546 750 887
252 663 547
813 917 671
852 367 894
97 192 265
661 587 858
726 674 748
578 178 878
327 535 608
426 419 871
559 837 229
851 721 708
860 978 770
308 604 626
198 168 408
138 628 799
669 525 918
804 762 652
389 429 554
618 566 360
814 648 887
677 697 659
600 660 162
256 749 195
840 734 216
445 192 960
341 226 975
699 140 114
763 833 533
234 835 38
798 10 569
190 745 418
183 563 486
295 224 197
437 724 885
197 706 328
268 709 702
351 679 694
642 555 769
333 521 883
182 532 772
517 543 711
657 154 169
134 888 300
217 121 209
346 796 100
755 681 817
277 733 980
677 162 481
527 191 433
293 999 653
429 850 503
562 205 402
217 323 414
565 402 43
730 223 537
4 701 567
737 570 523
644 510 459
390 252 367
344 715 179
62 236 586
527 310 137
526 96 548
585 357 407
768 532 384
591 421 43
928 129 533
228 469 848
886 349 596
392 231 867
507 664 870
546 881 121
28 306 275
688 284 261
683 495 31
733 191 899
83 785 730
738 668 220
795 69 237
148 175 238
872 139 100
673 671 744
222 421 346
824 971 589
283 135 474
626 48 487
426 172 548
796 463 616
547 349 568
717 798 428
248 977 192
337 683 128
480 487 231
817 559 882
413 935 879
694 724 447
221 458 449
649 523 725
689 131 311
726 707 273
712 689 127
65 338 183
612 523 679
631 834 297
701 320 433
265 518 602
691 519 160
463 4 575
777 590 394
790 975 201
22 449 242
578 308 911
371 157 191
489 263 789
962 696 390
494 760 494
760 656 350
57 322 551
639 105 616
676 402 236
269 464 893
265 573 312
472 822 682
410 385 584
882 56 493
596 330 827
184 494 873
61 580 793
157 260 128
440 239 390
701 174 230
946 357 394
273 423 258
529 438 733
552 75 892
946 755 996
64 836 112
971 192 928
188 378 692
179 299 676
91 177 202
748 644 634
551 355 345
265 504 410
644 58 450
103 716 556
691 679 128
166 255 174
415 682 368
474 862 434
348 462 133
704 626 374
979 835 426
239 897 288
381 953 234
181 65 504
61 803 297
761 22 946
771 822 908
900 914 563
656 948 114
349 202 594
322 294 811
535 484 837
532 438 869
700 94 814
691 557 159
201 512 738
598 652 742
269 642 772
698 23 49
376 375 689
375 476 819
426 421 559
683 775 420
876 374 995
281 556 587
990 137 273
782 928 299
895 829 65
228 687 764
62 496 905
210 277 352
732 461 535
418 364 561
958 373 189
640 617 27
185 680 698
697 507 688
324 836 143
434 868 658
342 516 628
351 760 280
796 663 876
977 133 813
169 326 101
139 575 796
236 597 851
191 704 375
568 733 436
615 68 728
478 768 617
531 594 596
898 898 64
596 181 707
371 381 259
609 406 528
810 271 308
211 975 596
963 896 551
94 362 418
812 351 848
732 495 708
866 246 209
973 682 792
898 535 672
667 237 783
325 642 229
419 654 754
328 374 7
359 468 93
91 453 93
923 741 53
721 938 589
235 716 605
466 387 199
554 430 681
166 181 864
699 998 953
999 962 718
330 124 822
443 536 930
293 631 674
197 574 315
407 183 293
432 417 537
31 571 657
901 555 463
686 456 465
217 259 3
742 535 427
881 347 555
769 659 299
134 577 20
252 566 877
181 10 885
191 829 994
744 649 867
910 354 781
68 767 930
88 716 850
22 290 121
226 212 666
266 327 812
356 112 148
252 397 741
325 674 834
389 442 946
898 83 618
51 807 862
844 772 461
831 546 467
644 476 539
758 758 722
346 512 463
157 427 697
439 672 243
192 869 150
890 977 753
962 767 607
818 926 500
960 927 219
377 9 389
661 191 869
695 149 368
358 342 778
474 396 202
546 585 853
74 281 734
830 295 611
19 813 388
847 963 378
78 140 278
531 580 246
550 546 415
739 419 197
803 266 247
285 672 123
669 51 665
525 662 5
998 619 667
737 368 910
533 550 245
899 667 932
80 302 566
508 1 576
454 303 15
752 463 159
119 380 906
702 279 942
234 198 326
262 207 305
214 388 64
975 779 523
975 243 519
694 895 79
750 477 112
746 470 108
201 299 119
748 890 652
808 897 387
908 617 466
739 750 302
887 765 558
464 97 662
11 745 109
454 537 27
446 363 118
265 33 670
862 497 147
681 488 582
370 131 389
645 652 560
496 548 779
910 434 642
793 105 303
232 468 916
932 5 657
782 634 626
429 642 326
946 618 408
760 711 553
561 391 385
614 834 961
585 853 375
188 562 635
775 758 496
300 128 476
747 817 333
288 608 259
410 883 700
142 691 562
222 270 870
654 341 896
548 133 474
49 712 796
486 607 561
483 920 970
510 553 658
876 682 369
654 744 670
508 888 671
648 111 694
213 954 529
548 879 258
342 15 155
265 880 313
613 36 583
285 774 605
696 776 742
772 230 561
239 304 710
602 387 940
871 107 512
182 321 376
927 392 527
677 124 195
312 270 938
755 308 986
400 779 601
876 843 690
964 719 119
925 665 237
730 719 310
352 86 123
583 801 629
697 340 198
150 635 446
905 183 133
648 654 298
445 743 383
483 628 344
460 822 64
264 872 384
496 291 691
130 742 608
491 590 986
737 317 602
442 179 684
617 256 642
711 688 915
679 804 29
127 869 890
621 677 347
306 486 533
645 198 481
706 855 997
686 743 117
152 947 939
271 251 352
324 621 83
562 745 349
901 797 273
7 84 696
895 857 751
692 663 805
692 489 122
876 848 930
667 851 155
226 218 502
447 876 635
395 40 430
652 999 312
362 992 135
714 360 668
603 393 858
176 36 470
956 803 884
678 829 391
340 128 810
643 777 545
71 314 335
705 667 881
119 708 664
480 524 560
432 183 165
983 946 881
788 472 442
386 767 510
864 823 566
764 684 955
155 309 725
459 300 826
627 85 796
497 376 448
827 969 784
408 875 120
764 883 698
81 590 675
128 549 653
127 606 712
668 989 706
776 440 615
121 840 169
641 648 803
224 671 825
733 419 107
86 208 359
383 809 426
322 741 122
772 75 577
844 100 782
128 139 344
702 420 230
311 488 724
633 209 661
33 564 249
459 120 886
493 473 761
252 719 939
506 628 748
673 843 501
124 54 798
421 761 726
521 732 70
395 438 839
600 434 851
464 374 29
598 900 349
817 637 266
558 625 311
503 806 254
527 415 447
131 972 675
816 36 481
870 880 637
215 908 266
973 18 622
973 940 514
463 923 875
472 982 282
868 808 269
544 272 456
961 836 90
130 888 215
974 276 275
309 233 253
973 46 438
842 277 438
366 80 179
419 901 846
82 907 966
596 354 513
381 362 490
846 11 884
22 718 970
396 766 862
397 62 598
222 158 646
814 712 225
732 629 623
809 626 692
979 632 811
503 139 372
462 517 811
256 899 609
216 570 483
902 733 385
89 928 4
887 695 386
35 568 155
781 58 203
775 604 291
367 692 689
101 158 677
336 580 368
981 337 174
900 880 593
275 613 463
311 907 363
368 83 832
64 974 980
157 562 421
12 820 590
160 464 322
245 444 382
9 312 134
257 306 288
237 449 297
142 600 661
320 363 821
721 84 89
589 509 116
413 594 181
890 477 712
742 65 245
229 432 917
536 189 821
732 401 407
515 210 512
733 778 2
852 451 210
130 360 208
230 408 748
667 499 94
467 112 789
649 764 715
253 908 53
775 878 673
265 5 24
717 434 72
687 428 72
268 436 903
678 450 742
636 40 792
555 104 649
538 608 340
370 525 847
555 830 585
763 92 375
754 898 314
153 560 139
224 663 666
138 344 595
278 448 532
413 492 470
432 98 335
148 795 903
729 903 101
818 186 960
853 631 290
761 170 666
171 582 732
189 731 633
779 20 287
883 726 449
701 139 747
571 29 567
918 166 232
98 356 853
815 512 449
911 504 671
728 414 257
515 517 657
590 854 517
388 526 831
646 217 989
845 355 289
573 306 156
563 11 456
107 320 601
37 287 714
167 290 958
198 37 287
896 491 695
712 282 239
223 252 604
524 955 584
883 890 665
818 817 242
518 236 632
410 222 191
310 135 666
983 634 348
671 476 306
986 665 111
109 220 399
717 738 695
764 825 534
616 315 977
628 142 873
19 287 155
967 255 868
191 80 844
986 220 988
419 521 444
454 916 489
71 859 500
897 459 731
823 791 216
351 677 556
840 208 612
983 156 22
988 318 633
472 628 495
341 608 343
771 779 528
818 149 422
598 52 436
678 130 285
455 502 177
461 245 81
466 382 258
181 661 64
808 499 22
892 243 76
341 643 531
717 328 856
811 779 683
666 220 797
613 453 417
978 632 462
457 620 387
558 681 351
105 337 432
880 55 818
438 63 136
709 100 700
229 792 280
427 985 53
442 385 325
918 328 642
754 291 642
970 74 973
296 55 952
577 458 924
645 507 523
589 149 6
491 933 297
871 822 303
436 938 577
98 762 322
368 875 708
607 636 385
488 362 722
642 379 510
271 30 954
338 296 210
125 279 887
614 178 645
268 237 471
578 60 720
776 691 995
814 565 784
58 358 474
968 573 398
358 613 323
851 694 665
109 4 181
366 741 777
447 747 870
738 460 241
905 694 448
440 901 565
293 278 940
822 276 877
746 2 338
227 915 30
604 733 486
501 359 493
536 79 751
621 623 135
524 547 812
917 11 982
505 55 826
580 55 287
228 805 345
586 101 202
624 829 465
262 645 636
942 775 496
724 942 398
803 499 16
326 565 969
751 977 964
320 725 153
258 772 689
107 421 839
402 399 578
116 927 560
508 685 100
970 581 680
119 98 451
904 580 314
207 186 373
791 286 21
917 199 388
210 549 203
212 270 266
2 429 355
297 647 659
233 537 895
142 284 332
219 237 361
246 247 401
288 81 328
360 346 279
21 262 298
343 211 50
637 778 813
820 240 32
660 781 805
638 470 759
779 198 372
158 392 433
5 274 133
189 346 169
194 74 37
13 767 447
167 546 364
176 618 336
554 638 712
615 663 776
824 62 142
582 320 499
302 278 545
751 296 71
366 35 493
196 657 381
364 685 134
888 756 128
17 799 479
872 685 363
879 279 556
665 164 40
264 418 539
627 575 589
978 792 584
662 693 9
988 838 552
870 299 11
141 674 546
460 912 693
216 795 292
531 699 441
207 795 373
719 461 831
571 491 664
142 282 59
48 89 556
147 278 506
334 990 607
483 42 370
766 978 303
343 336 215
283 745 857
306 587 642
566 764 323
372 267 609
878 505 315
282 877 342
283 369 682
4 823 926
339 831 891
521 33 942
704 816 318
416 621 503
163 684 625
514 141 646
362 81 368
134 819 425
324 768 190
985 309 356
41 491 802
997 793 905
976 684 837
368 954 863
878 407 43
216 662 557
82 425 547
286 486 43
841 595 727
809 169 417
233 566 654
547 419 783
91 422 981
628 1 945
83 747 306
399 806 592
346 708 392
813 865 624
516 636 29
592 753 610
440 460 145
457 457 114
40 19 165
494 659 248
647 950 224
810 965 241
913 630 245
919 652 409
38 151 355
430 239 96
372 597 360
711 494 370
176 710 108
130 230 503
188 509 421
850 394 702
68 744 665
919 923 873"""
def triExists(sides):
    """Return 1 if the first three side lengths form a possible triangle, else 0.

    The values may be strings or numbers; each is converted with int().
    A triangle is possible only when every side is strictly shorter than
    the sum of the other two — degenerate (flat) triangles count as
    impossible.
    """
    lengths = [int(side) for side in sides]
    total = sum(lengths)
    for idx in range(3):
        # side >= sum of the other two -> impossible (or degenerate)
        if lengths[idx] >= total - lengths[idx]:
            return 0
    return 1
correct = 0;
# Count number of possible triangles in string
# NOTE(review): `input` here is the module-level data string defined above,
# shadowing the builtin input(). The chained replace() calls appear intended
# to collapse the column-padded runs of spaces before splitting — confirm the
# literal space arguments survived copy/paste intact; triangle.split() with no
# argument would be the robust equivalent.
for triangle in input.split("\n"):
    triangle = triangle.strip().replace(" "," ").replace(" "," ").split(" ")
    correct += triExists(triangle)
print(correct)
| [
"[email protected]"
] | |
b95c7f4ca50ce13c5d29d06f55b53d6d52f5d19b | f285adf0dffad2dbdb5fa48a3862ad26e7e51385 | /scripts/py/custom_multiqc.py | 98fc543ef3b192a932bede3f134585a31c83c6e2 | [] | no_license | crickbabs/qualityControl_byBABS | af02a661ef48a05730c6d40ae0a81ea961161190 | 61be42281a017ad370e7aa99bfee3b803d4655b7 | refs/heads/master | 2020-03-19T21:21:21.889718 | 2018-11-29T14:02:58 | 2018-11-29T14:02:58 | 136,937,320 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | from multiqc.plots import bargraph
# Per-sample alignment counts keyed by category; this is the input shape
# bargraph.plot expects ({sample: {category: count}}).
data = {
    'sample 1': {
        'aligned': 23542,
        'not_aligned': 343,
    },
    'sample 2': {
        'not_aligned': 7328,
        'aligned': 1275,
    }
}
# Render the bar graph and dump the generated HTML to stdout.
html_content = bargraph.plot(data)
print(html_content)
| [
"[email protected]"
] | |
064e9992acc527a698e13fc86d79b5cdc471330f | 0aa53bb392975595cfcb6ca1e532cc3d7ec59a2e | /galois/field/__init__.py | 2fb0e636512699226e6a43c0fb97ad0eeddefeea | [
"MIT"
] | permissive | varun19299/galois | a31cfe83f217d6433c773add55905f69d89281fe | cd97abd08dd4bfcaafc6f61cf93718c45d244f49 | refs/heads/master | 2023-06-03T22:44:55.303349 | 2021-06-19T14:13:58 | 2021-06-19T14:41:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | """
A subpackage containing arrays and polynomials over Galois fields.
"""
from .array import *
from .conway import *
from .factory import *
from .gf2 import *
from .meta_class import *
from .oakley import *
from .poly import *
from .poly_functions import *
# Define the GF2 primitive polynomial here, not in gf2.py, to avoid a circular dependency.
# The primitive polynomial is p(x) = x - alpha, where alpha=1. Over GF2, this is equivalent
# to p(x) = x + 1
# Runs at import time, after the star-imports above bring GF2 and Poly into scope.
GF2._irreducible_poly = Poly([1, 1])  # pylint: disable=protected-access
| [
"[email protected]"
] | |
d9a9888608bd988e9ebb46e0af07dce879c3a15f | 30fe7671b60825a909428a30e3793bdf16eaaf29 | /.metadata/.plugins/org.eclipse.core.resources/.history/13/8018e1a3a8e200161174a93fd5908e78 | bef53b89f1b6c63371a1e3091835fe5771f3dd50 | [] | no_license | abigdream84/PythonStudy | 0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1 | 059274d3ba6f34b62ff111cda3fb263bd6ca8bcb | refs/heads/master | 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | #!/usr/bin/env python
#coding:UTF-8
from talk_demo.utility.MySqlHelper import MySqlHelper
class indextb(object):
    # NOTE(review): this file is an editor-history snapshot and is syntactically
    # broken — chkWord() below has no indented body, and the statements after it
    # sit at method-definition level. Kept byte-identical; flagged for repair.
    def __init__(self):
        # DB helper; the double underscore name-mangles this to _indextb__helper.
        self.__helper = MySqlHelper()
    def __getCount(self):
        # Return the total number of rows in the `indextb` table.
        sql = 'select count(*) from indextb'
        row_count = self.__helper.select(sql)[0][0]
        return row_count
    def chkWord(self,mess):
    # NOTE(review): these lines call aa.getCount(), but the method above is the
    # name-mangled __getCount — as written this would raise AttributeError even
    # if the indentation were fixed.
    aa = indextb()
    print(aa.getCount())
    print(type(aa.getCount()))
| [
"[email protected]"
] | ||
08fdcbb1030cb33e77de2a52675f40c9b7bba88b | 702460d29a0ccafde1c4960db7a3408fffba1b63 | /setup.py | a947779c67cfcc805548b2ab1ff2480a6c83fb64 | [] | no_license | nandaguidotti/desafio-senai | 72daed1fc26feaf7171f28e7a4e1adf5a678d509 | e90315e6c32f254c794c5f593bce8de77f1dd640 | refs/heads/main | 2023-06-18T01:08:46.216988 | 2021-07-16T13:46:29 | 2021-07-16T13:46:29 | 385,730,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from setuptools import setup
# Minimal setuptools packaging metadata for the "desafio-senai" project
# ("Plataforma Vita"); most optional fields are intentionally left blank.
setup(
    name='desafio-senai',
    version='0.0.1',
    packages=[''],  # NOTE(review): '' packages the repo root; find_packages() is the usual intent — confirm
    url='',
    license='',
    author='Fernanda Guidoti Stramantino',
    author_email='',
    description='Plataforma Vita'
)
| [
"[email protected]"
] | |
e6c8ed74ef4fc35aeaf44c4999a33747e4d1a410 | 1d2149a87b694568dc8bedc41b4b39dfcf68d27e | /209. Minimum Size Subarray Sum new.py | b0bace484a6f9d38a6f1c185c26411e0a2d5fd1c | [] | no_license | jianminchen/LeetCode-IIII | 42eefecf49b3fc8e3a6c045276c0d1a8e35fc72d | d7d4aaca71c5e9d2652e56e71adfa619b7c237ad | refs/heads/master | 2021-01-23T00:15:29.369687 | 2016-08-28T20:16:31 | 2016-08-28T20:16:31 | 67,006,360 | 1 | 0 | null | 2016-08-31T05:30:53 | 2016-08-31T05:30:52 | null | UTF-8 | Python | false | false | 536 | py | import sys
class Solution(object):
    def minSubArrayLen(self, s, nums):
        """
        Return the length of the shortest contiguous subarray of nums whose
        sum is >= s, or 0 if no such subarray exists.

        Sliding window: extend the right edge one element at a time and
        shrink from the left while the window still meets the target, so
        each element enters and leaves the window at most once — O(n) time,
        O(1) extra space. (The original recomputed sum(nums[l:r+1]) on
        every step, which was accidentally O(n^2), and used sys.maxint,
        which does not exist on Python 3.)

        :type s: int
        :type nums: List[int]
        :rtype: int
        """
        best = len(nums) + 1  # sentinel: longer than any real window
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            # Shrink while the window still satisfies the target.
            while window_sum >= s and left <= right:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return best if best <= len(nums) else 0
"[email protected]"
] | |
19a569dca6bc779475b7a6659839701c264a87f1 | e3b0f647d50d067d9a75fa2aed5b7160f2d1b17b | /src/table/web/MockWebServer.py | faef72f512c4b474a47290eb5336592afd33547d | [] | no_license | janion/LedTable | 6eb00448c85310844635fe6acd5acb3d9d136792 | 7bac353ad6da75b25ab68cfe84e20c6c9f45a3eb | refs/heads/master | 2021-01-20T15:27:39.179368 | 2018-08-26T09:56:35 | 2018-08-26T09:56:35 | 90,772,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | import wx
from threading import Thread
class WebServerThread(object):
    """No-op mock of the real web-server thread: same interface, does nothing."""
    def __init__(self, service):
        # `service` is accepted only for interface compatibility and ignored.
        pass
    def start(self):
        # Intentionally a no-op; the mock has no thread to start.
        pass
class WebServer(object):
    """Mock web server: shows a control window on a background thread."""
    def __init__(self, pixelUpdater, writerFactory, patternManager):
        # Spawn the GUI on its own thread so construction does not block.
        # NOTE(review): wx generally requires GUI calls on the main thread;
        # the commented-out wx.CallAfter below suggests this was being
        # experimented with — confirm this is safe in the mock's context.
        guiThread = Thread(target=self.showWindow, args=[pixelUpdater, writerFactory, patternManager])
        guiThread.start()
    def showWindow(self, pixelUpdater, writerFactory, patternManager):
        # Build and display the mock control window (runs on guiThread).
        window = Window(pixelUpdater, writerFactory, patternManager)
        window.Show()
        # wx.CallAfter(window.Show)
class Window(wx.Frame):
    """Mock control window: one column of Set/Remove buttons and pattern
    fields per pattern, with row labels in column 0."""
    def __init__(self, pixelUpdater, writerFactory, patternManager):
        wx.Frame.__init__(self, pixelUpdater.window, -1, "Mock web service")
        self.updater = pixelUpdater
        self.writerFactory = writerFactory
        self.patterns = patternManager
        self._buildGui()
    def _buildGui(self):
        # Assemble the panel and attach the pattern-table sizer to it.
        self.panel = wx.Panel(self)
        customSizer = self._buildCustomSizer()
        # builtinSizer = self._buildBuiltinSizer()
        self.panel.SetSizer(customSizer)
    def _buildCustomSizer(self):
        """Build and return the grid of per-pattern controls."""
        patterns = self.patterns.getPatterns()
        sizer = wx.GridBagSizer(6, len(patterns) + 1)
        self._addTableTitles(sizer)
        for x in range(len(patterns)):
            pattern = patterns[x]
            setBtn = wx.Button(self.panel, -1, "Set")
            removeBtn = wx.Button(self.panel, -1, "Remove")
            # BUG FIX: bind the pattern name as a default argument. The
            # previous plain closures over `pattern` late-bound the loop
            # variable, so every button acted on the LAST pattern.
            setBtn.Bind(wx.EVT_BUTTON,
                        lambda event, name=pattern.getName(): self.patterns.setPattern(name))
            removeBtn.Bind(wx.EVT_BUTTON,
                           lambda event, name=pattern.getName(): self.patterns.removePattern(name))
            sizer.Add(setBtn, (0, x + 1))
            sizer.Add(removeBtn, (1, x + 1))
            sizer.Add(wx.TextCtrl(self.panel, -1, pattern.getName()), (2, x + 1))
            sizer.Add(wx.TextCtrl(self.panel, -1, pattern.getRedFunctionString()), (3, x + 1))
            sizer.Add(wx.TextCtrl(self.panel, -1, pattern.getGreenFunctionString()), (4, x + 1))
            sizer.Add(wx.TextCtrl(self.panel, -1, pattern.getBlueFunctionString()), (5, x + 1))
        # BUG FIX: the sizer was never returned before, so _buildGui
        # called self.panel.SetSizer(None).
        return sizer
    def _addTableTitles(self, sizer):
        # Row labels in column 0, one per grid row (matches rows 0-5 above).
        titles = ["Set", "Remove", "Name", "Red", "Green", "Blue"]
        for x in range(len(titles)):
            sizer.Add(wx.TextCtrl(self.panel, -1, titles[x]), (x, 0))
| [
"[email protected]"
] | |
9191eecaf09ed54641812f1551a562eb5ed2c50d | 1af2a0deace8f8836c57b06a3a2c7bb16d6556f7 | /Face detection/Face Detection using Haar Cascade OpenCv.py | 69ae799f82ff19cabec8130a9c69328d085bbcec | [] | no_license | AasaiAlangaram/OpenCv-Tutorials | e3ffe96c05806b1045b046dc17bc2b667bb85703 | 93799728124a66ae917574e56f6edb1490db297f | refs/heads/master | 2020-12-21T13:49:34.712094 | 2020-05-07T04:31:57 | 2020-05-07T04:31:57 | 236,448,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | import cv2
# To use an image file as input.
# NOTE(review): hardcoded absolute Windows path — only works on the author's
# machine; parameterize before reuse.
img_path = 'C:\\Users\\aasai\\Desktop\\TheHobbyists - Opencv Tutorials\\27-03-2020\\aasai.jpeg'
# load our image and convert it to grayscale
# NOTE(review): if the path is wrong, imread presumably yields None and the
# .shape access below would crash — consider a guard; confirm against cv2 docs.
image = cv2.imread(img_path)
print(image.shape)
image = cv2.resize(image,(450,600))
# print('row,column,no.of.channels',image.shape)
#convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# load the face detector and detect faces in the image
# (expects haarcascade_frontalface_default.xml in the working directory)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
rects = face_cascade.detectMultiScale(gray, scaleFactor=1.05, minNeighbors=8)
print(rects)
print("[INFO] detected {} faces".format(len(rects)))
# loop over the bounding boxes and draw a rectangle around each face
for (x, y, w, h) in rects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0,255,0), 2)
# show the detected faces
cv2.imshow("Faces", image)
cv2.waitKey(0)
| [
"[email protected]"
] | |
0d206ca6bfc59b8c0452663c607e72b2c20a26de | b7e5410387608dd27eab49d30e4e43ece0f8d4e1 | /djangoprojects/djmodelproject1/djmodelproject1/settings.py | 74a6f7346be2d3efb04f4947e1d25d574e9ac0a6 | [] | no_license | DhanaDhana/DhanaGit | cb0caab8a814649c4b8520f0553c1e01f76f9140 | 5eb58f11271a74ed4571e30e29470cf64e53c9ae | refs/heads/master | 2023-04-07T10:16:44.668810 | 2021-04-01T18:01:20 | 2021-04-01T18:01:20 | 264,395,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,188 | py | """
Django settings for djmodelproject1 project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l7d+y4fw^_+53l+x=a16b@i4(d#m%z9x-m=&sf50^%y9+iyzgt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djmodelproject1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djmodelproject1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
743eaa10ff30d7a2a96df3ea09c4cfabf35a93d0 | cc40d596b02b7115bd8dc3f8fd3aa1d5d2fbb203 | /extractData_recoverTest.py | 2e1f5d9c8ed1e7c458fa544b6a8e5208f690d652 | [
"MIT"
] | permissive | OLStefan/Adversarials-1Speech-Recognition | 45ceb4681b3b9fc8938a0becad428b3eafa304dc | cf2b5a7ddeab7df5b79914ac8135d129231853b4 | refs/heads/master | 2020-04-08T23:18:38.689489 | 2019-03-25T15:04:20 | 2019-03-25T15:04:20 | 159,819,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,820 | py | import os
import numpy as np
import scipy.io.wavfile as wav
import python_speech_features
def main():
adv_dirs = ["adversarials-carlini"]
adv1_dirs = ["adversarials-carlini-alzantot_recoverTest"]
attacks = ["alzantot", "carlini"]
f = open("data_recoverTest.txt", "w+")
for folder in adv1_dirs:
print(folder)
folder_index = adv1_dirs.index(folder)
for subfolder in os.listdir(folder):
print("\t" + subfolder)
for input in os.listdir(os.path.join(folder, subfolder)):
src_rate, src_wave = wav.read(os.path.join(folder, subfolder, input))
src_mfcc_feat = python_speech_features.mfcc(src_wave, src_rate).flatten()
src_wave_L0 = np.linalg.norm(src_wave, ord=0)
src_wave_L2 = np.linalg.norm(src_wave, ord=2)
src_wave_Linf = np.linalg.norm(src_wave, ord=np.inf)
src_mfcc_L0 = np.linalg.norm(src_mfcc_feat, ord=1)
src_mfcc_L2 = np.linalg.norm(src_mfcc_feat, ord=2)
src_mfcc_Linf = np.linalg.norm(src_mfcc_feat, ord=np.inf)
manip_1, orig, file1, file2, file3, rest = input.split('_', 5)
file = file1+"_"+file2+"_"+file3
orig_rate, orig_wave = wav.read(
os.path.join(adv_dirs[int(folder_index / 2)], manip_1, orig + '_' + file))
orig_mfcc_feat = python_speech_features.mfcc(orig_wave, orig_rate).flatten()
orig_wave_L0 = np.linalg.norm(orig_wave, ord=0)
orig_wave_L2 = np.linalg.norm(orig_wave, ord=2)
orig_wave_Linf = np.linalg.norm(orig_wave, ord=np.inf)
orig_mfcc_L0 = np.linalg.norm(orig_mfcc_feat, ord=1)
orig_mfcc_L2 = np.linalg.norm(orig_mfcc_feat, ord=2)
orig_mfcc_Linf = np.linalg.norm(orig_mfcc_feat, ord=np.inf)
diff_wave_L0 = orig_wave_L0 - src_wave_L0
diff_wave_L2 = orig_wave_L2 - src_wave_L2
diff_wave_Linf = orig_wave_Linf - src_wave_Linf
diff_mfcc_L0 = orig_mfcc_L0 - src_mfcc_L0
diff_mfcc_L2 = orig_mfcc_L2 - src_mfcc_L2
diff_mfcc_Linf = orig_mfcc_Linf - src_mfcc_Linf
# filename Orig 1stAttack 1stManip 2ndAttck 2ndManip diff_wave_L0 diff_wave_L2 diff_wave_Linf diff_mfcc_L1 diff_mfcc_L2 diff_mfcc_Linf
f.write(file + "," + orig + "," + attacks[int(folder_index / 2) + 1] + "," + manip_1 + "," + attacks[
folder_index % 2] + "," + subfolder +
"," + str(diff_wave_L0) + "," + str(diff_wave_L2) + "," + str(diff_wave_Linf) + "," + str(
diff_mfcc_L0) + "," + str(diff_mfcc_L2) + "," + str(diff_mfcc_Linf) + "\n")
f.close()
main()
| [
"[email protected]"
] | |
f50cd0d513e0cc472dcc52db46f3ac503ecddeae | 897182ab55e0a334a52bdc1be6c85b831815f5f7 | /code/required_schema_validation.py | d4bef4d2bc0c34823748d73f9c717fd45de9f53a | [
"MIT"
] | permissive | svebk/qpr-winter-2017 | e6e4e799706fb3ee996cd7772909f8bb25028975 | 3cf6eead549481591a1d83738af0e56a0ceeae56 | refs/heads/master | 2020-05-26T02:13:37.399071 | 2017-03-17T14:58:38 | 2017-03-17T14:58:38 | 84,985,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | import json
import sys
# Define what team's submission is being validated
team_A_submission_file = sys.argv[1]
# Load team submission
team_A_submission = []
with open(team_A_submission_file, 'r') as f:
for line in f:
entry = json.loads(line)
team_A_submission.append(entry)
# Validate team submission
fail = 0
for entry in team_A_submission:
if 'cluster_id' not in entry.keys():
print "CODE BREAKING ERROR: 'cluster_id' field missing. Submission is not valid."
fail = 1
if 'score' not in entry.keys():
print "CODE BREAKING ERROR: 'score' field missing. Submission is not valid."
fail = 1
if type(entry['score']) != float and type(entry['score']) != int:
print "CODE BREAKING ERROR: 'score' not a real number or integer. Submission is not valid."
fail = 1
if entry['score'] < 0.0 or entry['score'] > 1.0:
print "WARNING: 'score' not between 0.0 and 1.0 as expected."
if fail == 0:
print "Submission format is valid."
else:
print "CODE BREAKING ERROR: Check commment(s) above. Submission is not valid."
| [
"[email protected]"
] | |
d0c9aceb2b2fc8886d58f4926f788c9717c77b85 | 2c3641d3456bc1a91596a06990926dcc299d4c9e | /subscription-api/subscriptions/serializers.py | a785602f0435c1940e0c515736ba431ae756e247 | [] | no_license | pavanasri-nyros/CRUD-APP-VUE-DJANGO | 67030b5a711008b091f741371f8972b704c72e36 | 79a7e47bd89b25164679433cc20edc6179d1bdc5 | refs/heads/main | 2022-12-24T23:56:29.341842 | 2020-10-02T09:41:12 | 2020-10-02T09:41:12 | 300,567,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from rest_framework import serializers
from .models import Subscription
class SubscriptionSerializer(serializers.ModelSerializer):
class Meta:
model = Subscription
fields = ('name', 'description', 'currency',
'amount', 'created_at', 'updated_at'
) | [
"[email protected]"
] | |
f1e44a5597af174b8da53296dcfbe9690b0c0ab1 | 2829516f0a67c59a4cf44f987ad7f2a1b56994c4 | /a/works6.py | 7c02a8210f23b9a4911ca9ae75d9364ae12d0a29 | [] | no_license | SupremeHighStar/day7_2 | 72394b04ac38df72eb663a1fb93a720d2907e604 | 1a3b0c3bd0f9e1829579ea8a53d7c20bce383a3d | refs/heads/master | 2020-03-31T21:43:34.407518 | 2018-10-11T12:52:59 | 2018-10-11T12:52:59 | 152,591,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | #!/usr/bin/env/python
# _*_ coding:utf-8 -*-
aaa = ("""
[1] aaa
[2] bbb
[q] quit
suru:
""")
sr = raw_input(aaa)
while not (sr == "1" or sr == "2" ):
sr = sr = raw_input(aaa)
if sr == "q":
break
| [
"[email protected]"
] | |
f5abd043ce2d179dceafe9d4afd0067f9bc70c2d | fd7f8e803ab2c14e074f80db30410ff0e84ace55 | /backend/manage.py | 35af447178a979ea408cd408300adc0cba4ca3ae | [] | no_license | crowdbotics-apps/studentgizor-25888 | 166e7d93245e3d044f72494e62476460ecea92d6 | c0688025c71d9cd74b9c9c919e25a7b435cc52cd | refs/heads/master | 2023-04-10T03:51:05.784712 | 2021-04-23T14:24:47 | 2021-04-23T14:24:47 | 360,908,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "studentgizor_25888.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
a10d020f68a18282c47779e904610aa1abce4e3d | 58b7d7853b771a6936a744f25b8f1535bcbc2d88 | /map/projectLayers.py | 47e871f9047151631014a15cce230879c62af83f | [
"MIT"
] | permissive | avaldeon/mapqonverter | 6313eb7cef2e8cb796d78386f6d0dc576e9a4815 | cd0aa5f533194c85cf6e098fadc079ea61b63fce | refs/heads/master | 2023-08-17T14:58:30.640611 | 2021-10-12T11:12:14 | 2021-10-12T11:12:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,000 | py | # coding=utf-8
from _ctypes import COMError
from layer.layer import Layer as layerObj
import arcpy
import logging
from modules.arcGisModules import ArcGisModules
from modules.functions import change_interface
from map import brokenLayers
class ProjectLayers:
def __init__(self):
pass
@staticmethod
def create_project_layers_element(xml_document, header, layer_list, arc_object_map):
""" This creates a project layer object and its code in the dom
It does not return anything, it just fills the dom.
:param xml_document: the Document Object Model
:param header: the header in the dom
:param layer_list: the list of layers in the dataframe
:param arc_object_map: the ArcObject Map
"""
# Check if there are already projectlayers from another dataFrame
project_layers_elements = xml_document.getElementsByTagName('projectlayers')
if len(project_layers_elements) == 0:
project_layers_element = xml_document.createElement("projectlayers")
header.appendChild(project_layers_element)
arcpy.AddMessage(u'%1s.\t %1s %1s \t %1s' % ("Nr", "Name".center(50), "Status".center(33), "Typ"))
layer_path = ''
for index, layer in enumerate(layer_list):
if layer.isGroupLayer:
arcpy.AddMessage(u"{index}.\tLayer: {layer_name} ".format(
index=index + 1,
layer_name=layer.name.ljust(50)
))
ProjectLayers.__create_layer_converted_message(layer, index, 'Group', xml_document)
layer_path = ProjectLayers.__get_layer_path(layer, layer_path)
continue
else:
ProjectLayers.__create_layer_element(layer, layer_list, arc_object_map, xml_document, index, layer_path)
@staticmethod
def __create_layer_converted_message(layer, index, layer_object_type, xml_document, error_found=False):
""" Create a Message if succeeded or failed - and handles failure
:param layer: the layer its all about
:param index: its index in the layerlist
:param layer_object_type: the type of the layer
:param xml_document: the Document Object Model
:param error_found: indicate if error was found - default value is false
"""
if (layer_object_type == 'unknown') | error_found:
status = "could not be converted"
ProjectLayers.__handle_broken_layer(layer, xml_document)
else:
status = "successful converted"
arcpy.AddMessage(
u"{tabs} {status} \t - {type}-Layer \n".format(
tabs=11 * "\t",
status=status,
type=layer_object_type.title()
)
)
logging.info(
u"{index}.\tLayer: {layer_name} {status} \t - {type} \n".format(
index=index + 1,
layer_name=layer.name.ljust(50),
status=status,
type=layer_object_type
)
)
@staticmethod
def __create_layer_element(layer, layer_list, arc_object_map, xml_document, index, layer_path):
""" This function creates a layer_object and its content in the DOM
:param layer: the layer its all about
:param layer_list: the list of layers in the dataframe
:param arc_object_map: the ArcObject Map
:param xml_document: the Document Object Model
:param index: the index of the layer in the layerlist
:param layer_path: the layer_path of the layer
"""
arcpy.AddMessage(u"{index}.\tLayer: {layer_name} ".format(
index=index + 1,
layer_name=layer.name.ljust(50)
))
try:
arc_layer = ProjectLayers.__get_arc_objects_layer(layer, arc_object_map)
layer_path = ProjectLayers.__get_layer_path(layer, layer_path)
layer_object = layerObj(layer, arc_layer, xml_document, layer_list, layer_path)
layer_object_type = layer_object.get_layer_type()
print layer_object_type
base_layer_element = layer_object.create_base_layer()
xml_document.createElement(
layer_object.attach_layer_type(
layer_object_type,
base_layer_element
)
)
ProjectLayers.__create_layer_converted_message(layer, index, layer_object_type, xml_document)
except (KeyError, Exception):
ProjectLayers.__create_layer_converted_message(layer, index, 'unknown', xml_document, True)
@staticmethod
def __get_layer_path(layer, layer_path):
""" Returns the layer-path if possible, otherwise takes the given layer_path variable,
which comes from a parent-layer
For Example:
Annotation-Layers count as Group-Layers and provide the Datasource-Path for the child layer.
So the layer_path is a given variable and could have the information of the parent-layer.
:param layer: the layer its all about
:param layer_path: the path of the layer foregoing group-layer
:return: the layer_path of the layer
"""
if layer.supports("DATASOURCE"):
layer_path = layer.dataSource
return layer_path
@staticmethod
def __get_arc_objects_layer(layer, arc_object_map):
""" This returns the ArcObject-Layer-Object of the (ArcPy)Layer
:param layer: the layer its all about
:param arc_object_map: the ArcObject Map
:return: the ArcObject-Layer-Object
"""
longname_layer_list = layer.longName.split("\\")
if layer.isServiceLayer and not layer.isGroupLayer:
longname_layer_list = longname_layer_list[0:-1]
longname_layer_list_objects = []
# divide the longname in the previous grouplayers and find their position
# arcpy and arcobjects counting the layers different.
# arcobject orientate from the parent-layers, it does not count all the childlayers from a a layer
# arcpy just numerates every layer
# find first layer in the longname
for arc_index in range(arc_object_map.LayerCount):
if longname_layer_list[0] == arc_object_map.Layer[arc_index].Name:
try:
longname_layer_list_objects.append(arc_object_map.Layer[arc_index])
break
except COMError:
ProjectLayers.__throw_unsafed_changes_error()
# and the rest
for index, longname_layer in enumerate(longname_layer_list[1:]):
parent = change_interface(longname_layer_list_objects[index], ArcGisModules.module_carto.ICompositeLayer)
for i in range(parent.Count):
if longname_layer == parent.Layer[i].Name:
longname_layer_list_objects.append(parent.Layer[i])
break
return longname_layer_list_objects[-1]
@staticmethod
def __handle_broken_layer(layer, xml_document):
""" add a layer, which is not convertable, unknown, had errors to the broken_layers_list
and delete its maplayer element from the dom.
:param layer: the layer its all about
:param xml_document: the Document Object Model
:return:
"""
brokenLayers.BrokenLayers.broken_layer_list.append(layer)
map_layer_node = xml_document.getElementsByTagName('maplayer')[-1]
parent = map_layer_node.parentNode
parent.removeChild(map_layer_node)
@staticmethod
def __throw_unsafed_changes_error():
arcpy.AddError("###################################################")
arcpy.AddError("Alert! Are there unsafed changes in the project?")
arcpy.AddError("Save Changes and try exporting again.")
arcpy.AddError("###################################################")
| [
"[email protected]"
] | |
01ed1d720e0867de156ebab1d5ecccb4b44f3ef5 | 79e1d04867c4298b23c907f92c7119e4bea8ef02 | /allennlp/allennlp/tests/data/dataset_readers/reading_comprehension/util_test.py | d33ef08720b896078d1a625f8e91b7379a10d127 | [
"Apache-2.0"
] | permissive | ethanjperez/convince | 53db0bcd978831799c68fe63ecb0c91473ec40c4 | ccf60824b28f0ce8ceda44a7ce52a0d117669115 | refs/heads/master | 2023-01-08T09:12:16.722614 | 2021-11-03T18:50:30 | 2021-11-03T18:50:30 | 205,189,291 | 27 | 8 | Apache-2.0 | 2023-01-05T22:43:12 | 2019-08-29T15:03:34 | Python | UTF-8 | Python | false | false | 3,068 | py | # pylint: disable=no-self-use,invalid-name
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers.reading_comprehension import util
from allennlp.data.tokenizers import WordTokenizer
class TestReadingComprehensionUtil(AllenNlpTestCase):
def test_char_span_to_token_span_handles_easy_cases(self):
# These are _inclusive_ spans, on both sides.
tokenizer = WordTokenizer()
passage = "On January 7, 2012, Beyoncé gave birth to her first child, a daughter, Blue Ivy " +\
"Carter, at Lenox Hill Hospital in New York. Five months later, she performed for four " +\
"nights at Revel Atlantic City's Ovation Hall to celebrate the resort's opening, her " +\
"first performances since giving birth to Blue Ivy."
tokens = tokenizer.tokenize(passage)
offsets = [(t.idx, t.idx + len(t.text)) for t in tokens]
# "January 7, 2012"
token_span = util.char_span_to_token_span(offsets, (3, 18))[0]
assert token_span == (1, 4)
# "Lenox Hill Hospital"
token_span = util.char_span_to_token_span(offsets, (91, 110))[0]
assert token_span == (22, 24)
# "Lenox Hill Hospital in New York."
token_span = util.char_span_to_token_span(offsets, (91, 123))[0]
assert token_span == (22, 28)
def test_char_span_to_token_span_handles_hard_cases(self):
# An earlier version of the code had a hard time when the answer was the last token in the
# passage. This tests that case, on the instance that used to fail.
tokenizer = WordTokenizer()
passage = "Beyonc\u00e9 is believed to have first started a relationship with Jay Z " +\
"after a collaboration on \"'03 Bonnie & Clyde\", which appeared on his seventh " +\
"album The Blueprint 2: The Gift & The Curse (2002). Beyonc\u00e9 appeared as Jay " +\
"Z's girlfriend in the music video for the song, which would further fuel " +\
"speculation of their relationship. On April 4, 2008, Beyonc\u00e9 and Jay Z were " +\
"married without publicity. As of April 2014, the couple have sold a combined 300 " +\
"million records together. The couple are known for their private relationship, " +\
"although they have appeared to become more relaxed in recent years. Beyonc\u00e9 " +\
"suffered a miscarriage in 2010 or 2011, describing it as \"the saddest thing\" " +\
"she had ever endured. She returned to the studio and wrote music in order to cope " +\
"with the loss. In April 2011, Beyonc\u00e9 and Jay Z traveled to Paris in order " +\
"to shoot the album cover for her 4, and unexpectedly became pregnant in Paris."
start = 912
end = 912 + len("Paris.")
tokens = tokenizer.tokenize(passage)
offsets = [(t.idx, t.idx + len(t.text)) for t in tokens]
token_span = util.char_span_to_token_span(offsets, (start, end))[0]
assert token_span == (184, 185)
| [
"[email protected]"
] | |
9043f54961215acd4ba476c1d404a3f231dce67c | 8329485296b1e14c6e7976187285f6ed434d3c94 | /DNN-Experiments/CIFAR-10/utils.py | 7172f3a7c95ceafe1321843d57847d7e0a0bf312 | [] | no_license | jfc43/advex | 345d564062b483b6e6aa974c0c55809fe6acade6 | 61380b3053f2300ba97f510e7344312c6852644a | refs/heads/master | 2023-02-20T23:53:32.053949 | 2022-02-17T02:41:40 | 2022-02-17T02:41:40 | 240,986,952 | 5 | 1 | null | 2023-02-15T22:53:34 | 2020-02-17T00:07:40 | Jupyter Notebook | UTF-8 | Python | false | false | 7,568 | py | import numpy as np
import tensorflow as tf
import random
from skimage import feature, transform
import matplotlib.pyplot as plt
import scipy
import scipy.stats as stats
import cifar10_input
import os
import re
import sys
config_gpu = tf.ConfigProto()
config_gpu.gpu_options.allow_growth = True
EPSILON = 1e-12
MIN_INPUT = np.zeros([1,32,32,3]).astype(np.float32)
MAX_INPUT = np.ones([1,32,32,3]).astype(np.float32)
def gini(array):
# based on bottom eq:
# http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
# from:
# http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
# All values are treated equally, arrays must be 1d:
array = np.array(array, dtype=np.float64)
array = np.abs(array)
# Values cannot be 0:
array += 0.0000001
# Values must be sorted:
array = np.sort(array)
# Index per array element:
index = np.arange(1, array.shape[0] + 1)
# Number of array elements:
n = array.shape[0]
# Gini coefficient:
return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def plot(data, xi=None, cmap='RdBu_r', axis=plt, percentile=100, dilation=3.0, alpha=0.8):
dx, dy = 0.05, 0.05
xx = np.arange(0.0, data.shape[1], dx)
yy = np.arange(0.0, data.shape[0], dy)
xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
extent = xmin, xmax, ymin, ymax
cmap_xi = plt.get_cmap('Greys_r')
cmap_xi.set_bad(alpha=0)
overlay = None
if xi is not None:
# Compute edges (to overlay to heatmaps later)
xi_greyscale = xi if len(xi.shape) == 2 else np.mean(xi, axis=-1)
in_image_upscaled = transform.rescale(xi_greyscale, dilation, mode='constant')
edges = feature.canny(in_image_upscaled).astype(float)
edges[edges < 0.5] = np.nan
edges[:5, :] = np.nan
edges[-5:, :] = np.nan
edges[:, :5] = np.nan
edges[:, -5:] = np.nan
overlay = edges
abs_max = np.percentile(np.abs(data), percentile)
abs_min = abs_max
if len(data.shape) == 3:
data = np.mean(data, 2)
if xi is None:
axis.imshow(data, extent=extent, interpolation='none', cmap=cmap)
else:
axis.imshow(data, extent=extent, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
if overlay is not None:
axis.imshow(overlay, extent=extent, interpolation='none', cmap=cmap_xi, alpha=alpha)
axis.axis('off')
axis.xticks([])
axis.yticks([])
return axis
def dataReader():
cifar = cifar10_input.CIFAR10Data('cifar10_data')
X = cifar.eval_data.xs
y = cifar.eval_data.ys
return X, y.astype(int)
def run_model(sess, model, tensor, inputs):
if len(inputs.shape) == 3:
inputs = np.expand_dims(inputs, 0)
elif len(inputs.shape) == 4:
pass
else:
raise ValueError('Invalid input dimensions!')
return sess.run(tensor, feed_dict={model.input: inputs})
def get_session(number=None):
config_gpu = tf.ConfigProto()
config_gpu.gpu_options.allow_growth = True
return tf.Session(config=config_gpu)
def integrated_gradients(
sess,
baseline,
inp,
target_label_index,
model,
gradient_func='output_input_gradient',
steps=50):
"""Computes integrated gradients for a given network and prediction label.
Integrated gradients is a technique for attributing a deep network's
prediction to its input features. It was introduced by:
https://arxiv.org/abs/1703.01365
In addition to the integrated gradients tensor, the method also
returns some additional debugging information for sanity checking
the computation. See sanity_check_integrated_gradients for how this
information is used.
This method only applies to classification networks, i.e., networks
that predict a probability distribution across two or more class labels.
Access to the specific network is provided to the method via a
'predictions_and_gradients' function provided as argument to this method.
The function takes a batch of inputs and a label, and returns the
predicted probabilities of the label for the provided inputs, along with
gradients of the prediction with respect to the input. Such a function
should be easy to create in most deep learning frameworks.
Args:
inp: The specific input for which integrated gradients must be computed.
target_label_index: Index of the target class for which integrated gradients
must be computed.
predictions_and_gradients: This is a function that provides access to the
network's predictions and gradients. It takes the following
arguments:
- inputs: A batch of tensors of the same same shape as 'inp'. The first
dimension is the batch dimension, and rest of the dimensions coincide
with that of 'inp'.
- target_label_index: The index of the target class for which gradients
must be obtained.
and returns:
- predictions: Predicted probability distribution across all classes
for each input. It has shape <batch, num_classes> where 'batch' is the
number of inputs and num_classes is the number of classes for the model.
- gradients: Gradients of the prediction for the target class (denoted by
target_label_index) with respect to the inputs. It has the same shape
as 'inputs'.
baseline: [optional] The baseline input used in the integrated
gradients computation. If None (default), the all zero tensor with
the same shape as the input (i.e., 0*input) is used as the baseline.
The provided baseline and input must have the same shape.
steps: [optional] Number of intepolation steps between the baseline
and the input used in the integrated gradients computation. These
steps along determine the integral approximation error. By default,
steps is set to 50.
Returns:
integrated_gradients: The integrated_gradients of the prediction for the
provided prediction label to the input. It has the same shape as that of
the input.
The following output is meant to provide debug information for sanity
checking the integrated gradients computation.
See also: sanity_check_integrated_gradients
prediction_trend: The predicted probability distribution across all classes
for the various (scaled) inputs considered in computing integrated gradients.
It has shape <steps, num_classes> where 'steps' is the number of integrated
gradient steps and 'num_classes' is the number of target classes for the
model.
"""
if baseline is None:
baseline = 0*inp
assert(baseline.shape == inp.shape)
# Scale input and compute gradients.
scaled_inputs = [baseline + (float(i + 1)/steps)*(inp-baseline) for i in range(0, steps)]
scaled_labels = [target_label_index for i in range(0, steps)]
if gradient_func == 'loss_input_gradient':
grads = sess.run(model.loss_input_gradient, feed_dict = {model.input: scaled_inputs, model.label: scaled_labels}) # shapes: <steps+1, inp.shape>
else:
grads = sess.run(model.output_input_gradient, feed_dict = {model.input: scaled_inputs, model.label_ph: target_label_index})
avg_grads = np.average(grads[:-1], axis=0)
integrated_gradients = (inp-baseline)*avg_grads # shape: <inp.shape>
return integrated_gradients
| [
"[email protected]"
] | |
49b6177c144299690bb63229dccf5fa4185a5523 | c7aa2c42535d576a6a92d118f662ade3d40caa45 | /traceroute_gui.py | 31a15bdab6bf7a976f7bddf44d6288c1d170795a | [] | no_license | aheadlead/computer-network-exp | d114867b2d08834bd676da1d6bbf612e06d54ed2 | cfe6c682ed3895f168151a2176224e73a607361e | refs/heads/master | 2021-01-10T14:58:54.258586 | 2016-04-16T10:27:24 | 2016-04-16T10:27:24 | 54,172,144 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | #!/usr/bin/env python3
# coding=utf-8
import os
import platform
import re
import subprocess
from tkinter import *
from tkinter.messagebox import *
from traceroute import traceroute
if platform.system() != 'Windows':
if os.geteuid() != 0:
print('root permission needed')
exit(1)
else:
print('STOP: This program is incompatible with Microsoft Windows.')
exit(1)
ttl = 1
root = Tk()
root.geometry('{}x{}'.format(470, 400))
dest_ip_label = Label(root, text='ip address')
dest_ip_label.grid(row=0, column=0, sticky='W')
dest_ip_entry = Entry(root)
dest_ip_entry.grid(row=0, column=1)
result_text = Text(root, width=65)
result_text.grid(row=1, columnspan=4)
next_svar = StringVar(value='traceroute')
def go():
global ttl
ip_pattern = r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
if re.match(ip_pattern, dest_ip_entry.get()) is None:
showerror(title='error', message='invaild ip address')
else:
r = traceroute(dest_ip_entry.get(), ttl)
if ttl == 1:
result_text.delete(1.0, END)
result_text.insert(END, 'ttl\taddress\t\ttype\tcode\t1\t2\t3\n')
result_text.insert(END, r)
if '\t3\t3' in r:
ttl = 1
next_svar.set('traceroute')
showinfo(title='done', message='traceroute finished')
else:
ttl += 1
next_svar.set('next')
next_button = Button(root, textvariable=next_svar, command=go)
next_button.grid(row=0, column=2)
root.mainloop()
| [
"[email protected]"
] | |
0dd49036943e741a2fbdee98d1a95cb710fc91f8 | a7924ed009c5caba90ce53095114b2e02d569beb | /gae/initializations.py | e74222f4f6f3c46f1cb1a0168d42afde7922e3fb | [
"MIT"
] | permissive | yebiro/gae | e46eb4072e827cbdba179c2a436a5a1ccd9828f1 | c64c5f7a3350951916fba4e6c9f7a8ceb6eef3f1 | refs/heads/master | 2020-04-28T03:37:10.136220 | 2019-03-14T09:00:05 | 2019-03-14T09:00:05 | 163,237,432 | 0 | 0 | MIT | 2019-02-23T04:35:13 | 2018-12-27T02:28:44 | Python | UTF-8 | Python | false | false | 607 | py | import tensorflow as tf
import numpy as np
def weight_variable_glorot(input_dim, output_dim, name=""):
"""Create a weight variable with Glorot & Bengio (AISTATS 2010)
initialization.
"""
init_range = np.sqrt(6.0 / (input_dim + output_dim))
#返回input_dim*output_dim的矩阵,产生于minval和maxval之间,产生的值是均匀分布的。
initial = tf.random_uniform([input_dim, output_dim], minval=-init_range,
maxval=init_range, dtype=tf.float32)
#赋值“name”的值为initial
return tf.Variable(initial, name=name)
| [
"[email protected]"
] | |
5b52bfc91a584dbd79fd12493cc8e6987fbd0e29 | 78ea078e94589179b0d3f20ece63ac39964b54f4 | /ggplot/scales/scale_identity.py | c57d07cd68b92074b40bdd1777c1a19cd8f0d0aa | [
"BSD-2-Clause"
] | permissive | sanjaydatasciencedojo/ggpy | 5f02aae3f68ef96eb413b429e193021baba2d393 | fbc0d8b45c3dfbd47d9373c2147803931e4d1c20 | refs/heads/master | 2021-05-21T02:37:14.200013 | 2020-05-24T14:22:31 | 2020-05-24T14:22:31 | 252,505,074 | 0 | 0 | BSD-2-Clause | 2020-04-02T16:13:25 | 2020-04-02T16:13:25 | null | UTF-8 | Python | false | false | 1,712 | py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
class scale_identity(object):
"""
Use the value that you've passed for an aesthetic in the plot without mapping
it to something else. Classic example is if you have a data frame with a column
that's like this:
mycolor
0 blue
1 red
2 green
3 blue
4 red
5 blue
And you want the actual points you plot to show up as blue, red, or green. Under
normal circumstances, ggplot will generate a palette for these colors because it
thinks they are just normal categorical variables. Using scale_identity will make
it so ggplot uses the values of the field as the aesthetic mapping, so the points
will show up as the colors you want.
"""
VALID_SCALES = ["identity_type"]
def __radd__(self, gg):
gg = deepcopy(gg)
gg.scale_identity.add(self.identity_type)
return gg
class scale_alpha_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "alpha"
class scale_color_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "color"
class scale_fill_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "fill"
class scale_linetype_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "linetype"
class scale_shape_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "shape"
class scale_size_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "size"
| [
"[email protected]"
] | |
c9a908bec43ce33eb5b94f3b28986723611ef5fe | 06ef163d15fab6dba2c687e66cd33f479f6e0b86 | /notifications/migrations/0003_auto_20160914_1101.py | 1f668d42b107b753f0fa5bc0a915bead298cdabd | [
"MIT"
] | permissive | datamade/django-councilmatic-notifications | e070fb2d6f5517ce7dadc5d4ee72c41345769f61 | 0aca66b9e87de40f43861460ef18fedcbaa0b986 | refs/heads/master | 2021-01-13T13:23:06.496375 | 2019-04-30T15:23:49 | 2019-04-30T15:23:49 | 72,446,217 | 1 | 0 | MIT | 2019-07-31T16:42:58 | 2016-10-31T14:47:30 | Python | UTF-8 | Python | false | false | 2,447 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-14 16:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('notifications', '0002_auto_20160527_1624'),
]
operations = [
migrations.CreateModel(
name='EventsSubscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_datetime_updated', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, related_name='eventssubscriptions', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AlterField(
model_name='billactionsubscription',
name='user',
field=models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, related_name='billactionsubscriptions', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='billsearchsubscription',
name='user',
field=models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, related_name='billsearchsubscriptions', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='committeeactionsubscription',
name='user',
field=models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, related_name='committeeactionsubscriptions', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='committeeeventsubscription',
name='user',
field=models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, related_name='committeeeventsubscriptions', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='personsubscription',
name='user',
field=models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, related_name='personsubscriptions', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
f78cc07c6d13734cd0d3165674411f2e057b1611 | 22a2be95a2d8f44d0345435a59b69a80386745c2 | /TextPreprocessing/clean_rtf.py | 346c1e98757980ebc6f59b81a6083318018003ab | [
"MIT"
] | permissive | shams-sam/logic-lab | 7c8533c5198c9f0f8eef96c01a08f92db816f781 | 559990b0c3d44bfe59d32dcb8038a0cab3efc26e | refs/heads/master | 2020-04-12T07:21:26.218644 | 2019-09-27T04:55:20 | 2019-09-27T04:55:20 | 63,617,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,834 | py | # reference
# https://gist.github.com/gilsondev/7c1d2d753ddb522e7bc22511cfb08676
import re
def clean_rtf(text):
pattern = re.compile(r"\\([a-z]{1,32})(-?\d{1,10})?[ ]?|\\'([0-9a-f]{2})|\\([^a-z])|([{}])|[\r\n]+|(.)", re.I)
# control words which specify a "destionation".
destinations = frozenset((
'aftncn','aftnsep','aftnsepc','annotation','atnauthor','atndate','atnicn','atnid',
'atnparent','atnref','atntime','atrfend','atrfstart','author','background',
'bkmkend','bkmkstart','blipuid','buptim','category','colorschememapping',
'colortbl','comment','company','creatim','datafield','datastore','defchp','defpap',
'do','doccomm','docvar','dptxbxtext','ebcend','ebcstart','factoidname','falt',
'fchars','ffdeftext','ffentrymcr','ffexitmcr','ffformat','ffhelptext','ffl',
'ffname','ffstattext','field','file','filetbl','fldinst','fldrslt','fldtype',
'fname','fontemb','fontfile','fonttbl','footer','footerf','footerl','footerr',
'footnote','formfield','ftncn','ftnsep','ftnsepc','g','generator','gridtbl',
'header','headerf','headerl','headerr','hl','hlfr','hlinkbase','hlloc','hlsrc',
'hsv','htmltag','info','keycode','keywords','latentstyles','lchars','levelnumbers',
'leveltext','lfolevel','linkval','list','listlevel','listname','listoverride',
'listoverridetable','listpicture','liststylename','listtable','listtext',
'lsdlockedexcept','macc','maccPr','mailmerge','maln','malnScr','manager','margPr',
'mbar','mbarPr','mbaseJc','mbegChr','mborderBox','mborderBoxPr','mbox','mboxPr',
'mchr','mcount','mctrlPr','md','mdeg','mdegHide','mden','mdiff','mdPr','me',
'mendChr','meqArr','meqArrPr','mf','mfName','mfPr','mfunc','mfuncPr','mgroupChr',
'mgroupChrPr','mgrow','mhideBot','mhideLeft','mhideRight','mhideTop','mhtmltag',
'mlim','mlimloc','mlimlow','mlimlowPr','mlimupp','mlimuppPr','mm','mmaddfieldname',
'mmath','mmathPict','mmathPr','mmaxdist','mmc','mmcJc','mmconnectstr',
'mmconnectstrdata','mmcPr','mmcs','mmdatasource','mmheadersource','mmmailsubject',
'mmodso','mmodsofilter','mmodsofldmpdata','mmodsomappedname','mmodsoname',
'mmodsorecipdata','mmodsosort','mmodsosrc','mmodsotable','mmodsoudl',
'mmodsoudldata','mmodsouniquetag','mmPr','mmquery','mmr','mnary','mnaryPr',
'mnoBreak','mnum','mobjDist','moMath','moMathPara','moMathParaPr','mopEmu',
'mphant','mphantPr','mplcHide','mpos','mr','mrad','mradPr','mrPr','msepChr',
'mshow','mshp','msPre','msPrePr','msSub','msSubPr','msSubSup','msSubSupPr','msSup',
'msSupPr','mstrikeBLTR','mstrikeH','mstrikeTLBR','mstrikeV','msub','msubHide',
'msup','msupHide','mtransp','mtype','mvertJc','mvfmf','mvfml','mvtof','mvtol',
'mzeroAsc','mzeroDesc','mzeroWid','nesttableprops','nextfile','nonesttables',
'objalias','objclass','objdata','object','objname','objsect','objtime','oldcprops',
'oldpprops','oldsprops','oldtprops','oleclsid','operator','panose','password',
'passwordhash','pgp','pgptbl','picprop','pict','pn','pnseclvl','pntext','pntxta',
'pntxtb','printim','private','propname','protend','protstart','protusertbl','pxe',
'result','revtbl','revtim','rsidtbl','rxe','shp','shpgrp','shpinst',
'shppict','shprslt','shptxt','sn','sp','staticval','stylesheet','subject','sv',
'svb','tc','template','themedata','title','txe','ud','upr','userprops',
'wgrffmtfilter','windowcaption','writereservation','writereservhash','xe','xform',
'xmlattrname','xmlattrvalue','xmlclose','xmlname','xmlnstbl',
'xmlopen',
))
# Translation of some special characters.
specialchars = {
'par': '\n',
'sect': '\n\n',
'page': '\n\n',
'line': '\n',
'tab': '\t',
'emdash': '\u2014',
'endash': '\u2013',
'emspace': '\u2003',
'enspace': '\u2002',
'qmspace': '\u2005',
'bullet': '\u2022',
'lquote': '\u2018',
'rquote': '\u2019',
'ldblquote': '\201C',
'rdblquote': '\u201D',
}
stack = []
ignorable = False # Whether this group (and all inside it) are "ignorable".
ucskip = 1 # Number of ASCII characters to skip after a unicode character.
curskip = 0 # Number of ASCII characters left to skip
out = [] # Output buffer.
for match in pattern.finditer(text):
word,arg,hex,char,brace,tchar = match.groups()
if brace:
curskip = 0
if brace == '{':
# Push state
stack.append((ucskip,ignorable))
elif brace == '}':
# Pop state
ucskip,ignorable = stack.pop()
elif char: # \x (not a letter)
curskip = 0
if char == '~':
if not ignorable:
out.append('\xA0')
elif char in '{}\\':
if not ignorable:
out.append(char)
elif char == '*':
ignorable = True
elif word: # \foo
curskip = 0
if word in destinations:
ignorable = True
elif ignorable:
pass
elif word in specialchars:
out.append(specialchars[word])
elif word == 'uc':
ucskip = int(arg)
elif word == 'u':
c = int(arg)
if c < 0: c += 0x10000
if c > 127: out.append(chr(c)) #NOQA
else: out.append(chr(c))
curskip = ucskip
elif hex: # \'xx
if curskip > 0:
curskip -= 1
elif not ignorable:
c = int(hex,16)
if c > 127: out.append(chr(c)) #NOQA
else: out.append(chr(c))
elif tchar:
if curskip > 0:
curskip -= 1
elif not ignorable:
out.append(tchar)
return ''.join(out) | [
"[email protected]"
] | |
9edfdd5e557366a726000932aa0b0dd60096780a | 51086953db896af9558655a160b13ee2056c9fde | /flaskr/blueprints/schedule.py | deef11c53daaa3f23a1a52965f5922f792f6835a | [] | no_license | gilberto96/backend-string-test | 725652d00f84029f63d3d48ddc58442c48ba8121 | 3267b9333d0c9b0e11959df50f3b966d9a11cf50 | refs/heads/main | 2023-05-02T10:35:44.021637 | 2021-05-24T21:00:51 | 2021-05-24T21:00:51 | 370,176,275 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,308 | py | import functools
from ..models.task import Task
from ..models.user import User
from ..models.responses import ApiResponse
from flask_jwt_extended import jwt_required, get_jwt_identity, verify_jwt_in_request
from ..utils import valid_datetime, str_to_date
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for, jsonify
)
bp = Blueprint('schedule', __name__, url_prefix='/schedule')
def configure(app):
    """Register the /schedule task CRUD endpoints on *app*.

    All routes require a valid JWT whose identity payload carries the
    current user's ``id``; every query is scoped to that user.  Handlers
    are declared inside this function so the blueprint can be registered
    against the given app instance at the end.
    """
    # POST /schedule/tasks — create a task owned by the authenticated user.
    @bp.route('/tasks', methods=['POST'])
    @jwt_required()
    def register():
        json = request.get_json(force = True)
        if json.get('title') is None or len(json["title"]) == 0:
            return jsonify(ApiResponse(0, False, 'Should specify a title for the task').toJson()), 400
        if json.get('start_date') is None or len(json["start_date"]) == 0:
            return jsonify(ApiResponse(0, False, 'Should specify a start date for the task').toJson()), 400
        if json.get('due_date') is None or len(json["due_date"]) == 0:
            return jsonify(ApiResponse(0, False, 'Should specify a due date for the task').toJson()), 400
        if not valid_datetime(json["start_date"]):
            return jsonify(ApiResponse(0, False, 'Start date incorrect format, must be "Y-m-d H:M:S"').toJson()), 400
        if not valid_datetime(json["due_date"]):
            return jsonify(ApiResponse(0, False, 'Due date incorrect format, must be "Y-m-d H:M:S"').toJson()), 400
        # NOTE(review): the message says "must be greater" but the check
        # rejects start > due, i.e. start must be *earlier* — confirm wording.
        if(str_to_date(json["start_date"]) > str_to_date(json["due_date"])):
            return jsonify(ApiResponse(0, False, 'Start date must be greater than due date').toJson()), 400
        # NOTE(review): 'priority' and 'description' are read without a
        # presence check (KeyError if missing), and the range check accepts
        # 0..9 while the message says "0 to 10" — confirm intended bound.
        if not(0 <= int(json["priority"]) < 10):
            return jsonify(ApiResponse(0, False, 'Task priority must be between 0 to 10').toJson()), 400
        task = Task(
            title = json['title'],
            description = json["description"],
            start_date = json["start_date"],
            due_date = json["due_date"],
            priority = json["priority"],
            assignee_user_id = get_jwt_identity()["id"]
        )
        task = task.save()
        return jsonify(ApiResponse(task.id, True, "Task created").toJson())
    # GET /schedule/tasks/<id> — fetch one task (404 if not owned/absent).
    @bp.route('/tasks/<id>', methods=['GET'])
    @jwt_required()
    def get(id):
        task = Task.get_single(id, get_jwt_identity()["id"])
        if task is None:
            return jsonify(ApiResponse(0, False, 'Task not found').toJson()), 404
        else:
            return jsonify(task.to_dict())
    # GET /schedule/tasks — list all tasks belonging to the current user.
    @bp.route('/tasks', methods=['GET'])
    @jwt_required()
    def list():
        user_id = get_jwt_identity()["id"]
        user = User.get_by_id(user_id)
        if len(user.tasks) == 0:
            return jsonify([])
        else:
            return jsonify([ task.to_dict() for task in user.tasks ])
    # PUT /schedule/tasks — update an existing task identified by body 'id'.
    # NOTE(review): unlike register(), no date-format or priority validation
    # happens here — malformed values are stored as-is; confirm intended.
    @bp.route('/tasks', methods=['PUT'])
    @jwt_required()
    def update():
        json = request.get_json(force=True)
        if json.get('id') is None:
            return jsonify(ApiResponse(0, False, "Must provide an task id").toJson()), 400
        task = Task.get_single(json["id"], get_jwt_identity()["id"])
        if task is None:
            return jsonify(ApiResponse(0, False, "The task does not exist").toJson()), 404
        # user = User.get_by_id(json["assignee_user_id"])
        # if user is None:
        #     return jsonify(ApiResponse(None, False, 'Assignee user not found').toJson()), 404
        task.title = json['title']
        task.description = json['description']
        task.start_date = json['start_date']
        task.due_date = json['due_date']
        task.priority = json['priority']
        task = task.save()
        return jsonify(ApiResponse(task.id, True, "Task updated").toJson())
    # DELETE /schedule/tasks/<id> — remove a task owned by the current user.
    @bp.route('/tasks/<id>', methods=['DELETE'])
    @jwt_required()
    def delete(id):
        if not(id.isdigit()):
            return jsonify(ApiResponse(0, False, "Must provide an valid task id").toJson()), 400
        task = Task.get_single(id, get_jwt_identity()["id"])
        if task is None:
            return jsonify(ApiResponse(0, False, "The task does not exist").toJson()), 404
        return jsonify(ApiResponse(task.id, task.delete(), "Task removed").toJson())
    app.register_blueprint(bp)
"[email protected]"
] | |
a75e53fd862abfcf88319f37ed95b87e48409e47 | 375ad47152e6b99120a91783f1a8c2cee36040b3 | /functions_practice/summer_of_69.py | ad362bf51bce24baf088aeb45aa6841f415ba8e8 | [] | no_license | arundhiman86/Python | c2620373cdf3944a3851888db80a851526c1a522 | a86d4c4c3e99e9197200b057742ac060f3143524 | refs/heads/master | 2020-05-31T13:51:58.881111 | 2019-06-05T03:17:03 | 2019-06-05T03:17:03 | 190,315,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | '''
SUMMER OF '69: Return the sum of the numbers in the array, except ignore sections of numbers starting with a 6 and extending to the next 9 (every 6 will be followed by at least one 9). Return 0 for no numbers.
summer_69([1, 3, 5]) --> 9
summer_69([4, 5, 6, 7, 8, 9]) --> 9
summer_69([2, 1, 6, 9, 11]) --> 14
'''
def summer_69(arr):
    """Sum the numbers in *arr*, skipping every section that starts at a 6
    and extends up to (and including) the next 9.  Returns 0 for an empty
    list.

    Examples:
        summer_69([1, 3, 5])          -> 9
        summer_69([4, 5, 6, 7, 8, 9]) -> 9
        summer_69([2, 1, 6, 9, 11])   -> 14
    """
    total = 0
    skipping = False
    for num in arr:
        if skipping:
            # Inside a 6..9 section: ignore everything, close it at the 9.
            if num == 9:
                skipping = False
        elif num == 6:
            # Open a skipped section; the 6 itself is not counted.
            skipping = True
        else:
            total += num
    return total
"[email protected]"
] | |
cf155c651d505d6fc6420dc0626f1c4ab2536fc9 | 5f7119aee868e29a483cc0ac057c429daf73c0e6 | /backend/settings.py | 20674a1b087c687a3b34706dc7ed278a94f2ec6d | [] | no_license | sksharma-123/deploy | 1f0040824f73b855349e558e56914d3f368ce965 | 9bb86d45265101eb55a03c256e8666ae066bedd7 | refs/heads/master | 2023-08-16T01:51:45.721099 | 2020-05-25T00:31:24 | 2020-05-25T00:31:24 | 266,531,584 | 0 | 0 | null | 2021-09-22T19:04:55 | 2020-05-24T11:59:29 | JavaScript | UTF-8 | Python | false | false | 3,257 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two directories above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'l+)cbn^(@zunuk(**pgfx$n)5m89!$^u%0=7cd=c$ie^ego55@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): an empty list only works while DEBUG=True; add hostnames
# before deploying.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'decoder',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Serves templates from the front-end build output directory
        # (see STATICFILES_DIRS below for its static assets).
        'DIRS': [os.path.join(BASE_DIR, 'build')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'build/static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
"[email protected]"
] | |
04ac895d948f08d461a4a5e743c4a28362d56b19 | fc2f1d637fd2e051fb11fd8d0dfa5dc1930f94e9 | /Adversarial Robustness of Deep Convolutional Candlestick Learner/foolbox/adversarial.py | 399a19e4dd8567d3701aea001bd9e4ef19ac06cc | [] | no_license | albert0796/FinancialVision | e38cb3a738e7f5aca6cd2e315b3f93c8a0e2d8af | aa48a3b791c584f80938db8f04bbf6fa8daf984c | refs/heads/master | 2022-11-21T04:18:30.289159 | 2022-08-02T16:43:55 | 2022-08-02T16:43:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,079 | py | """
Provides a class that represents an adversarial example.
"""
import numpy as np
import numbers
from .distances import Distance
from .distances import MSE
class StopAttack(Exception):
    """Exception thrown to request early stopping of an attack
    if a given (optional!) threshold is reached.

    Raised by :class:`Adversarial` as soon as a perturbation whose distance
    is at or below the configured threshold has been found; attacks can let
    it propagate to terminate early.
    """
    pass
class Adversarial(object):
    """Defines an adversarial that should be found and stores the result.
    The :class:`Adversarial` class represents a single adversarial example
    for a given model, criterion and reference input. It can be passed to
    an adversarial attack to find the actual adversarial perturbation.

    It also tracks bookkeeping state: the best (smallest-distance)
    adversarial found so far and the number of prediction/gradient calls
    made in total and up to that best adversarial.

    Parameters
    ----------
    model : a :class:`Model` instance
        The model that should be fooled by the adversarial.
    criterion : a :class:`Criterion` instance
        The criterion that determines which inputs are adversarial.
    unperturbed : a :class:`numpy.ndarray`
        The unperturbed input to which the adversarial input should be as close as possible.
    original_class : int
        The ground-truth label of the unperturbed input.
    distance : a :class:`Distance` class
        The measure used to quantify how close inputs are.
    threshold : float or :class:`Distance`
        If not None, the attack will stop as soon as the adversarial
        perturbation has a size smaller than this threshold. Can be
        an instance of the :class:`Distance` class passed to the distance
        argument, or a float assumed to have the same unit as the
        the given distance. If None, the attack will simply minimize
        the distance as good as possible. Note that the threshold only
        influences early stopping of the attack; the returned adversarial
        does not necessarily have smaller perturbation size than this
        threshold; the `reached_threshold()` method can be used to check
        if the threshold has been reached.
    """

    def __init__(
        self,
        model,
        criterion,
        unperturbed,
        original_class,
        distance=MSE,
        threshold=None,
        verbose=False,
    ):
        self.__model = model
        self.__criterion = criterion
        self.__unperturbed = unperturbed
        self.__unperturbed_for_distance = unperturbed
        self.__original_class = original_class
        self.__distance = distance
        # A bare number is interpreted in the units of the configured
        # distance measure and wrapped into a Distance instance.
        if threshold is not None and not isinstance(threshold, Distance):
            threshold = distance(value=threshold)
        self.__threshold = threshold
        self.verbose = verbose
        # Best-so-far bookkeeping (updated by __new_adversarial).
        self.__best_adversarial = None
        self.__best_distance = distance(value=np.inf)
        self.__best_adversarial_output = None
        self._total_prediction_calls = 0
        self._total_gradient_calls = 0
        self._best_prediction_calls = 0
        self._best_gradient_calls = 0
        # check if the original input is already adversarial
        self._check_unperturbed()

    def _check_unperturbed(self):
        # Run one forward pass on the unperturbed input; if it is already
        # misclassified it becomes the best adversarial with distance 0.
        try:
            self.forward_one(self.__unperturbed)
        except StopAttack:
            # if a threshold is specified and the unperturbed input is
            # misclassified, this can already cause a StopAttack
            # exception
            assert self.distance.value == 0.0

    def _reset(self):
        # Clear the best-so-far state and re-test the unperturbed input.
        self.__best_adversarial = None
        self.__best_distance = self.__distance(value=np.inf)
        self.__best_adversarial_output = None
        self._best_prediction_calls = 0
        self._best_gradient_calls = 0
        self._check_unperturbed()

    @property
    def perturbed(self):
        """The best adversarial example found so far."""
        return self.__best_adversarial

    @property
    def output(self):
        """The model predictions for the best adversarial found so far.
        None if no adversarial has been found.
        """
        return self.__best_adversarial_output

    @property
    def adversarial_class(self):
        """The argmax of the model predictions for the best adversarial found so far.
        None if no adversarial has been found.
        """
        if self.output is None:
            return None
        return np.argmax(self.output)

    @property
    def distance(self):
        """The distance of the adversarial input to the original input."""
        return self.__best_distance

    @property
    def unperturbed(self):
        """The original input."""
        return self.__unperturbed

    @property
    def original_class(self):
        """The class of the original input (ground-truth, not model prediction)."""
        return self.__original_class

    @property
    def _model(self):  # pragma: no cover
        """Should not be used."""
        return self.__model

    @property
    def _criterion(self):  # pragma: no cover
        """Should not be used."""
        return self.__criterion

    @property
    def _distance(self):  # pragma: no cover
        """Should not be used."""
        return self.__distance

    def set_distance_dtype(self, dtype):
        """Compute distances against the unperturbed input cast to *dtype*.

        *dtype* must be at least as precise as the input's own dtype.
        """
        assert dtype >= self.__unperturbed.dtype
        self.__unperturbed_for_distance = self.__unperturbed.astype(dtype, copy=False)

    def reset_distance_dtype(self):
        """Revert to computing distances in the input's original dtype."""
        self.__unperturbed_for_distance = self.__unperturbed

    def normalized_distance(self, x):
        """Calculates the distance of a given input x to the original input.
        Parameters
        ----------
        x : `numpy.ndarray`
            The input x that should be compared to the original input.
        Returns
        -------
        :class:`Distance`
            The distance between the given input and the original input.
        """
        return self.__distance(self.__unperturbed_for_distance, x, bounds=self.bounds())

    def reached_threshold(self):
        """Returns True if a threshold is given and the currently
        best adversarial distance is smaller than the threshold."""
        return self.__threshold is not None and self.__best_distance <= self.__threshold

    def __new_adversarial(self, x, predictions, in_bounds):
        """Record *x* as the new best adversarial if it improves the distance.

        Returns a tuple ``(is_best, distance)``; raises :class:`StopAttack`
        once the optional threshold has been reached.
        """
        x = x.copy()  # to prevent accidental inplace changes
        distance = self.normalized_distance(x)
        if in_bounds and self.__best_distance > distance:
            # new best adversarial
            if self.verbose:
                print("new best adversarial: {}".format(distance))
            self.__best_adversarial = x
            self.__best_distance = distance
            self.__best_adversarial_output = predictions
            self._best_prediction_calls = self._total_prediction_calls
            self._best_gradient_calls = self._total_gradient_calls
            if self.reached_threshold():
                raise StopAttack
            return True, distance
        return False, distance

    def __is_adversarial(self, x, predictions, in_bounds):
        """Interface to criterion.is_adverarial that calls
        __new_adversarial if necessary.
        Parameters
        ----------
        x : :class:`numpy.ndarray`
            The input that should be checked.
        predictions : :class:`numpy.ndarray`
            A vector with the pre-softmax predictions for some input x.
        label : int
            The label of the unperturbed reference input.
        """
        is_adversarial = self.__criterion.is_adversarial(
            predictions, self.__original_class
        )
        assert isinstance(is_adversarial, bool) or isinstance(is_adversarial, np.bool_)
        if is_adversarial:
            is_best, distance = self.__new_adversarial(x, predictions, in_bounds)
        else:
            is_best = False
            distance = None
        return is_adversarial, is_best, distance

    @property
    def target_class(self):
        """Interface to criterion.target_class for attacks.
        """
        # Criteria without a target (untargeted attacks) lack the method.
        try:
            target_class = self.__criterion.target_class()
        except AttributeError:
            target_class = None
        return target_class

    def num_classes(self):
        """Number of output classes of the wrapped model."""
        n = self.__model.num_classes()
        assert isinstance(n, numbers.Number)
        return n

    def bounds(self):
        """The (min, max) pixel-value bounds of the wrapped model."""
        min_, max_ = self.__model.bounds()
        assert isinstance(min_, numbers.Number)
        assert isinstance(max_, numbers.Number)
        assert min_ < max_
        return min_, max_

    def in_bounds(self, input_):
        """True if all values of *input_* lie within the model bounds."""
        min_, max_ = self.bounds()
        return min_ <= input_.min() and input_.max() <= max_

    def channel_axis(self, batch):
        """Interface to model.channel_axis for attacks.
        Parameters
        ----------
        batch : bool
            Controls whether the index of the axis for a batch of inputs
            (4 dimensions) or a single input (3 dimensions) should be returned.
        """
        axis = self.__model.channel_axis()
        if not batch:
            axis = axis - 1
        return axis

    def has_gradient(self):
        """Returns true if _backward and _forward_backward can be called
        by an attack, False otherwise.
        """
        # Probe for the gradient-related API on the wrapped model.
        try:
            self.__model.gradient
            self.__model.gradient_one
            self.__model.forward_and_gradient_one
            self.__model.backward
            self.__model.backward_one
        except AttributeError:
            return False
        else:
            return True

    def forward_one(self, x, strict=True, return_details=False):
        """Interface to model.forward_one for attacks.
        Parameters
        ----------
        x : `numpy.ndarray`
            Single input with shape as expected by the model
            (without the batch dimension).
        strict : bool
            Controls if the bounds for the pixel values should be checked.
        """
        in_bounds = self.in_bounds(x)
        assert not strict or in_bounds
        self._total_prediction_calls += 1
        predictions = self.__model.forward_one(x)
        is_adversarial, is_best, distance = self.__is_adversarial(
            x, predictions, in_bounds
        )
        assert predictions.ndim == 1
        if return_details:
            return predictions, is_adversarial, is_best, distance
        else:
            return predictions, is_adversarial

    def forward(self, inputs, greedy=False, strict=True, return_details=False):
        """Interface to model.forward for attacks.
        Parameters
        ----------
        inputs : `numpy.ndarray`
            Batch of inputs with shape as expected by the model.
        greedy : bool
            Whether the first adversarial should be returned.
        strict : bool
            Controls if the bounds for the pixel values should be checked.
        """
        if strict:
            in_bounds = self.in_bounds(inputs)
            assert in_bounds
        self._total_prediction_calls += len(inputs)
        predictions = self.__model.forward(inputs)
        assert predictions.ndim == 2
        assert predictions.shape[0] == inputs.shape[0]
        if return_details:
            assert greedy
        adversarials = []
        for i in range(len(predictions)):
            if strict:
                in_bounds_i = True
            else:
                # bounds were not checked for the whole batch above
                in_bounds_i = self.in_bounds(inputs[i])
            is_adversarial, is_best, distance = self.__is_adversarial(
                inputs[i], predictions[i], in_bounds_i
            )
            if is_adversarial and greedy:
                # greedy mode: stop at the first adversarial in the batch
                if return_details:
                    return predictions, is_adversarial, i, is_best, distance
                else:
                    return predictions, is_adversarial, i
            adversarials.append(is_adversarial)
        if greedy:  # pragma: no cover
            # no adversarial found
            if return_details:
                return predictions, False, None, False, None
            else:
                return predictions, False, None
        is_adversarial = np.array(adversarials)
        assert is_adversarial.ndim == 1
        assert is_adversarial.shape[0] == inputs.shape[0]
        return predictions, is_adversarial

    def gradient_one(self, x=None, label=None, strict=True):
        """Interface to model.gradient_one for attacks.
        Parameters
        ----------
        x : `numpy.ndarray`
            Single input with shape as expected by the model
            (without the batch dimension).
            Defaults to the original input.
        label : int
            Label used to calculate the loss that is differentiated.
            Defaults to the original label.
        strict : bool
            Controls if the bounds for the pixel values should be checked.
        """
        assert self.has_gradient()
        if x is None:
            x = self.__unperturbed
        if label is None:
            label = self.__original_class
        assert not strict or self.in_bounds(x)
        self._total_gradient_calls += 1
        gradient = self.__model.gradient_one(x, label)
        assert gradient.shape == x.shape
        return gradient

    def forward_and_gradient_one(
        self, x=None, label=None, strict=True, return_details=False
    ):
        """Interface to model.forward_and_gradient_one for attacks.
        Parameters
        ----------
        x : `numpy.ndarray`
            Single input with shape as expected by the model
            (without the batch dimension).
            Defaults to the original input.
        label : int
            Label used to calculate the loss that is differentiated.
            Defaults to the original label.
        strict : bool
            Controls if the bounds for the pixel values should be checked.
        """
        assert self.has_gradient()
        if x is None:
            x = self.__unperturbed
        if label is None:
            label = self.__original_class
        in_bounds = self.in_bounds(x)
        assert not strict or in_bounds
        # counts as one prediction call and one gradient call
        self._total_prediction_calls += 1
        self._total_gradient_calls += 1
        predictions, gradient = self.__model.forward_and_gradient_one(x, label)
        is_adversarial, is_best, distance = self.__is_adversarial(
            x, predictions, in_bounds
        )
        assert predictions.ndim == 1
        assert gradient.shape == x.shape
        if return_details:
            return predictions, gradient, is_adversarial, is_best, distance
        else:
            return predictions, gradient, is_adversarial

    def forward_and_gradient(self, x, label=None, strict=True, return_details=False):
        """Interface to model.forward_and_gradient_one for attacks.
        Parameters
        ----------
        x : `numpy.ndarray`
            Multiple input with shape as expected by the model
            (with the batch dimension).
        label : `numpy.ndarray`
            Labels used to calculate the loss that is differentiated.
            Defaults to the original label.
        strict : bool
            Controls if the bounds for the pixel values should be checked.
        """
        assert self.has_gradient()
        if label is None:
            label = np.ones(len(x), dtype=np.int) * self.__original_class
        in_bounds = self.in_bounds(x)
        assert not strict or in_bounds
        self._total_prediction_calls += len(x)
        self._total_gradient_calls += len(x)
        predictions, gradients = self.__model.forward_and_gradient(x, label)
        assert predictions.ndim == 2
        assert gradients.shape == x.shape
        is_adversarials, is_bests, distances = [], [], []
        for single_x, prediction in zip(x, predictions):
            is_adversarial, is_best, distance = self.__is_adversarial(
                single_x, prediction, in_bounds
            )
            is_adversarials.append(is_adversarial)
            is_bests.append(is_best)
            distances.append(distance)
        is_adversarials = np.array(is_adversarials)
        is_bests = np.array(is_bests)
        distances = np.array(distances)
        if return_details:
            return predictions, gradients, is_adversarials, is_bests, distances
        else:
            return predictions, gradients, is_adversarials

    def backward_one(self, gradient, x=None, strict=True):
        """Interface to model.backward_one for attacks.
        Parameters
        ----------
        gradient : `numpy.ndarray`
            Gradient of some loss w.r.t. the logits.
        x : `numpy.ndarray`
            Single input with shape as expected by the model
            (without the batch dimension).
        Returns
        -------
        gradient : `numpy.ndarray`
            The gradient w.r.t the input.
        See Also
        --------
        :meth:`gradient`
        """
        assert self.has_gradient()
        assert gradient.ndim == 1
        if x is None:
            x = self.__unperturbed
        assert not strict or self.in_bounds(x)
        self._total_gradient_calls += 1
        gradient = self.__model.backward_one(gradient, x)
        assert gradient.shape == x.shape
        return gradient
| [
"[email protected]"
] | |
3b7bfee98ced3160b68b493d09c18113650dca3a | 46d0c6800ce7885e986856f9a46b1085a714ba52 | /1_assert.py | f2704f725d34fa90da47e32d87f6464a7778bc6b | [] | no_license | radleap/PythonTricksAndPractice | 256cdf11b8f538ca1e38ff72f2a8bd51bc9311d0 | f94cf35360628274aa306ce96aadca0836ee9b44 | refs/heads/main | 2023-07-14T19:42:39.783694 | 2021-08-24T01:48:01 | 2021-08-24T01:48:01 | 399,300,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py |
# assertions to find irrecoverable errors in a program, not to signal file not found
# meant to be internal selfchecks, by declaring some conditions are impossible in the code.
# not a mechanism for handling runtime errors.
# asserts can be globally disabled with an interpreter setting - beware
def apply_discount(product, discount):
price = int(product['price'] * (1.0 - discount))
assert 0 <= price <= product['price']
return price
# this should work
shoes = {'name':'Fancy Shoes', 'price':14900}
print(apply_discount(shoes, 0.25))
# raises an AssertionError b/c violates condition
print(apply_discount(shoes, 2.0)) | [
"[email protected]"
] | |
28a77e8e92f6675ff62f3a5e8cd554731297e41b | 7b33e10c09b17ed5d660dd347d6bbe6dd4805c43 | /posts/admin.py | 9e76ef529acce1bf51b69d3e925e281b544e3241 | [] | no_license | Sipho123/umnotho | 9e57f64d209d7e099aea5a2dc00ee733cfcd437e | 66ec81a1f7197f3137619179be153fa2e915e374 | refs/heads/master | 2023-04-30T04:02:28.566226 | 2021-05-24T11:55:21 | 2021-05-24T11:55:21 | 369,207,877 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | from django.contrib import admin
from .models import Post, Comment, Like
# Register your models here.
# Each model is exposed in the admin with the default ModelAdmin options.
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Like)
| [
"[email protected]"
] | |
42370ef4fa32c6ca643bb2ca8379cea6720204da | db4989445d079b55ffafb34af2358f27559193c3 | /Test/TestAboutBox1.py | a69c5829ac79257e417cd935358ae546bfbba9e6 | [] | no_license | TonyKing63/Python-Samples | 4e56e1f6b8b8fbb13828c43644e36743e0953fec | b827e82fc7a6e2984c49fae759b53db90e2f0c80 | refs/heads/master | 2021-09-07T10:08:59.002967 | 2018-02-21T12:31:07 | 2018-02-21T12:31:07 | 119,788,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,139 | py | #!/usr/bin/env python
import wx
import wx.adv
from wx.lib.wordwrap import wordwrap
class MyApp(wx.App):
    """Minimal wxPython app: one frame with a button that opens a wx.AboutBox.

    Adapted from the wxPython demo's TestPanel sample; the commented lines
    below record what was changed from the original demo code.
    """
    def OnInit(self):
        """Build the main frame and panel; called by wx.App on construction.

        Returns True to tell wxPython that initialisation succeeded.
        """
        self.frame = wx.Frame(None, wx.ID_ANY, title='My Title')
        self.panel = wx.Panel(self.frame, wx.ID_ANY)
        # copy the code for the AboutBox
        #----------------------------------------------------------------------
        # class TestPanel(wx.Panel):
        #     def __init__(self, parent, log):
        #         self.log = log
        #         wx.Panel.__init__(self, parent, -1)
        # change the button's parent to refer to my panel
        # b = wx.Button(self, -1, "Show a wx.AboutBox", (50,50))
        b = wx.Button(self.panel, -1, "Show a wx.AboutBox", (50,50))
        self.Bind(wx.EVT_BUTTON, self.OnButton, b)
        self.frame.Show()
        return True
    def OnButton(self, evt):
        """Button handler: fill a wx.adv.AboutDialogInfo and show the AboutBox."""
        # First we create and fill the info object
        info = wx.adv.AboutDialogInfo()
        info.Name = "Hello World"
        info.Version = "1.2.3"
        info.Copyright = "(c) 2013-2017 Programmers and Coders Everywhere"
        info.Description = wordwrap(
            "A \"hello world\" program is a software program that prints out "
            "\"Hello world!\" on a display device. It is used in many introductory "
            "tutorials for teaching a programming language."
            "\n\nSuch a program is typically one of the simplest programs possible "
            "in a computer language. A \"hello world\" program can be a useful "
            "sanity test to make sure that a language's compiler, development "
            "environment, and run-time environment are correctly installed.",
            # change the wx.ClientDC to use self.panel instead of self
            350, wx.ClientDC(self.panel))
        info.WebSite = ("http://en.wikipedia.org/wiki/Hello_world", "Hello World home page")
        info.Developers = [ "Joe Programmer",
                            "Jane Coder",
                            "Vippy the Mascot" ]
        # change the wx.ClientDC to use self.panel instead of self
        info.License = wordwrap(licenseText, 500, wx.ClientDC(self.panel))
        # Then we call wx.AboutBox giving it that info object
        wx.adv.AboutBox(info)
#----------------------------------------------------------------------
# def runTest(frame, nb, log):
# win = TestPanel(nb, log)
# return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>wx.AboutBox</center></h2>
This function shows the native standard about dialog containing the
information specified in info. If the current platform has a native
about dialog which is capable of showing all the fields in info, the
native dialog is used, otherwise the function falls back to the
generic wxWidgets version of the dialog.
</body></html>
"""
licenseText = "blah " * 250 + "\n\n" +"yadda " * 100
# if __name__ == '__main__':
# import sys,os
# import run
# run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
# Run the demo when executed as a script: create the app and enter the
# wx main event loop (blocks until the frame is closed).
if __name__ == '__main__':
    app = MyApp()
    app.MainLoop()
"[email protected]"
] | |
99446ab75ad240d2ac48b94dddfabc90376335bb | 4ed5876ea8e14eba3ed04e752807c950b3a4752f | /Python_Managing_Data_And_Processes_Reading_Command_Line_Arguments_and_Exit_Status.py | 253f8703f37a19e97e0f3d9943ed0d44189e9e89 | [] | no_license | ChiranthakaJ/Google-Using-Python-to-Interact-with-the-Operating-System | 0b557ef6ebc99ea59bd84ae19ac9393fcd0eb5c9 | 40ee2efc730c1bbe68d36c82fb9d50ef31fc8871 | refs/heads/master | 2023-01-05T13:08:35.214360 | 2020-11-01T10:39:15 | 2020-11-01T10:39:15 | 299,016,153 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,553 | py | #Up to now. We've seen how different programs can read from and write to standard IO streams and how the show environment can influence execution of a program. Yet another common way of providing information to our programs is through command line arguments.
#These are parameters that are passed to a program when it started. It's a super common practice to make our scripts receive certain values by command line arguments. It allows a code of the script to be generic while also letting us run it automatically without requiring any interactive user input.
#This means that these arguments are really useful for system administration tasks. That's because we can specify the information that we want our program to use before it even starts. This lets us create more and more automation and you can't argue with that.
#Although you could argue my humor isn't really that humorous. What can I say I'm a better coder than I am a comedian. Anyway, we can access these values using the argv in the sys module. Let's check this out by executing a very simple script that just prints this value. First, let's use our friendly cat command to look at what the script does.
# Print the full argv list: argv[0] is the script's own path, the rest are
# the command-line arguments passed to it.
import sys
print(sys.argv)
'''
#Empty
'''
#As you can see, our script just imports the sys module and prints the sys.argv list. Now, let's see what happens when we call the program.
#In this case, we called the script without any parameters. The list contains one single element. The name of the program that we just executed. Let's try passing a few parameters.
#Now, we see that each of the parameters that we pass is included as a separate element in the list and last up we have the concept of exit status or return code, which provides another source of information between the shell and the programs that get executed inside of it.
#The exit status is a value returned by a program to the shell. In all Unix-like operating systems, the exit status of the process is zero when the process succeeds and different than zero if it fails.
#The actual number returned gives additional info on what kind of error the program encountered. Knowing if a command finish successfully or not is helpful information which can be used by a program that's calling a command.
#For example, it can use the information to retry the command. If it failed. To check the exit status of a program, we can use a special variable that lets us see what the exit status of the last executed command was.
#The variable is the question mark variable. So to see the contents we use dollar sign question mark. Let's try this out using the WC command, which counts the number of lines words and characters in a file. First, we'll pass it our variables up Py Script and check the exit value.
#So here we first ran the WC command and it printed the values of lines, words and characters for our Python script. Then we printed the contents dollar sign question mark variable, and we can see that the exit value was zero. That's because WC ran successfully.
#Here WC couldn't run for the file that we pass because it doesn't exist. The command printed an error and when printing the contents of the dollar sign question mark variable, we see that it finished with an exit value of one.
#So that's with system commands, but what about Python scripts? When a Python script finishes successfully, it exits with an exit value of zero. When it finishes with an error like type error or value error, it exits with a different value than zero. We can make it exit with whatever value is relevant. Let's check out an example of this.
# Create the file named on the command line, or exit with status 1 if it
# already exists (non-zero exit status signals failure to the shell).
import os
import sys
# First command-line argument is the target file name (IndexError if missing).
filename=sys.argv[1]
if not os.path.exists(filename):
    with open(filename, "w") as f:
        f.write("New file created\n")
else:
    print("Error, the file {} already exists!".format(filename))
    sys.exit(1)
#This script receives a file name as a command line argument. It first checks whether the file name exist or not. When the file doesn't exist, it creates it by writing a line to it.
#When the file exist, our script print an error message and exits with an exit value of one. To try this out let's first execute the script and pass a file that doesn't exist.
#Nice looks like that was successful. Check out how it exited with the exit code zero even though we didn't specify this in the code. That's because that's the default behavior. Let's look at the contents of the file to make sure it's got what it should. Okay and what do you think will happen if we now run the command again?
#You guessed it. We get an error because the file already exists and so we get an exit code of one. So we've now seen how we can pass command line arguments to our Python programs and how we can make our programs tell us whether they've finished successfully or not.
#These are both important tools that we'll use when creating automation. We'll use command line parameters to tell our programs what we want them to do without having to interact with them and we'll use exit values to know if our command succeeded or failed and then log failures and automatically retry the commands if we need to.
#Well, we've definitely learned a lot over these last few videos. Chances are it got a little tricky at some points but you're doing an awesome job not letting these complex concepts stop you. Since you made it this far, you're bound to master all the ways that we can make our code interact with our shell environment. As always, take your time to review and then head on over to the quiz to put your new knowledge to practice. | [
"[email protected]"
] | |
c33fb715afdbae144856b8334e626e548903d2ee | 331fca39c99354bb96f9f07a2309c59c34a9fb15 | /lib/python2.6/site-packages/formencode/rewritingparser.py | d568b4df9105b777a554187c0a737f5a211c2113 | [] | no_license | rudyvallejos/GestionItems | d368a940a63cae9a2e5845cdf50db6b232aa9871 | 1eb56b582f0539c883a4914ad48291941b3c6c38 | refs/heads/master | 2016-09-06T08:37:46.582041 | 2011-07-09T18:39:02 | 2011-07-09T18:39:02 | 1,662,813 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,017 | py | import HTMLParser
import re
import cgi
from htmlentitydefs import name2codepoint
def html_quote(v):
    """HTML-escape *v* for safe insertion into markup (Python 2 code).

    - None becomes the empty string.
    - Objects exposing __html__ are trusted to return already-safe markup.
    - Strings are escaped with cgi.escape(quote=1), which also escapes '"'.
    - Anything else is coerced via __unicode__ (preferred) or str, then escaped.
    """
    if v is None:
        return ''
    elif hasattr(v, '__html__'):
        return v.__html__()
    elif isinstance(v, basestring):
        return cgi.escape(v, 1)
    else:
        if hasattr(v, '__unicode__'):
            v = unicode(v)
        else:
            v = str(v)
        return cgi.escape(v, 1)
class RewritingParser(HTMLParser.HTMLParser):
    """HTML parser that echoes its input while allowing tags to be rewritten.

    Subclasses override tag handlers and call write_tag/write_text; all
    other markup is copied through verbatim via write_pos, which tracks
    how much of the source has already been emitted.  Python 2 code
    (uses basestring/unichr and py2 except syntax).
    """
    # Optional observer with a reset() method, notified at the start of
    # each feed() — assumed to be set by subclasses; TODO confirm protocol.
    listener = None
    # When True, the next write_pos() call swallows its span instead of
    # copying it to the output (used to drop a piece of source markup).
    skip_next = False
    def __init__(self):
        # Accumulated output fragments; joined by _get_text().
        self._content = []
        HTMLParser.HTMLParser.__init__(self)
    def feed(self, data):
        """Parse *data*, remembering the raw source for pass-through copying."""
        self.data_is_str = isinstance(data, str)
        self.source = data
        self.lines = data.split('\n')
        # (line, offset) of the first source position not yet written out;
        # HTMLParser positions are 1-based lines, 0-based offsets.
        self.source_pos = 1, 0
        if self.listener:
            self.listener.reset()
        HTMLParser.HTMLParser.feed(self, data)
    # Entity/char reference patterns used by unescape().
    _entityref_re = re.compile('&([a-zA-Z][-.a-zA-Z\d]*);')
    _charref_re = re.compile('&#(\d+|[xX][a-fA-F\d]+);')
    def unescape(self, s):
        """Replace named and numeric character references in *s* with characters."""
        s = self._entityref_re.sub(self._sub_entityref, s)
        s = self._charref_re.sub(self._sub_charref, s)
        return s
    def _sub_entityref(self, match):
        # Regex-sub callback for named entities like &amp;.
        name = match.group(1)
        if name not in name2codepoint:
            # If we don't recognize it, pass it through as though it
            # wasn't an entity ref at all
            return match.group(0)
        return unichr(name2codepoint[name])
    def _sub_charref(self, match):
        # Regex-sub callback for numeric refs: &#123; decimal or &#x7B; hex.
        num = match.group(1)
        if num.lower().startswith('x'):
            num = int(num[1:], 16)
        else:
            num = int(num)
        return unichr(num)
    def handle_misc(self, whatever):
        # Default handler: copy the source consumed so far straight through.
        self.write_pos()
    # Everything that is not explicitly rewritten is passed through verbatim.
    handle_charref = handle_misc
    handle_entityref = handle_misc
    handle_data = handle_misc
    handle_comment = handle_misc
    handle_decl = handle_misc
    handle_pi = handle_misc
    unknown_decl = handle_misc
    handle_endtag = handle_misc
    def write_tag(self, tag, attrs, startend=False):
        """Emit an opening (or self-closing, if *startend*) tag.

        Attributes whose name starts with 'form:' are internal and dropped;
        values are HTML-escaped via html_quote.
        """
        attr_text = ''.join([' %s="%s"' % (n, html_quote(v))
                             for (n, v) in attrs
                             if not n.startswith('form:')])
        if startend:
            attr_text += " /"
        self.write_text('<%s%s>' % (tag, attr_text))
    def skip_output(self):
        # Hook for subclasses: return True to suppress pass-through copying.
        return False
    def write_pos(self):
        """Copy source text from the last written position up to the current
        parser position, honoring skip_output()/skip_next suppression."""
        cur_line, cur_offset = self.getpos()
        if self.skip_output():
            self.source_pos = self.getpos()
            return
        if self.skip_next:
            self.skip_next = False
            self.source_pos = self.getpos()
            return
        if cur_line == self.source_pos[0]:
            # Still on the same source line: emit the slice in between.
            self.write_text(
                self.lines[cur_line-1][self.source_pos[1]:cur_offset])
        else:
            # Span crosses lines: finish the first line, copy whole middle
            # lines, then the head of the current line.
            self.write_text(
                self.lines[self.source_pos[0]-1][self.source_pos[1]:])
            self.write_text('\n')
            for i in range(self.source_pos[0]+1, cur_line):
                self.write_text(self.lines[i-1])
                self.write_text('\n')
            self.write_text(self.lines[cur_line-1][:cur_offset])
        self.source_pos = self.getpos()
    def write_text(self, text):
        # Append one output fragment (joined later by _get_text()).
        self._content.append(text)
    def get_attr(self, attr, name, default=None):
        """Return the value for *name* (case-insensitive) in the attr list,
        or *default* when absent."""
        for n, value in attr:
            if n.lower() == name:
                return value
        return default
    def set_attr(self, attr, name, value):
        """Replace the first attribute named *name* in-place, or append it."""
        for i in range(len(attr)):
            if attr[i][0].lower() == name:
                attr[i] = (name, value)
                return
        attr.append((name, value))
    def del_attr(self, attr, name):
        """Remove the first attribute named *name* (case-insensitive), if any."""
        for i in range(len(attr)):
            if attr[i][0].lower() == name:
                del attr[i]
                break
    def add_class(self, attr, class_name):
        """Append *class_name* to the tag's class attribute (space-separated)."""
        current = self.get_attr(attr, 'class', '')
        new = current + ' ' + class_name
        self.set_attr(attr, 'class', new.strip())
    def text(self):
        """Return the final rewritten document.

        Raises if close() has not yet populated self._text — presumably a
        subclass's close() assigns it from _get_text(); not visible here.
        """
        try:
            return self._text
        except AttributeError:
            raise Exception(
                "You must .close() a parser instance before getting "
                "the text from it")
    def _get_text(self):
        """Join accumulated fragments, skipping tuple markers, with a
        friendlier error for mixed unicode/bytes input."""
        try:
            return ''.join([
                t for t in self._content if not isinstance(t, tuple)])
        except UnicodeDecodeError, e:
            if self.data_is_str:
                e.reason += (
                    " the form was passed in as an encoded string, but "
                    "some data or error messages were unicode strings; "
                    "the form should be passed in as a unicode string")
            else:
                e.reason += (
                    " the form was passed in as an unicode string, but "
                    "some data or error message was an encoded string; "
                    "the data and error messages should be passed in as "
                    "unicode strings")
            raise
| [
"[email protected]"
] | |
67aa46473b39a388f276b427bc2e2b11105e32e1 | 7e34568308219ec28d68c48d828574d127ed9d3e | /Naive Bayes/Naive Bayes.py | 3455416797723805a50fc07a43667b29255882c8 | [] | no_license | WeebMogul/Senku-Sentiment-Analyzer-2.0 | d833fee3959ae793e16d17eb2ba37932f8b4c34b | d650103e94e2d89dcca689bd279828ad84f31323 | refs/heads/master | 2022-12-05T20:33:53.993585 | 2020-08-27T10:36:51 | 2020-08-27T10:36:51 | 230,595,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,107 | py | from sklearn.neighbors import KNeighborsClassifier
import nltk
import numpy as np
from nltk.corpus import stopwords
from sklearn.utils import resample,shuffle
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, cross_validate,cross_val_score
from sklearn.metrics import make_scorer, precision_score, recall_score,f1_score, accuracy_score,confusion_matrix
import pandas as pd
import string
import numpy as np
from sklearn.naive_bayes import MultinomialNB,BernoulliNB,GaussianNB
from sklearn.pipeline import Pipeline
from imblearn.over_sampling import ADASYN, SMOTE, RandomOverSampler
import matplotlib.pyplot as pypl
# English stopwords plus corpus-specific filler words (fandom chatter like
# 'muda', 'dr', hedging verbs) that carry no sentiment signal here.
stopword = set(stopwords.words('english'))
stopword.update(('know','really','say','way','thing','need','look','want','actually','use', 'think', 'would',
                 'use','muda','dr','make','go','get','it','even','also','already','much','could','that','one','though','still'))
# Punctuation set (declared for filtering) and the WordNet lemmatizer instance.
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
contract = {
"ain't": "is not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"I'd": "I would",
"I'd've": "I would have",
"I'll": "I will",
"I'll've": "I will have",
"I'm": "I am",
"I've": "I have",
"i'd": "i would",
"i'd've": "i would have",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it would",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you would",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"
}
# boogiepop_file = 'D:\Github Projects\Heriot-Watt-Msc-Project-Sentiment-Analysis\Data Cleaner\Boogiepop cleaned\Cleaned_Boogiepop_Wa_Waranai_Episode_' + str(ep) + '_Comment_list.csv'
# kaguya_file = 'D:\Github Projects\Heriot-Watt-Msc-Project-Sentiment-Analysis\Data Cleaner\Kaguya-sama cleaned\Cleaned_Kaguya_sama_Episode_' + str(1) + '_Comment_list.csv'
# slime_file = 'D:\Github Projects\Heriot-Watt-Msc-Project-Sentiment-Analysis\Data Cleaner\Tensei Slime cleaned\Cleaned_Tensei_Slime_Episode_' + str(ep) + '_Comment_list.csv'
def penntag(pen):
    """Map a Penn Treebank POS tag to the WordNet POS code.

    Only the first two characters of the tag matter (e.g. 'NNS' -> 'n',
    'VBD' -> 'v').  Unknown tags fall back to 'n' (noun), which is also
    WordNet's lemmatizer default.
    """
    morphy_tag = {'NN': 'n', 'JJ': 'a',
                  'VB': 'v', 'RB': 'r'}
    # dict.get replaces the original try/bare-except, which silently
    # swallowed every exception instead of just the KeyError lookup miss.
    return morphy_tag.get(pen[:2], 'n')
def comment_cleaner(comm, comment_array):
    """Normalise one raw comment into a cleaned, lemmatised string.

    Pipeline: lower-case -> expand contractions -> drop stopwords ->
    POS-tag -> lemmatise each token -> re-join with single spaces.
    *comment_array* is accepted for interface compatibility but unused.
    """
    expanded = ' '.join(contract.get(token, token)
                        for token in comm.lower().split())
    kept = [token for token in expanded.lower().split()
            if token not in stopword]
    lemmatised = [lemma.lemmatize(token, pos=penntag(tag))
                  for token, tag in nltk.pos_tag(kept)]
    return ' '.join(lemmatised)
for ep in range(1, 2):
    # All 24 manually-labelled episode CSVs share one path template; read
    # them in a loop instead of 24 copy-pasted pd.read_csv blocks.
    csv_template = (
        r'D:\Python\Senku Sentiment Analyzer 2.0\Manually determined sentences'
        r'\Dr. Stone\Dr.Stone Episode {} Comment list with Sentiment rating.csv')
    episode_frames = [
        pd.read_csv(csv_template.format(episode), index_col=0, encoding='utf-8-sig')
        for episode in range(1, 25)]
    # Keep the historical name: df12 is the concatenation of all episodes.
    df12 = pd.concat(episode_frames)
    train_array = []
    test_array = []
    train_target = []
    comtest_array = []
    # df12 = df12.sample(frac=1)
    # Keep only the text and its label, then normalise the text.
    df12 = df12[['Comment', 'Sentiment Rating']]
    df12['Comment'] = df12['Comment'].astype(str)
    df12['Length'] = df12['Comment'].apply(len)
    #df12 = df12[df12['Length'] > 5]
    df12['Comment'] = df12['Comment'].apply(lambda s: comment_cleaner(s, train_array))
    # Remove punctuation, digits, and stranded single characters.
    df12['Comment'] = df12['Comment'].str.replace('[^\w\s]', ' ')
    df12['Comment'] = df12['Comment'].str.replace('[\d+]', ' ')
    df12['Comment'] = df12['Comment'].str.replace('(^| ).(( ).)*( |$)', ' ')
    # Split by label: 1 = positive, 0 = negative, 2 = neutral.
    pos_df = df12[df12['Sentiment Rating'] == 1]
    neg_df = df12[df12['Sentiment Rating'] == 0]
    neu_df = df12[df12['Sentiment Rating'] == 2]
    df_len = len(pos_df)
    train_df = pd.concat([pos_df, neg_df, neu_df])
    train_df = train_df.reset_index(drop=True)
    x = train_df['Comment'].values
    y = train_df['Sentiment Rating'].values
    x_train, x_test, y_train, y_test = train_test_split(x, y,
                                                        test_size=0.2, random_state=22)
    # TF-IDF over unigrams+bigrams; terms in <1% or >80% of documents dropped.
    vec = TfidfVectorizer(ngram_range=(1, 2), min_df=0.01, max_df=0.8, analyzer='word')
    # vec = CountVectorizer(ngram_range=(1, 2))
    x_tr = vec.fit_transform(x_train)
    x_ts = vec.transform(x_test)
    # Balance the classes by oversampling — training split only, so the
    # held-out test distribution stays untouched.
    sm = RandomOverSampler(random_state=22)
    X_train_res, y_train_res = sm.fit_sample(x_tr, y_train)
    # Scorer table kept for parity with the original script (currently unused).
    scores = {'Accuracy': make_scorer(accuracy_score),
              'Precision': make_scorer(precision_score),
              'Recall': make_scorer(recall_score),
              'F1-Score': make_scorer(f1_score)
              }
    NB = MultinomialNB()
    #NB = BernoulliNB()
    #NB = GaussianNB()
    NB.fit(X_train_res, y_train_res)
    ex = NB.predict(X_train_res)
    # 10-fold cross-validation accuracy on the (resampled) training data.
    cross = cross_val_score(NB, X_train_res, y_train_res, cv=10)
    print(round(cross.mean(), 2))
    print(round(cross.std(), 2))
    pred_linear = NB.predict(x_ts)
    print(accuracy_score(ex, y_train_res))      # training accuracy
    print(accuracy_score(y_test, pred_linear))  # held-out test accuracy
    #print(precision_score(y_test, pred_linear,average='None'))
    #print(recall_score(y_test, pred_linear))
    print(confusion_matrix(y_test, pred_linear))
| [
"[email protected]"
] | |
12d6b616ec022aacd24c3af885ee2f58e9666678 | ea7a0c67718f66490da2a60f9d27e2b8adc4cd3f | /settings/staging.py | 0bbcebd763087b9351e8ca77cd97a280a37aac45 | [
"BSD-3-Clause"
] | permissive | aiaudit-org/health-aiaudit-public | 7f752d521c12830f3d57fc996d1a636ab4ad66a4 | d410f4dfe85f6fd73d73c88184675a8bb4af233a | refs/heads/master | 2023-05-28T06:24:48.852998 | 2021-06-14T11:42:07 | 2021-06-14T11:42:07 | 376,797,718 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from .prod import * # noqa: ignore=F405
ALLOWED_HOSTS = ["health.aiaudit.org"]
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
"https://staging-evalai.s3.amazonaws.com",
"https://staging.health.aiaudit.org",
"http://beta-staging.health.aiaudit.org:9999",
)
| [
"[email protected]"
] | |
a4a9690a3a642c4f303beb8df79c6bfb6f0a7f44 | 0911058fde531ec5b8513b6b96d436e6d2cc7819 | /goslaunchera3/show.py | c23971d62b6411a62ad4cfbecccf5242fc61195d | [] | no_license | ChristopheTdn/GOSLauncher_A3_Python_version | 6649cbd79f87d30c58285b980202e842748ea906 | 4ed1360edc961e654a2a1e1779567c4dcfd4bf80 | refs/heads/master | 2020-04-05T09:35:30.517491 | 2018-04-15T09:15:46 | 2018-04-15T09:15:46 | 81,652,408 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,167 | py | #! /usr/bin/python3
#-*- coding: utf-8 -*-
#
# Module Interface SHOW
import os
import sys
import inspect
from PyQt5 import QtCore, QtGui, QtWidgets
from . import action
def init_app_start(self):
    """Application-start hook: refresh the mod size information.

    The mod-tab generation call is currently disabled; only the rsync-based
    size computation (action.rsyncTaille) runs at startup.
    """
    # Mods @GOS — table generation currently disabled:
    #genere_tab_ui(self)
    # Compute mod sizes via rsync.
    action.rsyncTaille(self)
def genere_tab_ui(self):
    """Populate every mod list widget from its on-disk mod directory.

    Each call pairs a UI list with the matching folder under the Arma 3
    install path; the @TEMPLATE category has special skin handling.
    """
    genereTabTemplate(self) # Specifique @TEMPLATE GOS
    genereTab(self, self.listWidget_Framework, self.var_Arma3Path+"/@GOS/@FRAMEWORK/")
    genereTab(self, self.listWidget_Islands, self.var_Arma3Path+"/@GOS/@ISLANDS/")
    genereTab(self, self.listWidget_Units, self.var_Arma3Path+"/@GOS/@UNITS/")
    genereTab(self, self.listWidget_Materiel, self.var_Arma3Path+"/@GOS/@MATERIEL/")
    genereTab(self, self.listWidget_Client, self.var_Arma3Path+"/@GOS/@CLIENT/")
    genereTab(self, self.listWidget_Test, self.var_Arma3Path+"/@GOS/@TEST/")
    genereTab(self, self.listWidget_Interclan, self.var_Arma3Path+"/@GOS/@INTERCLAN/")
    # Mods @Arma3 (top-level, non-@GOS mods)
    genereTab(self, self.listWidget_Arma3, self.var_Arma3Path+"/")
    # Mods @WorkShop (Steam Workshop downloads)
    genereTab(self, self.listWidget_Workshop, self.var_Arma3Path+"/!Workshop/")
def itemCheckState(self, mods):
    """Return Qt.Checked when *mods* already appears in the priority list,
    Qt.Unchecked otherwise."""
    # findItems returns a (possibly empty) list; its truthiness replaces
    # the original `len(...) > 0` comparison.
    if self.listWidget_priority.findItems(mods, QtCore.Qt.MatchExactly):
        return QtCore.Qt.Checked
    return QtCore.Qt.Unchecked
def genereTabTemplate(self):
    """Fill the template list widget and the skin ("apparence") combo box.

    Folders named "@GOSSkin_*" are camouflage skins: they go into the
    appearance combo box with the prefix stripped and underscores turned
    into spaces.  Every other template mod becomes a checkable list item,
    pre-checked when it is already in the priority list.
    """
    listeWidget = self.listWidget_Template
    listeWidget.clear()
    repertoire = self.var_Arma3Path+"/@GOS/@TEMPLATE/"
    # Leading empty entry = "no skin selected".
    self.comboBox_ChoixApparence.addItem("")
    for mods in genereListMods(self, repertoire):
        # `not in` replaces the original `.find(...) == -1` idiom.
        if "@GOSSkin_" not in mods:
            item = QtWidgets.QListWidgetItem()
            item.setCheckState(itemCheckState(self, "@GOS/@TEMPLATE/"+mods))
            item.setText(mods)
            listeWidget.addItem(item)
        else:
            self.comboBox_ChoixApparence.addItem(mods.replace("@GOSSkin_", "").replace("_", " "))
def genereTab(self, listeWidget, repertoire):
    """Rebuild *listeWidget* with one checkable item per mod found in
    *repertoire*; items already in the priority list start checked."""
    listeWidget.clear()
    arma_prefix = self.var_Arma3Path+"/"
    for mod_name in genereListMods(self, repertoire):
        entry = QtWidgets.QListWidgetItem()
        relative_path = (repertoire + mod_name).replace(arma_prefix, "")
        entry.setCheckState(itemCheckState(self, relative_path))
        entry.setText(mod_name)
        listeWidget.addItem(entry)
def genereTabPriority(listeWidget, listeMods):
    """Rebuild the priority list widget from *listeMods*.

    Leaves the widget untouched when the list is empty (or None), so the
    current contents survive an empty refresh.
    """
    if listeMods:  # idiomatic emptiness test instead of `!= []`; also safe for None
        listeWidget.clear()
        for mods in listeMods:
            item = QtWidgets.QListWidgetItem()
            item.setText(mods)
            listeWidget.addItem(item)
def genereListMods(self, repertoire):
    """Return the mod folder names found under *repertoire*.

    A mod is a directory whose ``Addons`` subfolder (path suffix of 7
    chars: separator + "Addons") contains at least one file.  Which mods
    qualify depends on where we scan:
      - inside @GOS trees (repertoire != Arma root): every mod counts;
      - at the Arma root: only non-@GOS, non-Workshop mods;
      - inside !Workshop: only non-@GOS mods.
    Only ``self.var_Arma3Path`` is read from *self*.
    """
    listeMods = []
    arma_root = self.var_Arma3Path+"/"
    for root, dirs, files in os.walk(repertoire):
        if not files:
            continue  # only directories that actually contain files count
        SearchedDir = root.replace(repertoire, "")
        # Strip the trailing "<sep>Addons" (7 chars); computed once instead
        # of four times as in the original.
        mod_name = SearchedDir[0: (len(SearchedDir)-7)]
        if mod_name in listeMods or not SearchedDir.lower().endswith("addons"):
            continue
        is_gos = "@GOS" in root
        # @GOS category folder scan
        if is_gos and repertoire != arma_root:
            listeMods.append(mod_name)
        # top-level Arma mods (excluding Workshop and bare "Addons")
        if (not is_gos and repertoire == arma_root and
                "!Workshop" not in root and
                SearchedDir.lower() != "addons"):
            listeMods.append(mod_name)
        # Steam Workshop mods
        if (not is_gos and "/!Workshop/" in repertoire and
                SearchedDir.lower() != "addons"):
            listeMods.append(mod_name)
    return listeMods
def get_version_number(filename):
    """Return the (major, minor, build, revision) version of a Windows file.

    Uses pywin32's version-info API.  Returns (0, 0, 0, 0) when pywin32 is
    unavailable (non-Windows hosts) or the file has no version resource.
    """
    try:
        from win32api import GetFileVersionInfo, LOWORD, HIWORD
        info = GetFileVersionInfo(filename, "\\")
        ms = info['FileVersionMS']
        ls = info['FileVersionLS']
        return HIWORD(ms), LOWORD(ms), HIWORD(ls), LOWORD(ls)
    except Exception:
        # `except Exception` instead of a bare except: still covers the
        # ImportError/pywintypes failures, but no longer swallows
        # KeyboardInterrupt/SystemExit.
        return 0, 0, 0, 0
def LogoGosSkin(self, name):
    """Show the camouflage preview image for skin *name* (spaces mapped to
    underscores to match the Qt resource file names)."""
    self.label_GFX_Template.setPixmap(QtGui.QPixmap(":/gfx/camo_image/"+name.replace(" ", "_")+".jpg"))
def CleanInterface(self):
    """Reset every checkbox, list and combo box on self.Ui — except widgets
    named in self.excludeWidgetList — then rebuild the mod tabs."""
    for name, obj in inspect.getmembers(self.Ui):
        if isinstance(obj, QtWidgets.QCheckBox) and obj.objectName() not in self.excludeWidgetList:
            obj.setChecked(False)
        if isinstance(obj, QtWidgets.QListWidget) and obj.objectName() not in self.excludeWidgetList:
            obj.clear()
        if isinstance(obj, QtWidgets.QComboBox) and obj.objectName() not in self.excludeWidgetList:
            obj.clear()
    genere_tab_ui(self)
################################################################
if __name__ == "__main__":
# execute when run directly, but not when called as a module.
# therefore this section allows for testing this module!
#print "running directly, not as a module!"
sys.exit()
| [
"[email protected]"
] | |
b876964433707b9b30ea18c60f771e09750eda3b | 168556624401cd884fe0bfdac5a312adf51286a1 | /CS1430/homework5_cnns-Enmin/code/models.py | 06d01cc04f6c2169f576474746853af9239ed3e8 | [] | no_license | Enmin/Coursework | f39dc7b54a07b901491fbd809187fd54e96fa5a4 | a17d216c37e70a8073602389924af10019cfe7de | refs/heads/master | 2021-12-28T02:53:15.949547 | 2021-12-21T22:45:19 | 2021-12-21T22:45:19 | 179,909,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,371 | py | """
Project 4 - CNNs
CS1430 - Computer Vision
Brown University
"""
import tensorflow as tf
from tensorflow.keras.layers import \
Conv2D, MaxPool2D, Dropout, Flatten, Dense, BatchNormalization
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D, MaxPooling2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import sparse_categorical_crossentropy
import hyperparameters as hp
class YourModel(tf.keras.Model):
""" Your own neural network model. """
def __init__(self):
super(YourModel, self).__init__()
# TODO: Select an optimizer for your network (see the documentation
# for tf.keras.optimizers)
self.optimizer = Adam(learning_rate=hp.learning_rate)
# TODO: Build your own convolutional neural network, using Dropout at
# least once. The input image will be passed through each Keras
# layer in self.architecture sequentially. Refer to the imports
# to see what Keras layers you can use to build your network.
# Feel free to import other layers, but the layers already
# imported are enough for this assignment.
#
# Remember: Your network must have under 15 million parameters!
# You will see a model summary when you run the program that
# displays the total number of parameters of your network.
#
# Remember: Because this is a 15-scene classification task,
# the output dimension of the network must be 15. That is,
# passing a tensor of shape [batch_size, img_size, img_size, 1]
# into the network will produce an output of shape
# [batch_size, 15].
#
# Note: Keras layers such as Conv2D and Dense give you the
# option of defining an activation function for the layer.
# For example, if you wanted ReLU activation on a Conv2D
# layer, you'd simply pass the string 'relu' to the
# activation parameter when instantiating the layer.
# While the choice of what activation functions you use
# is up to you, the final layer must use the softmax
# activation function so that the output of your network
# is a probability distribution.
#
# Note: Flatten is a very useful layer. You shouldn't have to
# explicitly reshape any tensors anywhere in your network.
self.architecture = [Conv2D(32, 3, 1, activation="relu", padding="same"),
MaxPool2D(2, padding="same"),
Conv2D(64, 3, 1, activation="relu", padding="same"),
MaxPool2D(2, padding="same"),
Conv2D(128, 3, 1, activation="relu", padding="same"),
MaxPool2D(2, padding="same"),
Conv2D(256, 3, 1, activation="relu", padding="same"),
MaxPool2D(2, padding="same"),
Conv2D(512, 3, 1, activation="relu", padding="same"),
MaxPool2D(2, padding="same"),
Flatten(),
Dense(128, activation="relu"),
Dropout(0.5),
Dense(64, activation="relu"),
Dense(hp.num_classes, activation="softmax")]
def call(self, x):
""" Passes input image through the network. """
for layer in self.architecture:
x = layer(x)
return x
@staticmethod
def loss_fn(labels, predictions):
""" Loss function for the model. """
# TODO: Select a loss function for your network (see the documentation
# for tf.keras.losses)
return sparse_categorical_crossentropy(labels, predictions)
pass
class VGGModel(tf.keras.Model):
def __init__(self):
super(VGGModel, self).__init__()
# TODO: Select an optimizer for your network (see the documentation
# for tf.keras.optimizers)
self.optimizer = Adam(learning_rate=hp.learning_rate)
# Don't change the below:
self.vgg16 = [
# Block 1
Conv2D(64, 3, 1, padding="same",
activation="relu", name="block1_conv1"),
Conv2D(64, 3, 1, padding="same",
activation="relu", name="block1_conv2"),
MaxPool2D(2, name="block1_pool"),
# Block 2
Conv2D(128, 3, 1, padding="same",
activation="relu", name="block2_conv1"),
Conv2D(128, 3, 1, padding="same",
activation="relu", name="block2_conv2"),
MaxPool2D(2, name="block2_pool"),
# Block 3
Conv2D(256, 3, 1, padding="same",
activation="relu", name="block3_conv1"),
Conv2D(256, 3, 1, padding="same",
activation="relu", name="block3_conv2"),
Conv2D(256, 3, 1, padding="same",
activation="relu", name="block3_conv3"),
MaxPool2D(2, name="block3_pool"),
# Block 4
Conv2D(512, 3, 1, padding="same",
activation="relu", name="block4_conv1"),
Conv2D(512, 3, 1, padding="same",
activation="relu", name="block4_conv2"),
Conv2D(512, 3, 1, padding="same",
activation="relu", name="block4_conv3"),
MaxPool2D(2, name="block4_pool"),
# Block 5
Conv2D(512, 3, 1, padding="same",
activation="relu", name="block5_conv1"),
Conv2D(512, 3, 1, padding="same",
activation="relu", name="block5_conv2"),
Conv2D(512, 3, 1, padding="same",
activation="relu", name="block5_conv3"),
MaxPool2D(2, name="block5_pool")
]
# TODO: Make all layers in self.vgg16 non-trainable. This will freeze the
# pretrained VGG16 weights into place so that only the classificaiton
# head is trained.
for layer in self.vgg16:
layer.trainable = False
# TODO: Write a classification head for our 15-scene classification task.
self.head = [Flatten(),
Dense(512, activation='relu'),
Dropout(0.5),
Dense(256, activation='relu'),
Dense(hp.num_classes, activation="softmax")]
# Don't change the below:
self.vgg16 = tf.keras.Sequential(self.vgg16, name="vgg_base")
self.head = tf.keras.Sequential(self.head, name="vgg_head")
def call(self, x):
""" Passes the image through the network. """
x = self.vgg16(x)
x = self.head(x)
return x
@staticmethod
def loss_fn(labels, predictions):
""" Loss function for model. """
# TODO: Select a loss function for your network (see the documentation
# for tf.keras.losses)
return sparse_categorical_crossentropy(labels, predictions)
pass
| [
"[email protected]"
] | |
f2ee0eca6e47c612025fda41b4d6a1af01d4ce64 | ff7c238dcd7c46ae5415de2ffb78a34213af9079 | /plugin.video.ardundzdf/resources/lib/zdfmobile.py | 9403defdc66b6f4039f314b098f476b9674e6a8a | [
"MIT"
] | permissive | Arkangel74/norre_kodi | e747ff2d24f5afd496f348ab23b3953a371f5a07 | b9173b1234f5620296f2d864bedda35a2fa566ac | refs/heads/master | 2022-12-02T03:23:12.533855 | 2020-08-08T12:23:57 | 2020-08-08T12:23:57 | 279,249,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,005 | py | # -*- coding: utf-8 -*-
################################################################################
# zdfmobile.py - - Teil von Kodi-Addon-ARDundZDF
# mobile Version der ZDF Mediathek
#
# dieses Modul nutzt nicht die Webseiten der Mediathek ab https://www.zdf.de/,
# sondern die Seiten ab https://zdf-cdn.live.cellular.de/mediathekV2 - diese
# Seiten werden im json-Format ausgeliefert
# 22.11.2019 Migration Python3 Modul six + manuelle Anpassungen
################################################################################
#
# Stand: 01.05.2020
# Python3-Kompatibilität:
from __future__ import absolute_import # sucht erst top-level statt im akt. Verz.
from __future__ import division # // -> int, / -> float
from __future__ import print_function # PYTHON2-Statement -> Funktion
from kodi_six import xbmc, xbmcaddon, xbmcplugin, xbmcgui, xbmcvfs
# o. Auswirkung auf die unicode-Strings in PYTHON3:
from kodi_six.utils import py2_encode, py2_decode
import os, sys, subprocess
PYTHON2 = sys.version_info.major == 2
PYTHON3 = sys.version_info.major == 3
if PYTHON2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, urlretrieve
from urllib2 import Request, urlopen, URLError
from urlparse import urljoin, urlparse, urlunparse, urlsplit, parse_qs
elif PYTHON3:
from urllib.parse import quote, unquote, quote_plus, unquote_plus, urlencode, urljoin, urlparse, urlunparse, urlsplit, parse_qs
from urllib.request import Request, urlopen, urlretrieve
from urllib.error import URLError
# Python
import ssl
import datetime, time
import re, json # u.a. Reguläre Ausdrücke
# import ardundzdf # -> ZDF_get_content - nicht genutzt
from resources.lib.util import *
# Globals
ADDON_ID = 'plugin.video.ardundzdf'
SETTINGS = xbmcaddon.Addon(id=ADDON_ID)
ADDON_NAME = SETTINGS.getAddonInfo('name')
SETTINGS_LOC = SETTINGS.getAddonInfo('profile')
ADDON_PATH = SETTINGS.getAddonInfo('path') # Basis-Pfad Addon
ADDON_VERSION = SETTINGS.getAddonInfo('version')
PLUGIN_URL = sys.argv[0] # plugin://plugin.video.ardundzdf/
HANDLE = int(sys.argv[1])
DEBUG = SETTINGS.getSetting('pref_info_debug')
FANART = xbmc.translatePath('special://home/addons/' + ADDON_ID + '/fanart.jpg')
ICON = xbmc.translatePath('special://home/addons/' + ADDON_ID + '/icon.png')
USERDATA = xbmc.translatePath("special://userdata")
ADDON_DATA = os.path.join("%sardundzdf_data") % USERDATA
if check_AddonXml('"xbmc.python" version="3.0.0"'):
ADDON_DATA = os.path.join("%s", "%s", "%s") % (USERDATA, "addon_data", ADDON_ID)
DICTSTORE = os.path.join("%s/Dict") % ADDON_DATA # hier nur DICTSTORE genutzt
ICON = 'icon.png' # ARD + ZDF
ICON_MAIN_ZDFMOBILE = 'zdf-mobile.png'
ICON_DIR_FOLDER = "Dir-folder.png"
ICON_SPEAKER = "icon-speaker.png"
imgWidth = 840 # max. Breite Teaserbild
imgWidthLive = 1280 # breiter für Videoobjekt
NAME = 'ARD und ZDF'
ZDFNAME = "ZDFmobile"
def Main_ZDFmobile():
    """Build the ZDFmobile top-level menu (start page, categories, A-Z,
    missed broadcasts, live TV). Every entry routes to Hub() with the
    matching page ID string."""
    PLog('zdfmobile_Main_ZDF:')
    li = xbmcgui.ListItem()
    li = home(li, ID='ARD und ZDF')		# home button
    # Search stays switched off - so far no search function found on
    # zdf-cdn.live.cellular.de.
    # Web player: the following DirectoryObject was decoration for the
    # invisible InputDirectoryObject behind it:
    #fparams="&fparams={'name': '%s'}" % name
    #addDir(li=li, label='Suche: im Suchfeld eingeben', action="dirList", dirID="Main_ZDFmobile",
    #	fanart=R(ICON_SEARCH), thumb=R(ICON_SEARCH), fparams=fparams)
    title = 'Startseite'
    fparams="&fparams={'ID': '%s'}" % title
    addDir(li=li, label=title, action="dirList", dirID="resources.lib.zdfmobile.Hub", fanart=R(ICON_MAIN_ZDFMOBILE),
        thumb=R(ICON_DIR_FOLDER), fparams=fparams)
    fparams="&fparams={'ID': 'Kategorien'}"
    addDir(li=li, label="Kategorien", action="dirList", dirID="resources.lib.zdfmobile.Hub", fanart=R(ICON_MAIN_ZDFMOBILE),
        thumb=R(ICON_DIR_FOLDER), fparams=fparams)
    fparams="&fparams={'ID': 'Sendungen A-Z'}"
    addDir(li=li, label="Sendungen A-Z", action="dirList", dirID="resources.lib.zdfmobile.Hub", fanart=R(ICON_MAIN_ZDFMOBILE),
        thumb=R(ICON_DIR_FOLDER), fparams=fparams)
    fparams="&fparams={'ID': 'Sendung verpasst'}"
    addDir(li=li, label="Sendung verpasst", action="dirList", dirID="resources.lib.zdfmobile.Hub", fanart=R(ICON_MAIN_ZDFMOBILE),
        thumb=R(ICON_DIR_FOLDER), fparams=fparams)
    fparams="&fparams={'ID': 'Live TV'}"
    addDir(li=li, label='Live TV', action="dirList", dirID="resources.lib.zdfmobile.Hub", fanart=R(ICON_MAIN_ZDFMOBILE),
        thumb=R(ICON_DIR_FOLDER), fparams=fparams, summary='nur in Deutschland zu empfangen!')
    xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
# ID = Dict-Parameter und title2 für ObjectContainer
def Hub(ID):
    """Load the JSON page for the chosen main-menu entry, cache it in the
    Dict store and hand it to PageMenu.

    ID: one of 'Startseite', 'Kategorien', 'Sendungen A-Z',
        'Sendung verpasst', 'Live TV'. 'Sendung verpasst' is routed to
        Verpasst() instead, which loads its JSON later in Verpasst_load.
    """
    PLog('Hub, ID: %s' % ID)
    li = xbmcgui.ListItem()
    li = home(li, ID='ARD und ZDF')		# home button
    if ID=='Startseite':
        # local test file:
        # path = '/daten/entwicklung/Plex/Codestuecke/ZDF_JSON/ZDF_start-page.json'
        # page = Resource.Load(path)
        path = 'https://zdf-cdn.live.cellular.de/mediathekV2/start-page'
    if ID=='Kategorien':
        path = 'https://zdf-cdn.live.cellular.de/mediathekV2/categories'
    if ID=='Sendungen A-Z':
        path = 'https://zdf-cdn.live.cellular.de/mediathekV2/brands-alphabetical'
    if ID=='Sendung verpasst':
        li = Verpasst(DictID='Verpasst')
        return li				# done - jsonObject is loaded later in Verpasst_load
    if ID=='Live TV':
        now = datetime.datetime.now()
        datum = now.strftime("%Y-%m-%d")
        path = 'https://zdf-cdn.live.cellular.de/mediathekV2/live-tv/%s' % datum
    page = loadPage(path)
    if len(page) == 0 or str(page).startswith('Fehler'):
        msg1 = 'Fehler beim Abruf von:'
        msg2 = path
        MyDialog(msg1, msg2, '')
        xbmcplugin.endOfDirectory(HANDLE)
        # bugfix: previously execution fell through here and
        # json.loads() crashed on the error text
        return li
    jsonObject = json.loads(page)
    # store the page under its cache key, then dispatch it
    if ID=='Startseite':
        v = 'Startpage'
        Dict('store', v, jsonObject)
        li = PageMenu(li,jsonObject,DictID='Startpage')
    if ID=='Kategorien':
        v = 'Kategorien'
        Dict("store", v, jsonObject)
        li = PageMenu(li,jsonObject,DictID='Kategorien')
    if ID=='Sendungen A-Z':
        v = 'A_Z'
        Dict("store", v, jsonObject)
        li = PageMenu(li,jsonObject,DictID='A_Z')
    if ID=='Live TV':
        v = 'Live'
        Dict("store", v, jsonObject)
        li = PageMenu(li,jsonObject,DictID='Live')
    return li
# ----------------------------------------------------------------------
def Verpasst(DictID):
    """Offer one folder per day for the last seven days ('Sendung verpasst').

    Each entry carries the broadcast-missed URL of that day and routes to
    Verpasst_load, which fetches and lists the day's JSON.
    """
    PLog('Verpasst')
    li = xbmcgui.ListItem()
    # li = home(li, ID=ZDFNAME)	# home button - see Hub
    today = datetime.datetime.now()
    for day_offset in range(7):
        day = today - datetime.timedelta(days=day_offset)
        zdf_date = day.strftime("%Y-%m-%d")		# format used by the ZDF endpoint
        display_date = day.strftime("%d-%m-%Y")		# format shown to the user
        weekday = day.strftime("%A")
        if day_offset == 0:
            weekday = 'Heute'
        elif day_offset == 1:
            weekday = 'Gestern'
        weekday = transl_wtag(weekday)			# -> ARD Mediathek translation
        path = 'https://zdf-cdn.live.cellular.de/mediathekV2/broadcast-missed/%s' % zdf_date
        title = "%s | %s" % (display_date, weekday)
        PLog(title); PLog(path);
        fparams="&fparams={'path': '%s', 'datum': '%s'}" % (path, display_date)
        addDir(li=li, label=title, action="dirList", dirID="resources.lib.zdfmobile.Verpasst_load", fanart=R(ICON_MAIN_ZDFMOBILE),
            thumb=R(ICON_DIR_FOLDER), fparams=fparams)
    xbmcplugin.endOfDirectory(HANDLE)
# ----------------------------------------------------------------------
# lädt json-Datei für gewählten Wochtentag:
def Verpasst_load(path, datum):
    """Load the broadcast-missed JSON for one weekday, cache it and show
    its clusters via PageMenu.

    path:  broadcast-missed URL; its last component doubles as Dict ID
    datum: display date - kept for the router signature, not used here
    """
    PLog('Verpasst_load:' + path)
    li = xbmcgui.ListItem()
    page = loadPage(path)
    if page.startswith('Fehler') or page == '':
        msg1 = 'Fehler beim Abruf von:'
        msg2 = path
        MyDialog(msg1, msg2, '')
        xbmcplugin.endOfDirectory(HANDLE)
        # bugfix: previously execution fell through here and
        # json.loads() crashed on the error text
        return
    PLog(len(page))
    jsonObject = json.loads(page)
    path = path.split('/')[-1]			# last path component -> Dict ID
    v = path
    Dict("store", v, jsonObject)
    li = PageMenu(li,jsonObject,DictID=path)
    xbmcplugin.endOfDirectory(HANDLE)
# ----------------------------------------------------------------------
# Not used so far - placeholder until a search endpoint for the mobile
# API (zdf-cdn.live.cellular.de) is found; see note in Main_ZDFmobile.
def ZDFmSearch(query, title='Suche', offset=0):
    # stub: logs the query and returns an empty directory
    PLog('ZDFmSearch')
    PLog('query: %s' % query)
    li = xbmcgui.ListItem()
    xbmcplugin.endOfDirectory(HANDLE)
# ----------------------------------------------------------------------
# jsonObject is handed over directly here; DictID identifies the stored
# jsonObject (re-loaded later by SingleRubrik + ShowVideo).
def PageMenu(li,jsonObject,DictID):		# start page + follow-up pages
    """Turn one mediathek JSON page into Kodi folder/video entries.

    Handled top-level keys: 'stage' (videos at the top of the page ->
    ShowVideo), 'cluster' / 'broadcastCluster' (teaser rubrics ->
    SingleRubrik) and 'epgCluster' (live streams -> ShowVideo).
    The index paths built here ('stage|3', 'cluster|2|teaser', ...) are
    resolved later by GetJsonByPath against the object cached under DictID.
    """
    PLog('PageMenu:, DictID: ' + DictID)
    mediatype=''				# marks playable videos in the listing
    if SETTINGS.getSetting('pref_video_direct') == 'true':
        mediatype='video'
    PLog('mediatype: ' + mediatype);
    if("stage" in jsonObject):
        PLog('PageMenu stage')
        i=0
        for stageObject in jsonObject["stage"]:
            if(stageObject["type"]=="video"):	# videos at the top of the page
                typ,title,subTitle,descr,img,date,dauer = Get_content(stageObject,imgWidth)
                if subTitle:
                    title = '%s | %s' % (title,subTitle)
                if dauer:
                    date = u'%s | Länge: %s' % (date, dauer)
                path = 'stage|%d' % i
                PLog(path)
                fparams="&fparams={'path': '%s', 'DictID': '%s'}" % (path, DictID)
                addDir(li=li, label=title, action="dirList", dirID="resources.lib.zdfmobile.ShowVideo", fanart=img, thumb=img,
                    fparams=fparams, summary=descr, tagline=date, mediatype=mediatype)
            # i counts every stage entry (also non-videos) so the index
            # path matches the entry's position in the stage list
            i=i+1
    if("cluster" in jsonObject):		# e.g. A-Z start page -> SingleRubrik
        PLog('PageMenu cluster')
        for counter, clusterObject in enumerate(jsonObject["cluster"]):	# e.g. "name":"Neu in der Mediathek"
            if "teaser" in clusterObject and "name" in clusterObject:
                path = "cluster|%d|teaser" % counter
                title = clusterObject["name"]
                # skip personalised content
                if 'Weiterschauen' in title or u'Das könnte Dich' in title or 'Derzeit beliebt' in title:
                    continue
                if title == '':
                    title = 'ohne Titel'
                title = repl_json_chars(title)
                PLog(title); PLog(path);
                fparams="&fparams={'path': '%s', 'title': '%s', 'DictID': '%s'}" % (path, title, DictID)
                addDir(li=li, label=title, action="dirList", dirID="resources.lib.zdfmobile.SingleRubrik",
                    fanart=R(ICON_MAIN_ZDFMOBILE), thumb=R(ICON_DIR_FOLDER), fparams=fparams)
    if("broadcastCluster" in jsonObject):
        PLog('PageMenu broadcastCluster')
        for counter, clusterObject in enumerate(jsonObject["broadcastCluster"]):
            if clusterObject["type"].startswith("teaser") and "name" in clusterObject:
                path = "broadcastCluster|%d|teaser" % counter
                title = clusterObject["name"]
                fparams="&fparams={'path': '%s', 'title': '%s', 'DictID': '%s'}" % (path, title, DictID)
                addDir(li=li, label=title, action="dirList", dirID="resources.lib.zdfmobile.SingleRubrik",
                    fanart=R(ICON_MAIN_ZDFMOBILE), thumb=R(ICON_DIR_FOLDER), fparams=fparams)
    if("epgCluster" in jsonObject):
        PLog('PageMenu epgCluster')
        for counter, epgObject in enumerate(jsonObject["epgCluster"]):	# live streams
            # NOTE(review): 'len(...) >= 0' is always true - presumably
            # '> 0' was intended; confirm before changing
            if("liveStream" in epgObject and len(epgObject["liveStream"]) >= 0):
                path = "epgCluster|%d|liveStream" % counter
                title = epgObject["name"] + ' Live'
                path=py2_encode(path)
                fparams="&fparams={'path': '%s', 'DictID': '%s'}" % (quote(path), DictID)
                addDir(li=li, label=title, action="dirList", dirID="resources.lib.zdfmobile.ShowVideo",
                    fanart=R(ICON_MAIN_ZDFMOBILE), thumb=R(ICON_DIR_FOLDER), fparams=fparams,
                    tagline=title, mediatype=mediatype)
    xbmcplugin.endOfDirectory(HANDLE)
# ----------------------------------------------------------------------
def Get_content(stageObject, maxWidth):
    """Extract the display fields of one teaser/stage JSON object.

    Returns (typ, title, subTitle, descr, img, date, dauer); missing
    fields come back as empty strings. maxWidth limits the teaser-image
    width that may be chosen.
    """
    PLog('Get_content:')
    # PLog(str(stageObject))		# verbose
    title=stageObject["headline"]
    subTitle=stageObject["titel"]
    if(len(title)==0):			# fall back to the subtitle
        title = subTitle
        subTitle = ""
    teaser_nr=''			# episode prefix, like series in ZDF_get_content
    if ("episodeNumber" in stageObject):
        teaser_nr = "Episode %s | " % stageObject["episodeNumber"]
    descr=''
    if("beschreibung" in stageObject):
        descr = teaser_nr + stageObject["beschreibung"]
    typ=''
    if("type" in stageObject):
        typ = stageObject["type"]
    dauer=''
    if("length" in stageObject):
        sec = stageObject["length"]
        if sec:
            dauer = time.strftime('%H:%M:%S', time.gmtime(sec))
    img="";
    if("teaserBild" in stageObject):
        # the last listed width below the limit wins
        for width,imageObject in list(stageObject["teaserBild"].items()):
            if int(width) <= maxWidth:
                img=imageObject["url"];
    date=''
    if("visibleFrom" in stageObject):
        date = stageObject["visibleFrom"]
    elif("date" in stageObject):
        # bugfix: the fallback previously re-tested "visibleFrom" and
        # could therefore never trigger
        date = stageObject["date"]
    if date == '':			# id as date substitute: ..-sendung-vom-..
        if("id" in stageObject):
            date = "ID: >" + stageObject["id"] + "<"
            date = date.replace('-', ' ')
    title=repl_json_chars(title)	# json-safe for func_pars in router()
    subTitle=repl_json_chars(subTitle)	# dto
    descr=repl_json_chars(descr)	# dto
    PLog('Get_content: %s | %s |%s | %s | %s | %s | %s' % (typ, title,subTitle,descr,img,date,dauer) )
    return typ,title,subTitle,descr,img,date,dauer
# ----------------------------------------------------------------------
# a single rubric with video entries - everything else is filtered out
def SingleRubrik(path, title, DictID):
    """List the video teasers of one cluster.

    path:   index path into the cached jsonObject (see GetJsonByPath)
    DictID: key of the cached jsonObject in the Dict store
    """
    PLog('SingleRubrik: %s' % path); PLog(DictID)
    path_org = path
    jsonObject = Dict("load", DictID)
    jsonObject = GetJsonByPath(path, jsonObject)
    if jsonObject == '':			# stale index path (see GetJsonByPath)
        # NOTE(review): no return here - execution continues with the
        # empty string (harmless: len('')==0 and the loop is skipped),
        # but an explicit return would be clearer
        xbmcplugin.endOfDirectory(HANDLE)
    PLog('jsonObjects: ' + str(len(jsonObject)))
    # Debug:
    # RSave("/tmp/x_SingleRubrik.json", json.dumps(jsonObject, sort_keys=True, indent=2, separators=(',', ': ')))
    li = xbmcgui.ListItem()
    li = home(li, ID=ZDFNAME)			# home button
    i=0
    for entry in jsonObject:
        path = path_org + '|%d' % i
        date=''; title=''; descr=''; img=''
        # PLog(entry)				# on demand
        PLog("entry_type: " + entry["type"])
        mediatype=''
        if entry["type"] == 'video':		# mark as video only for direct start in ShowVideo
            if SETTINGS.getSetting('pref_video_direct') == 'true':
                mediatype='video'
        # All listed types go through ShowVideo - only 'video' is handled
        # there to completion. The types video, brand, category, topic
        # return with the new jsonObject to PageMenu and are dispatched
        # again (SingleRubrik or ShowVideo). All other entry types (?)
        # are skipped.
        if entry["type"] == "video" or entry["type"] == "brand" or entry["type"] == "category" or entry["type"] == "topic":
            typ,title,subTitle,descr,img,date,dauer = Get_content(entry,imgWidth)
            if subTitle:
                # title = '%s | %s' % (title,subTitle)
                title = '%s | %s' % (subTitle, title )	# subTitle = programme title
            tagline=''
            if date:
                tagline = '%s' % (date)
            if tagline and dauer:
                tagline = '%s | %s' % (tagline, dauer)
            title = repl_json_chars(title)
            # PLog('video-content: %s | %s | %s | %s | ' % (title,subTitle,descr,img))
            fparams="&fparams={'path': '%s', 'DictID': '%s'}" % (path, DictID)
            PLog("fparams: " + fparams)
            addDir(li=li, label=title, action="dirList", dirID="resources.lib.zdfmobile.ShowVideo", fanart=img,
                thumb=img, fparams=fparams, summary=descr, tagline=tagline, mediatype=mediatype)
        # i counts every entry so the index path matches its position
        i=i+1
        # break				# test single record
    xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
# walks the object and returns the sub-object addressed by path;
# an empty path returns jsonObject unchanged.
# Stale indices are possible, e.g. from the watch list while the start
# page is re-loaded from cache - then '' is returned.
def GetJsonByPath(path, jsonObject):
    """Resolve the '|'-separated index *path* inside *jsonObject*.

    List levels expect an integer index, dict levels a key. Returns the
    addressed sub-object, or '' on any invalid/stale index - the caller
    must stop in that case.
    """
    PLog('GetJsonByPath: '+ path)
    if path == '':
        return jsonObject
    node = jsonObject
    try:					# index/key errors possible
        for i, part in enumerate(path.split('|')):
            if isinstance(node, list):
                index = int(part)
            else:
                index = part
            # bugfix: the old while-loop never incremented its counter,
            # so the diagnostic always logged i=0
            PLog('i=%s, index=%s' % (i,index))
            node = node[index]
    except Exception as exception:
        PLog(str(exception))
        return ''				# caller must stop
    #PLog(node)
    return node
# ----------------------------------------------------------------------
# 07.10.2019 Stringauswertung get_formitaeten2 für neue
# formitaeten-Variante hinzugefügt
#
def ShowVideo(path, DictID, Merk='false'):
    """Resolve one teaser to its playable video formats.

    Multi-entry objects (brand/category/topic) load their follow-up JSON
    page and are dispatched again via PageMenu. Single videos list their
    formats (or start playback directly when 'pref_video_direct' is set).

    path:   '|'-separated index path into the cached jsonObject
    DictID: key of the cached jsonObject in the Dict store
    Merk:   'true' when called from the watch list
    07.10.2019: string evaluation get_formitaeten2 added for the new
    formitaeten variant.
    """
    PLog('ShowVideo:'); PLog(path); PLog(DictID)
    PLog(Merk)
    jsonObject = Dict("load", DictID)
    videoObject = GetJsonByPath(path,jsonObject)
    # Debug:
    # RSave("/tmp/x_ShowVideo.json", json.dumps(videoObject, sort_keys=True, indent=2, separators=(',', ': ')))
    if videoObject == '':			# stale index path (see GetJsonByPath)
        msg1 = 'ShowVideo:'
        msg2 = "Beitrag leider nicht (mehr) verfügbar"
        PLog("%s | %s" % (msg1, msg2))
        MyDialog(msg1, msg2, '')
        xbmcplugin.endOfDirectory(HANDLE)
        # bugfix: previously execution fell through and crashed on
        # videoObject["type"] below
        return
    li = xbmcgui.ListItem()
    li = home(li, ID=ZDFNAME)			# home button
    # multi-entry objects: load the follow-up page, dispatch again
    if videoObject["type"] == 'brand' or videoObject["type"] == "category" or videoObject["type"] == "topic":
        PLog('Mehrfachbeiträge')
        streamApiUrl, jsonurl, htmlurl = get_video_urls(videoObject)
        PLog("streamApiUrl: %s, jsonurl: %s, htmlurl: %s"	% (streamApiUrl, jsonurl, htmlurl))
        page = loadPage(jsonurl)
        if len(page) == 0 or str(page).startswith('Fehler'):
            msg1 = 'Fehler beim Abruf von:'
            msg2 = jsonurl			# bugfix: was the undefined name 'url' (NameError)
            MyDialog(msg1, msg2, '')
            xbmcplugin.endOfDirectory(HANDLE)
            # bugfix: previously fell through to json.loads(page)
            return
        jsonObject = json.loads(page)		# new json object
        # Debug:
        # RSave("/tmp/x_ShowVideo_multi.json", json.dumps(jsonObject, sort_keys=True, indent=2, separators=(',', ': ')))
        Dict("store", 'ShowVideo_multi', jsonObject)
        li = PageMenu(li,jsonObject,DictID='ShowVideo_multi')	# rubric etc. (key "cluster")
        return li
    PLog('Einzelbeitrag')			# single entry
    typ,title,subTitle,descr,img,date,dauer = Get_content(videoObject,imgWidthLive)
    if subTitle:
        title = '%s | %s' % (title,subTitle)
    title_org = title
    PLog(title_org)
    streamApiUrl, jsonurl, htmlurl = get_video_urls(videoObject)	# determine json + html sources
    PLog(streamApiUrl); PLog(jsonurl); PLog(htmlurl)
    formitaeten=''; streamApiUrl=''; bandbreite=''
    if("formitaeten" in videoObject):
        PLog('formitaeten in videoObject')	# OK - videoObject already carries the sources
        formitaeten = get_formitaeten(videoObject)	# json evaluation of the formats
        PLog(len(formitaeten))
    else:
        PLog('formitaeten fehlen, lade jsonurl')	# 1st try: look in the json page
        try:
            page = loadPage(jsonurl)		# jsonurl from get_video_urls
            videoObject = json.loads(page)
            page = json.dumps(videoObject, sort_keys=True, indent=2, separators=(',', ': '))
            # Debug:
            # RSave("/tmp/x_ShowVideo2.json", page)
            # streamApiUrl = videoObject["streamApiUrlAndroid"]	# json key error possible
            streamApiUrl = stringextract('streamApiUrlAndroid": "', '"', page)
            PLog(streamApiUrl)
            formitaeten = get_formitaeten(videoObject)	# json evaluation of the formats
        except Exception as exception:
            PLog(repr(exception))
            PLog('Abruf formitaeten jsonurl fehlgeschlagen')
    # fallback: 2nd try via web page / apiToken
    if "formitaeten" not in videoObject:
        streamApiUrl, jsonurl, htmlurl = get_video_urls(videoObject)	# fetch again
        PLog('formitaeten fehlen, lade htmlurl')
        PLog('htmlurl: ' + htmlurl)		# web page containing the apiToken
        page = loadPage(htmlurl)
        apiToken = stringextract('apiToken": "', '"', page)	# apiToken: web page
        PLog("apiToken: " + apiToken)
        PLog('url2: ' + streamApiUrl)
        if streamApiUrl == '':			# not available, e.g. youth protection before 22:00
            msg1 = 'ShowVideo:'
            msg2 = "Beitrag leider nicht verfügbar (Jugendschutz?)"
            PLog("%s | %s" % (msg1, msg2))
            MyDialog(msg1, msg2, '')
            return li
        else:
            page = loadPage(streamApiUrl, apiToken=apiToken)
            # Debug:
            # RSave("/tmp/x_cco.json", page)	# Debug
            # new evaluation (web page can differ from jsonObject) -
            # e.g. countdown-copenhagen-102 / countdown-copenhagen-118.html
            if '"attributes"' in page and '"formitaeten"' in page:
                PLog('lade formitaeten attr')	# video sources
                formitaeten = get_formitaeten2(page)	# string evaluation of the formats
    descr_local=''				# assemble description
    PLog(type(date)); PLog(type(dauer)); PLog(type(descr));
    if date and dauer:
        descr_local = "%s | %s\n\n%s" % (date, dauer, descr)	# listing display
        descr = "%s | %s||||%s" % (date, dauer, descr)		# -> PlayVideo
    descr=repl_json_chars(descr)		# json-safe for func_pars in router()
    i=0
    for detail in formitaeten:
        i = i + 1
        quality = detail[0]			# e.g. auto [m3u8]
        hd = 'HD: ' + str(detail[1])		# False for mp4 files, see below
        hd = hd.replace('true', 'ja'); hd = hd.replace('false', 'nein');
        url = detail[2]
        url = url.replace('https', 'http')
        typ = detail[3]
        codec = detail[4]
        geo = detail[5]
        PLog(geo)
        geoblock = "mit Geoblock"
        if geo == 'none':
            geoblock = "ohne Geoblock"
        else:
            geoblock = "mit Geoblock %s" % geo
        if url.endswith('mp4'):
            try:				# e.g. ../4/170703_despot1_inf_1496k_p13v13.mp4
                bandbreite = url.split('_')[-2]
                hd = bandbreite
            except Exception:			# narrowed from a bare 'except:'
                bandbreite = ''
        title_org=unescape(title_org);
        title_org=repl_json_chars(title_org)	# json-safe for func_pars in router()
        PLog("url: " + url)
        if SETTINGS.getSetting('pref_video_direct') == 'true':	# or Merk == 'true': # direct start
            PLog('Sofortstart Merk: ZDF Mobile (ShowVideo)')
            PlayVideo(url=url, title=title_org, thumb=img, Plot=descr, Merk=Merk)
            return
        if url.find('master.m3u8') > 0:
            if 'auto' in quality:		# keep for ShowSingleBandwidth
                if SETTINGS.getSetting('pref_video_direct') == 'true':	# direct start
                    PLog('Sofortstart: ZDF Mobile (ShowVideo)')
                    PlayVideo(url=url, title=title_org, thumb=img, Plot=descr, Merk=Merk)
                    return
                url_auto = url
            title=str(i) + '. ' + quality + ' [m3u8]' + ' | ' + geoblock	# single resolutions
            PLog("title: " + title)
            tagline = '%s\n\n' % title_org + 'Qualitaet: %s | Typ: %s' % (quality, '[m3u8-Streaming]')
            url=py2_encode(url); title_org=py2_encode(title_org);
            img=py2_encode(img); descr=py2_encode(descr);
            fparams="&fparams={'url': '%s', 'title': '%s', 'thumb': '%s', 'Plot': '%s', 'Merk': '%s'}" % \
                (quote(url), quote(title_org), quote_plus(img), quote_plus(descr), Merk)
            addDir(li=li, label=title, action="dirList", dirID="PlayVideo", fanart=img,
                thumb=img, fparams=fparams, tagline=descr_local, summary=tagline, mediatype='video')
        else:
            title=str(i) + '. %s [%s] | %s' % (quality, hd, geoblock)
            PLog("title: " + title)
            tagline = '%s\n\n' % title_org + 'Qualitaet: %s | Typ: %s | Codec: %s' % (quality, typ, codec)
            if bandbreite:
                tagline = '%s | %s' % (tagline, bandbreite)
            url=py2_encode(url); title_org=py2_encode(title_org);
            img=py2_encode(img); descr=py2_encode(descr);
            fparams="&fparams={'url': '%s', 'title': '%s', 'thumb': '%s', 'Plot': '%s', 'Merk': '%s'}" % \
                (quote(url), quote(title_org), quote_plus(img), quote_plus(descr), Merk)
            addDir(li=li, label=title, action="dirList", dirID="PlayVideo", fanart=img,
                thumb=img, fparams=fparams, tagline=descr_local, summary=tagline, mediatype='video')
    xbmcplugin.endOfDirectory(HANDLE)
# ----------------------------------------------------------------------
# determines the json and html sources from url + sharingUrl, e.g.:
# "streamApiUrlAndroid": "https://api.zdf.de/tmd/2/android_native_1/vod/ptmd/mediathek/171013_tag_acht_cco"
# "url": "https://zdf-cdn.live.cellular.de/mediathekV2/document/callin-mr-brain-130"
# "sharingUrl": "https://www.zdf.de/wissen/callin-mr-brain/callin-mr-brain-130.html"
# string search for htmlurl is unreliable
# 15.12.2019: re.search evaluation (unreliable) replaced by stringextract
#
def get_video_urls(videoObject):
    """Return (streamApiUrl, jsonurl, htmlurl) extracted from the
    serialized *videoObject* via plain string search."""
    PLog("get_video_urls:")
    v = json.dumps(videoObject, sort_keys=True, indent=2, separators=(',', ':'))
    # RSave('/tmp/x.json', py2_encode(v))	# Debug
    streamApiUrl = stringextract('streamApiUrlAndroid":"', '"', v)
    PLog("streamApiUrl: " + streamApiUrl)
    meta = stringextract('meta":', '}', v)
    jsonurl = stringextract('url":"', '"', meta)
    if jsonurl == '':
        # fallback: scan all "url" blocks for the cellular.de document url
        records = blockextract('"url"', v)
        for rec in records:
            jsonurl = stringextract('url":"', '"', rec)
            if "zdf-cdn.live.cellular.de" in jsonurl:
                break
    PLog("jsonurl: " + jsonurl)
    htmlurl = stringextract('sharingUrl":"', '"', v)
    PLog("htmlurl: " + htmlurl)
    return streamApiUrl, jsonurl, htmlurl
# ----------------------------------------------------------------------
# two variants:
# single=True : one "document/formitaeten" block holds the video details
# single=False: several "formitaeten" blocks hold the video details
def get_formitaeten(jsonObject):
    """Collect the video format details from a mediathek JSON object.

    Returns a list of [quality, hd, url, type, codec, geoblock] lists.
    """
    PLog('get_formitaeten:')
    forms=[]
    # Debug
    # RSave("/tmp/x_forms.json", json.dumps(jsonObject, sort_keys=True, indent=2, separators=(',', ': ')))
    try:
        formObject = jsonObject["document"]["formitaeten"]
        single = True
    except Exception as exception:
        PLog(repr(exception))
        single = False
    PLog(single)
    geoblock=''; fsk=''
    if single:
        # NOTE(review): formObject is the formitaeten value itself, so
        # these membership tests look at its elements, not at dict keys
        # of the document - verify against the feed
        if "geoLocation" in formObject:
            geoblock = formObject["geoLocation"]
        if "fsk" in formObject:			# currently unused
            fsk = formObject["fsk"]
        for formitaet in formObject:
            detail=[]
            url = formitaet["url"];
            quality = formitaet["quality"]
            hd = formitaet["hd"]
            typ = formitaet["type"]
            codec = formitaet["mimeType"]
            PLog("quality:%s hd:%s url:%s" % (quality,hd,url))
            detail.append(quality); detail.append(hd);
            detail.append(url); detail.append(typ); detail.append(codec);
            detail.append(geoblock);
            forms.append(detail)
        return forms
    # single=False
    if "geoLocation" in jsonObject:
        geoblock = jsonObject["geoLocation"]
    if "fsk" in jsonObject:			# currently unused
        # bugfix: previously assigned to geoblock and clobbered the
        # geoLocation value read just above
        fsk = jsonObject["fsk"]
    for formitaet in jsonObject["formitaeten"]:
        detail=[]
        url = formitaet["url"];
        quality = formitaet["quality"]
        hd = formitaet["hd"]
        typ = formitaet["type"]
        codec = ''
        if "mimeCodec" in formitaet:
            codec = formitaet["mimeCodec"]
        if "mimeType" in formitaet:		# mimeType wins when both exist
            codec = formitaet["mimeType"]
        PLog("quality:%s hd:%s url:%s" % (quality,hd,url))
        detail.append(quality); detail.append(hd);
        detail.append(url); detail.append(typ); detail.append(codec);
        detail.append(geoblock);
        forms.append(detail)
    # PLog('forms: ' + str(forms))
    return forms
# ----------------------------------------------------------------------
# json-Index-Probleme - daher stringextract
def get_formitaeten2(page):
PLog('get_formitaeten2:')
forms=[]
records = blockextract('"formitaeten"', page)
PLog(len(records))
geoblock = stringextract('"geoLocation"', '},', page)
geoblock = stringextract('"value" : "', '"', geoblock)
fsk = stringextract('"fsk"', '},', page) # z.Z n. verw.
fsk = stringextract('"value" : "', '"', fsk)
for rec in records:
detail=[]
url = stringextract('"uri" : "', '"', rec)
quality = stringextract('"quality" : "', '"', rec)
hd = stringextract('"hd" : ', ',', rec) # true / false
typ = stringextract('"type" : "', '"', rec)
codec = stringextract('"mimeCodec" : "', '"', rec)
PLog("quality: %s, hd: %s, typ: %s, codec: %s, url: %s" % (quality,hd,typ,codec,url))
detail.append(quality); detail.append(hd);
detail.append(url); detail.append(typ); detail.append(codec);
detail.append(geoblock);
forms.append(detail)
# PLog('forms: ' + str(forms))
return forms
# ----------------------------------------------------------------------:
def ShowSingleBandwidth(title,url_m3u8,thumb, descr):	# .m3u8 -> single resolutions
    """Load a master .m3u8 playlist and list its single resolutions
    via Parseplaylist."""
    PLog('ShowSingleBandwidth:')
    playlist = loadPage(url_m3u8)
    if playlist.startswith('Fehler'):
        msg1 = playlist
        msg2 = url_m3u8
        MyDialog(msg1, msg2, '')
        xbmcplugin.endOfDirectory(HANDLE)
        # bugfix: previously the error text was handed to Parseplaylist
        # and parsed as a playlist
        return
    li = xbmcgui.ListItem()
    li = Parseplaylist(li, playlist=playlist, title=title, thumb=thumb, descr=descr)
    xbmcplugin.endOfDirectory(HANDLE)
####################################################################################################
# Hilfsfunktionen
####################################################################################################
def Parseplaylist(li, playlist, title, thumb, descr):			# playlist (m3u8, ZDF format) -> single resolutions
	# Turn an m3u8 master playlist into one directory entry per variant.
	# The playlist is expected to alternate #EXT-X-STREAM-INF lines and
	# stream urls, i.e. exactly 2 lines per variant after the #EXTM3U head.
	PLog ('Parseplaylist:')
	PLog(title)
	title_org = title
	lines = playlist.splitlines()
	# PLog(lines)
	lines.pop(0)			# drop the first line (#EXTM3U)
	line_inf=[]; line_url=[]
	# split into parallel lists: info line i belongs to url line i
	for i in range(0, len(lines),2):
		line_inf.append(lines[i])
		line_url.append(lines[i+1])
	# PLog(line_inf); PLog(line_url);

	i=0; Bandwith_old = ''
	for inf in line_inf:
		PLog(inf)
		url = line_url[i]
		i=i+1
		Bandwith=''; Resolution=''; Codecs='';
		Bandwith = re.search('BANDWIDTH=(\d+)', inf).group(1)
		if 'RESOLUTION=' in inf:					# may be missing (audio-only)
			Resolution = re.search('RESOLUTION=(\S+),CODECS', inf).group(1)
			Codecs = re.search(r'"(.*)"', inf).group(1)	# characters between the quotes
		summ= 'Bandbreite: %s' % Bandwith
		if Resolution:
			summ= 'Bandbreite %s | Auflösung: %s' % (Bandwith, Resolution)
		if Codecs:
			# NOTE(review): this rebuilds summ from *descr*, discarding the
			# bandwidth/resolution text built above - confirm that is intended.
			summ= '%s | Codecs: %s' % (descr, Codecs)
		summ = summ.replace('"', '')				# strip quotes left from Codecs
		PLog(Bandwith); PLog(Resolution); PLog(Codecs);
		tagline='m3u8-Streaming'
		title = '%s. %s' % (str(i), title_org)
		# identical bandwidth twice in a row -> mark as alternative stream
		if Bandwith_old == Bandwith:
			title = '%s. %s | 2. Alternative' % (str(i), title_org)
		Bandwith_old = Bandwith
		# very low bandwidth = audio only - PMS transcoder: Stream map '0:V:0' matches no streams
		if int(Bandwith) <= 100000:
			tagline = '%s | nur Audio' % tagline
			thumb=R(ICON_SPEAKER)
		url=py2_encode(url); title_org=py2_encode(title_org);
		thumb=py2_encode(thumb); descr=py2_encode(descr);
		fparams="&fparams={'url': '%s', 'title': '%s', 'thumb': '%s', 'Plot': '%s'}" % (quote_plus(url),
			quote_plus(title_org), quote_plus(thumb), quote_plus(descr))
		addDir(li=li, label=title, action="dirList", dirID="PlayVideo", fanart=thumb,
			thumb=thumb, fparams=fparams, summary=summ, tagline=tagline, mediatype='video')
	return li
#----------------------------------------------------------------
def loadPage(url, apiToken='', maxTimeout = None):
	# Fetch *url* via GET and return the utf-8 decoded body.
	# On any failure a string starting with 'Fehler: ' is returned instead
	# (callers test for that prefix, see ShowSingleBandwidth).
	#	apiToken:	optional ZDF api token, sent as Api-Auth header
	#	maxTimeout:	socket timeout in seconds (default 60)
	safe_url = url				# fallback so the except path below never hits an unbound name
	try:
		# NOTE(review): the 2nd replace is a no-op ("&" -> "&") - probably
		# an html-unescaped "&amp;" -> "&" originally; confirm before changing.
		safe_url = url.replace( " ", "%20" ).replace("&","&")
		PLog("loadPage: " + safe_url);
		req = Request(safe_url)
		# gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1) # 07.10.2019: fetching with SSLContext often stalls - if
		#	needed, re-add with a check for >'_create_unverified_context' in dir(ssl)<:
		req.add_header('User-Agent', 'Mozilla/5.0 (Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Mobile Safari/537.36')
		req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3')
		req.add_header('Accept-Language', 'de-de,de;q=0.8,en-us;q=0.5,en;q=0.3')
		# do not use here: 'Accept-Charset', 'utf-8' | 'Accept-Encoding', 'gzip, deflate, br'
		if apiToken:
			PLog(apiToken)
			req.add_header("Api-Auth", "Bearer %s" % apiToken)
			req.add_header("Origin", "https://www.zdf.de")
			req.add_header("Sec-Fetch-Mode", "cors")
		if maxTimeout == None:
			maxTimeout = 60;
		# r = urlopen(req, timeout=maxTimeout, context=gcontext)	# see above
		r = urlopen(req, timeout=maxTimeout)
		# PLog("headers: " + str(r.headers))
		doc = r.read()
		PLog(len(doc))
		doc = doc.decode('utf-8')
		return doc
	except Exception as exception:
		msg = 'Fehler: ' + str(exception)
		msg = msg + '\r\n' + safe_url
		PLog(msg)
		return msg
#----------------------------------------------------------------
| [
"[email protected]"
] | |
e6d07a59fdd040ca8cdccb91df0a133c1e7a3a63 | f5313a8d5596ad136040034b7fdc527016f62eb5 | /apps/forums/migrations/0001_initial.py | d60d5e50d47ab387e34e804b802844efaec2bff1 | [] | no_license | Joel1210/FFF | c2e78d4bb800b6bef62346f0582014e894c6422b | 06849f510bfe3711ec436b5e77dd722f5cbb39e8 | refs/heads/master | 2020-07-14T07:18:55.528900 | 2019-09-12T00:33:10 | 2019-09-12T00:33:10 | 205,270,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-09-10 00:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the forums app: creates the Comment and Reply tables.

    initial = True

    dependencies = [
        # Comment.commenter points at user.User, so that app must migrate first.
        ('user', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=1000)),
                # Plain integer reference to an event (not a ForeignKey) -
                # presumably an id into an events table; confirm against callers.
                ('eventId', models.IntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('commenter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='user.User')),
            ],
        ),
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=1000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # NOTE(review): capitalized field name 'Replier' is unusual, and it
                # links to the parent Comment rather than a user - confirm intent.
                ('Replier', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='replies', to='forums.Comment')),
            ],
        ),
    ]
| [
"[email protected]"
] | |
a95438a0547922a1c0bde8140faf1eb16abe9ca3 | a88e486c3be855554e8c9998766869a19a4e0635 | /exercises/branch_and_cut/OrTools.py | 5fcbde53871313aa47ac6c3c244be707d71b2994 | [] | no_license | DXV-HUST-SoICT/Combinatorial-Optimization | 03559786a36f66f10742e3a0c520a3369e96a065 | 67c326635bb4245e3dd9819ea9704c37bb9635d3 | refs/heads/master | 2021-03-17T12:59:51.141027 | 2020-06-09T17:42:41 | 2020-06-09T17:42:41 | 246,992,799 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,658 | py | from ortools.linear_solver import pywraplp
from ortools.algorithms import pywrapknapsack_solver
from Utils import *
def solve_knapsack(profits, weights, capacities):
    """Solve a single-constraint 0/1 knapsack with OR-Tools branch and bound.

    profits/weights are numpy arrays, capacities a scalar capacity.
    Returns (per-item selection flags, optimal value)."""
    knap = pywrapknapsack_solver.KnapsackSolver(
        pywrapknapsack_solver.KnapsackSolver.KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER, '.')
    knap.Init(list(profits.astype(float)), [list(weights.astype(float))], [float(capacities)])
    value = knap.Solve()
    chosen = [knap.BestSolutionContains(item) for item in range(len(profits))]
    return chosen, value
def lp_solve(a, b, c):
    """Solve max c x s.t. a x = b, x >= 0 as a continuous LP with GLOP."""
    m, n = a.shape
    solver = pywraplp.Solver('tester', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
    variables = [solver.NumVar(0, solver.infinity(), 'x_%i' % j) for j in range(n)]
    return solve(a, b, c, variables, solver)
def mip_solve(a, b, c, ic):
    """Solve the same model as lp_solve but with CBC, where the variables
    whose indices appear in *ic* are restricted to integers."""
    m, n = a.shape
    solver = pywraplp.Solver('tester', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
    variables = [
        solver.IntVar(0, solver.infinity(), 'x_%i' % j) if j in ic
        else solver.NumVar(0, solver.infinity(), 'x_%i' % j)
        for j in range(n)
    ]
    return solve(a, b, c, variables, solver)
def solve(a, b, c, x, solver):
    """Shared back end for lp_solve/mip_solve.

    Adds one equality row per constraint (b[i,0] used as both bounds),
    maximizes c x and returns (objective value, solution values)."""
    m, n = a.shape
    for i in range(m):
        row = solver.Constraint(float(b[i, 0]), float(b[i, 0]))
        for j in range(n):
            row.SetCoefficient(x[j], float(a[i, j]))
    objective = solver.Objective()
    for j in range(n):
        objective.SetCoefficient(x[j], float(c[0, j]))
    objective.SetMaximization()
    solver.Solve()
    return solver.Objective().Value(), [var.solution_value() for var in x]
if __name__ == '__main__':
    # Smoke test: load one MILP instance from disk, solve it and print
    # the objective value and solution vector.
    print('OrTools')
    a, b, c, ic = read_milp_data('./data/mip_04')
    obj_value, sol = mip_solve(a, b, c, ic)
    print(obj_value)
    print(sol)
| [
"[email protected]"
] | |
9f01e7dd00ca2ee5f7e61291939718d683efc021 | 146fa299cc69a7b270091196cfc8b9eb9f7254d6 | /yixie/aixie/web/httpclient.py | 5c60195ff511cc18ce8a855312ad6880da202eae | [] | no_license | Tyler-D/Zfile | 1e52846cb181ba4acad83b5c0c44e491433e44dd | 3f6e53ef06d6ef3a54294652a437fde1a6b32626 | refs/heads/master | 2021-01-10T15:17:38.397728 | 2015-10-25T08:20:09 | 2015-10-25T08:20:09 | 44,509,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,792 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import urlparse
import urllib2
import urllib
import time
import httplib
import gzip
from io import BytesIO
class HttpClient(object):
def __init__(self):
pass
def get(self, url):
headers = self.set_headers(url)
req = urllib2.Request(url=url, headers=headers)
resp = None
status = 200
try:
resp = urllib2.urlopen(req, timeout=15)
except urllib2.URLError, e:
return resp
except urllib2.HTTPError, e:
status = e.code
except httplib.BadStatusLine, e:
#没有接受到客户端发送的数据
return resp
except Exception, e:
return resp
if not resp:
return resp
if 200 != status:
return resp
# 解决乱码
try:
is_gzip = resp.headers.get('Content-Encoding')
except:
is_gzip = None
if is_gzip:
buffer = BytesIO(resp.read())
gz = gzip.GzipFile(fileobj=buffer)
data = gz.read()
gz.close()
return data
return resp.read()
def post(self, url, data):
headers = self.set_headers(url)
req = urllib2.Request(url=url, headers=headers,
data=urllib.urlencode(data))
resp = None
status = 200
try:
resp = urllib2.urlopen(req, timeout=15)
except urllib2.URLError, e:
return resp
except urllib2.HTTPError, e:
status = e.code
except httplib.BadStatusLine, e:
#没有接受到客户端发送的数据
return resp
except Exception, e:
return resp
if not resp:
return resp
if 200 != status:
return resp
return resp.read()
def set_headers(self, url, **kargs):
"""自定义header,防止被禁,某些情况如豆瓣,还需制定cookies,否则被ban
使用参数传入可以覆盖默认值,或添加新参数,如cookies
"""
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
'Accept-Language': 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3',
'Connection': 'keep-alive',
#设置Host会导致TooManyRedirects, 因为hostname不会随着原url跳转而更改,可不设置
'Host': urlparse.urlparse(url).netloc,
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0',
#反盗链
'Referer': url,
}
return headers
| [
"[email protected]"
] | |
e469816da35bd150ece0491cbea9416b2a5526c4 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /selenium__examples/avito_ru__auth.py | a15a761817b43084b6816bd114195806249ea93e | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Placeholder credentials - fill in before running.
LOGIN = '<LOGIN>'
PASSWORD = '<PASSWORD>'

driver = webdriver.Firefox()
driver.implicitly_wait(10)  # poll up to 10s for each element lookup
try:
    driver.get('https://www.avito.ru/')
    print(f'Title: "{driver.title}"')

    # open the login form, then fill and submit it (RETURN submits)
    login_button_el = driver.find_element_by_css_selector('[data-marker="header/login-button"]')
    login_button_el.click()

    login_el = driver.find_element_by_css_selector('[data-marker="login-form/login"]')
    password_el = driver.find_element_by_css_selector('[data-marker="login-form/password"]')

    login_el.send_keys(LOGIN)
    password_el.send_keys(PASSWORD + Keys.RETURN)

    # NOTE: the captcha problem remains... its solving can be left to a
    # human: the script should then wait until the person enters the captcha
    # and finishes authorization, after which it can work with the site
    # on its own

finally:
    driver.quit()
| [
"[email protected]"
] | |
cb732ce27bdc64dc250a8ce424c69405d7ef3f93 | f9bbece937df5f35b91db53e286588a213a490eb | /manage.py | a3c408be87c6198a0e20b0428526e07251bf2145 | [] | no_license | lostab/zhaoyu | be399df4d2261104c0dc043efdbd3d8c511c5b31 | cbdc7157650115683a7b0c2c58776b80d4b01eaf | refs/heads/master | 2020-03-26T03:47:36.854081 | 2018-08-25T16:05:21 | 2018-08-25T16:05:21 | 144,470,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cfg.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
8cf312544a236a29d110b2e3fe1038d5636d3fd7 | 2cf2e2ea3355a641e590aa7f42e7701f186c777d | /agents/ag1/environment.py | 3d080f0800f4abcbf2d87de5ca58b65346dc1afe | [] | no_license | lucaskup/pythonStudy | 07fb23dc2a0531b2542ea21cbf68b6ff93115f6d | 615e3db6f93e3598bd3ffcf33a716256ed8bf6e7 | refs/heads/master | 2021-01-12T04:50:51.582452 | 2017-07-25T00:59:47 | 2017-07-25T00:59:47 | 77,802,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,567 | py | import random
import time
import curses
class Environment:
    """Vacuum-cleaner world: a small grid with dirt and barriers, rendered
    with curses, in which VcAgent instances move and clean."""

    def __init__(self):
        self.agents = []
        #1 home, 2 dirty, 3 barrier
        self.ground = [[0,0,2,0,0,2],[0,3,0,0,0,2],[0,0,2,0,3,3],[0,2,3,0,0,2]]
        self.home = (0,0)
        self.agents.append(VcAgent([False,False,True]))
        self.steps = 0

    def doWork(self):
        """Advance the simulation one step: compute each agent's percepts,
        let it act, and clear the dirt it just vacuumed."""
        self.steps += 1
        for a in self.agents:
            homePercept = self.home == a.pos
            dirtyPercept = self.ground[a.pos[0]][a.pos[1]] == 2
            #print(a.pos,str(len(self.ground)),str(len(self.ground[0])))
            # touch = facing a grid edge or a barrier cell; the edge checks
            # short-circuit before getMovePos() could index out of range
            touchPercept = a.pos[0] == 0 and a.facing == 2 or a.pos[0] == len(self.ground)-1 and a.facing == 0 or a.pos[1] == 0 and a.facing == 1 or a.pos[1] == len(self.ground[0])-1 and a.facing == 3 or self.ground[a.getMovePos()[0]][a.getMovePos()[1]] == 3
            #print(touchPercept,'touchPercept')
            a.setPercepts([touchPercept,dirtyPercept,homePercept])
            a.evalUtility()
            # evalUtility clears photoSensor when it cleaned this cell
            if not(a.photoSensor) and dirtyPercept:
                self.ground[a.pos[0]][a.pos[1]] = 0

    def showGround(self):
        """Draw the grid (agents as 'X') plus a sensor/heading table
        into a curses screen."""
        stdscr = curses.initscr()
        curses.noecho()
        curses.cbreak()
        for x in range(len(self.ground)):
            for y in range(len(self.ground[x])):
                rep = str(self.ground[x][y])
                for a in self.agents:
                    if a.pos == (x,y):
                        rep = 'X'
                #print(rep,end=' ')
                stdscr.addstr(y,2*x, rep+" ")
            #print()
        stdscr.addstr(len(self.ground)+2, 0, "Step: "+ str(self.steps))
        stdscr.addstr(len(self.ground)+3, 0, "photoSensor")
        stdscr.addstr(len(self.ground)+4, 0, "touchSensor")
        stdscr.addstr(len(self.ground)+5, 0, "homeSensor")
        stdscr.addstr(len(self.ground)+6, 0, "facing")
        # one column of sensor readouts per agent
        for i in range(len(self.agents)):
            stdscr.addstr(len(self.ground)+2, 12 + 5*i, "AGT "+str(i))
            stdscr.addstr(len(self.ground)+3, 12 + 5*i, "YES" if self.agents[i].photoSensor else "NO ")
            stdscr.addstr(len(self.ground)+4, 12 + 5*i, "YES" if self.agents[i].touchSensor else "NO ")
            stdscr.addstr(len(self.ground)+5, 12 + 5*i, "YES" if self.agents[i].homeSensor else "NO ")
            stdscr.addstr(len(self.ground)+6, 12 + 5*i, str(self.agents[i].mov[self.agents[i].facing])+"º ")
        stdscr.refresh()

    def envSimulation(self):
        """Run the loop until no dirty cell (value 2) remains, redrawing
        twice a second, then restore the terminal state."""
        self.showGround()
        flat = [x for sublist in self.ground for x in sublist]
        while 2 in flat :
            time.sleep(0.5)
            self.doWork()
            self.showGround()
            flat = [x for sublist in self.ground for x in sublist]
        curses.echo()
        curses.nocbreak()
        curses.endwin()
class VcAgent(object):
    """docstring for VcAgent."""
    # Utility-based vacuum agent. facing indexes into mov: headings in
    # degrees; getMovePos() maps 0 -> row+1, 90 -> col-1, 180 -> row-1,
    # 270 -> col+1.
    mov = [0,90,180,270]
    def __init__(self, percepts):
        # percepts: [touchSensor, photoSensor, homeSensor]
        super(VcAgent, self).__init__()
        self.setPercepts(percepts)
        self.facing = 0
        self.pos = (0,0)
        self.prevSteps = []        # cells already visited
        self.blockadeGrounds = []  # cells learned to be blocked
        self.powerUp = True
    def setPercepts(self,percepts):
        self.touchSensor = percepts[0]
        self.photoSensor = percepts[1]
        self.homeSensor = percepts[2]
    def turnRight(self):
        # rotate heading one step clockwise through mov (wraps 3 -> 0)
        if self.facing >=3:
            self.facing = 0
        else:
            self.facing += 1
    def turnLeft(self):
        # rotate heading one step counter-clockwise (wraps 0 -> 3)
        if self.facing <= 0:
            self.facing = 3
        else:
            self.facing -= 1
    def getMovePos(self):
        # cell one step ahead in the current heading (no bounds check here;
        # callers guard against leaving the grid)
        if self.mov[self.facing] == 0:
            return (self.pos[0]+1,self.pos[1])
        elif self.mov[self.facing] == 90:
            return (self.pos[0],self.pos[1]-1)
        elif self.mov[self.facing] == 180:
            return (self.pos[0]-1,self.pos[1])
        else:
            return (self.pos[0],self.pos[1]+1)
    def move(self):
        # remember the cell we leave (once) and step forward
        if self.pos not in self.prevSteps:
            self.prevSteps.append(self.pos)
        self.pos = self.getMovePos()
    """Eval function for agent, choose the best action to be made """
    def evalUtility(self):
        # Priority: learn blockades, then clean the current cell, otherwise
        # pick the best walk. Does nothing once powered down.
        #if road blocked ahead, register in agents knowledge
        if self.powerUp:
            if self.touchSensor and self.getMovePos() not in self.blockadeGrounds:
                self.blockadeGrounds.append(self.getMovePos())
            if self.photoSensor: # if is dirty
                self.photoSensor = False #do cleaning
            #elif self.homeSensor and len(self.prevSteps) == 16: # is in home and size of env
                #self.powerUp = False
            # elif not self.touchSensor and not self.alreadyGone():
            #     self.move()
            else:
                self.evalWalks()
    def alreadyGone(self):
        # True if the cell ahead was visited before
        return True if self.getMovePos() in self.prevSteps else False
    """ Eval every possible walk """
    def evalWalks(self):
        # Score four candidate actions (0 forward, 1/2 left turns, 3 right):
        # penalize turning and revisiting, drop blocked cells, then pick
        # uniformly among the top scorers. Heading/position are restored
        # before acting, since scoring temporarily rotates the agent.
        topUtility = 100
        gonePenalty = 50
        stepPenalty = 10
        possibleMov = []
        facing = self.facing
        pos = self.pos
        #eval move front
        util = topUtility
        if self.alreadyGone():
            util -= gonePenalty
        if self.getMovePos() not in self.blockadeGrounds:
            possibleMov.append((0,util))
        #eval turn left 1 time
        util = topUtility
        self.turnLeft()
        util -= stepPenalty
        if self.alreadyGone():
            util -= gonePenalty
        if self.getMovePos() not in self.blockadeGrounds:
            possibleMov.append((1,util))
        #eval turn left 2 times
        util = topUtility - 2*stepPenalty
        self.turnLeft()
        if self.alreadyGone():
            util -= gonePenalty
        if self.getMovePos() not in self.blockadeGrounds:
            possibleMov.append((2,util))
        #eval turn right 1 times
        util = topUtility - stepPenalty
        self.turnLeft()
        if self.alreadyGone():
            util -= gonePenalty
        if self.getMovePos() not in self.blockadeGrounds:
            possibleMov.append((3,util))
        possibleMov = sorted(possibleMov, key=lambda utl: utl[1], reverse=True)
        #print(possibleMov)
        self.facing = facing
        self.pos = pos
        # collect all actions tied for the best utility and pick one at random
        util = possibleMov[0][1]
        probList = []
        for act in possibleMov:
            if act[1] == util:
                probList.append(act[0])
        do = random.choice(probList)
        if do == 0:
            self.move()
        elif do == 1 or do == 2:
            self.turnLeft()
        else:
            self.turnRight()
"[email protected]"
] | |
59b23884360d446824b9011505eb87109ca2c183 | 6832f0571141829c7c9ebee4c7bfd8023ba66947 | /pythonChallenge/brute.py | a1e580fe5065cec214ce8b1d2b9e3ec193ee1330 | [] | no_license | LordMelkor/Misc-Projects | 6b1002413fb537271c3eb1a829c564a51d00f04b | 5ff40ce9d9332dd396791ea676499576a1035cca | refs/heads/master | 2021-05-15T21:14:01.926929 | 2017-03-14T20:59:02 | 2017-03-14T20:59:02 | 41,235,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | import numpy
import math
def countChainsBrute(perm):
    """Insert the values 1..N in the order given by *perm* and return the
    maximum number of disjoint chains that ever coexisted.

    placed[k-1] holds k once value k has been inserted; a 0 marks a
    neighbouring slot absorbed into an existing chain. Inserting an inner
    value whose both neighbours already belong to chains merges them
    (one fewer chain). The loop stops early once every slot is filled.
    """
    n = len(perm)
    placed = [None] * n
    chains = 0
    best = 0
    step = 1
    while step <= n:
        value = perm[step - 1]
        if value == 1:
            if placed[0] is None:
                chains += 1
                placed[0] = 1
                if placed[1] is None:
                    placed[1] = 0
        elif value == n:
            if placed[n - 1] is None:
                chains += 1
                placed[n - 1] = n
                if placed[n - 2] is None:
                    placed[n - 2] = 0
        else:
            left_join = 0
            right_join = 0
            if placed[value - 1] is None:
                chains += 1
                placed[value - 1] = value
                if placed[value - 2] is None:
                    placed[value - 2] = 0
                elif placed[value - 2] > 0:
                    left_join = 1
                if placed[value] is None:
                    placed[value] = 0
                elif placed[value] > 0:
                    right_join = 1
            # both neighbours occupied -> two chains merged into one
            chains -= left_join * right_join
        if chains > best:
            best = chains
        if placed.count(None) == 0:
            step = n  # board full: force loop exit after this iteration
        step += 1
    return best
import itertools
# Exhaustive experiment: evaluate the chain count for every permutation of
# 1..N and report the mean and standard deviation over all orderings.
N = 8
lst=list(itertools.permutations(list(range(1,N+1))))
its=len(lst)
v=numpy.empty(its,dtype=int)
for i in range(its):
    v[i]=countChainsBrute(lst[i])
print('numpy mean is ',numpy.mean(v,dtype=numpy.float64))
print('numpy std dev is ',numpy.std(v,dtype=numpy.float64))
"[email protected]"
] | |
e51904f05da5fdc6deeb4b75043d5003bb768f65 | 2f34af2319ebf175c89dc621aef17289d647b8fd | /firstperson/forms.py | c6bf83f50800ea806638a7cc6b0fc394b848f5d3 | [] | no_license | rouxpz/vcarddiaries | 9b1b913c5594be78d32f7516b94182a337068391 | edf6abea8c7c9ea5eba03e8737a09fe5a503a710 | refs/heads/master | 2021-01-18T14:22:11.582112 | 2015-11-24T23:16:22 | 2015-11-24T23:16:22 | 38,317,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | from django import forms
from firstperson.models import Story, Tag
from django.utils.translation import ugettext_lazy as _
from django.forms import Textarea
class StoryForm(forms.ModelForm):
    # Public story-submission form. The three tag fields are optional
    # checkbox groups, each limited to one Tag.tagtype bucket.
    demo_tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.filter(tagtype="Demographic"), required = False, widget=forms.CheckboxSelectMultiple, label="tell us about yourself. it will help others find stories from people who share their experiences.")
    sex_tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.filter(tagtype="Sexuality"), required = False, widget=forms.CheckboxSelectMultiple)
    theme_tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.filter(tagtype="Theme"), required = False, widget=forms.CheckboxSelectMultiple, label="pick the most important themes in your story.")

    class Meta:
        model = Story
        fields = ('name', 'demo_tags', 'sex_tags', 'age', 'city', 'state', 'country', 'definition', 'text', 'theme_tags', 'email', 'address')
        # user-facing labels; wrapped in ugettext_lazy (_) for translation
        labels = {
            'name' : _("name (any name you want, but please don't use \"anonymous\")"),
            'email' : _('email address (we promise we will never share this with anyone)'),
            'age' : _('age range (select one)'),
            'definition' : _("what is your definition of virginity? (130 character maximum)"),
            'text' : _('tell us your story (500 words max):'),
            'address' : _('mailing address to get your 2 free v-cards: '),
        }
        # free-text fields rendered as sized textareas
        widgets = {
            'definition' : Textarea(attrs={'cols': 70, 'rows': 5}),
            'text' : Textarea(attrs={'cols': 100, 'rows': 20, 'id': 'storytext'}),
            'address' : Textarea(attrs={'cols': 70, 'rows': 3}),
        }
"[email protected]"
] | |
68530cdbb3848427a394e2df31ba3f78117342b6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_aliases.py | e7e1e5df2aa19b5e53c435d7f0439abced176273 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.nouns._alias import _ALIAS
# class header
class _ALIASES(_ALIAS, ):
    # Wordbase entry for "aliases": the plural form of the noun "alias",
    # inheriting its behavior from the base-word class _ALIAS.
    def __init__(self,):
        _ALIAS.__init__(self)
        self.name = "ALIASES"   # surface form (upper-case key)
        self.specie = 'nouns'   # part-of-speech bucket
        self.basic = "alias"    # lemma / base form
        self.jsondata = {}
| [
"[email protected]"
] | |
5aeeb71951326b0c5941e510866c58f1a6b7cf13 | 6d6381f51dfce72d99b92c13211017a3158d6aec | /alien_space1.py | 5f78ed1870f411019b7742bc8b4e5961a1335bb4 | [] | no_license | SubbulakshmiRS/SpaceInvaders-game | 78d0a97ce5ab80dde70f89ce2e45d9d042c08270 | eb520dfa16b2d40c216392d4d56c04a1d82cf694 | refs/heads/master | 2020-03-26T22:04:51.320232 | 2018-08-20T16:52:40 | 2018-08-20T16:52:40 | 145,430,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | import curses
import time
from random import randint
# Module-level list of all alien state dicts currently alive.
al1 = []
c1 = 0  # unused here - presumably a shared counter; confirm against callers

class Alien:
    # Character used to draw an alien in the curses window.
    marker = "a"
    def __init__(self, win):
        # State dict keyed by int: 0 index into al1, 1 spawn timestamp,
        # 2 presumably alive/direction flag - confirm, 3 row, 4 column,
        # 5 back-reference to this Alien. Draws itself on *win* and
        # registers in the global list.
        self.a = {0: len(al1), 1: time.time(), 2: 1,
                  3: randint(7, 8), 4: randint(1, 8), 5: self}
        win.addch(self.a[3], self.a[4], self.marker)
        al1.append(self.a)
    def getalien(self):
        # Expose the raw state dict.
        return self.a
| [
"[email protected]"
] | |
4ed00e32deb9870dd4095b513bfc0475f66f7712 | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW04_20210712145313.py | 21381b494f09ac65def9817776c8924b1bff9179 | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,553 | py | """
Georgia Institute of Technology - CS1301
HW04 - Lists
and Tuples
Collaboration Statement:
"""
#########################################
"""
Function Name: roadTrip()
Parameters: state (str), list of tuples (list)
Returns: list of crops (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def roadTrip(state, crops):
    """Return, in input order, every crop grown in *state*.

    *crops* is a list of (crop, state) tuples.
    """
    return [crop for crop, grown_in in crops if grown_in == state]
"""
Function Name: groceryShopping()
Parameters: groceryList (list), priceList (list), budget (float)
Returns: purchaseList(str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def groceryShopping(groceryList, priceList, budget):
    """Greedily buy items in list order while the running total stays
    within *budget*; skipped items do not stop the scan.

    Returns the purchases as (item, price) tuples.
    """
    purchases = []
    spent = 0
    for item, price in zip(groceryList, priceList):
        if spent + price <= budget:
            spent += price
            purchases.append((item, price))
    return purchases
"""
Function Name: restaurantRatings()
Parameters: categoryList(list), restaurantList(list)
Returns: tuple(tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantRatings(categoryList, restaurantList):
    """Return the highest-rated (name, rating, category) tuple whose
    category appears in *categoryList*.

    Falls back to (0, 0, 0) when nothing matches; on rating ties the
    earliest entry wins (same as the original strict '>' scan).
    """
    matching = (entry for entry in restaurantList if entry[2] in categoryList)
    return max(matching, key=lambda entry: entry[1], default=(0, 0, 0))
"""
Function Name: snackTime()
Parameters: taList (list)
Returns: list (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def snackTime(taList):
    """Group each TA's snacks eaten at an "optimal" time.

    *taList* holds (name, snack, hour) tuples. A time is optimal when the
    hour is in 11-14 or 21-23 (inclusive). Returns one list per TA of the
    form [name, snack, snack, ...], ordered by each TA's first optimal
    snack; snacks keep their input order.

    Replaces the original two-pass O(n^2) name-index search (and its
    duplicated time condition) with a single insertion-ordered dict pass.
    """
    def _is_optimal(hour):
        # same window as the original boolean expression
        return 11 <= hour <= 14 or 21 <= hour <= 23

    grouped = {}  # name -> [name, snack, ...]; dicts preserve insertion order
    for name, snack, hour in taList:
        if _is_optimal(hour):
            grouped.setdefault(name, [name]).append(snack)
    return list(grouped.values())
"""
Function Name: coffeeBreak()
Parameters: list of drinks (list), budget (float)
Returns: name of drink (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def coffeeBreak(drinksList, budget):
    """Pick the affordable drink with the most caffeine.

    *drinksList* holds (name, caffeine, price) tuples. Returns the name of
    the strongest drink priced within *budget*, or None when nothing is
    affordable. Caffeine ties keep the earliest drink (strict '>').
    """
    best_name = None
    best_caffeine = -1
    for name, caffeine, price in drinksList:
        if price <= budget and caffeine > best_caffeine:
            best_name, best_caffeine = name, caffeine
    return best_name
# state = 'GA'
# crops = [('peaches', 'GA'), ('potatoes', 'ID'), ('peanuts', 'GA')]
# print(roadTrip(state, crops))
# groceryList = ["chips", "bagels", "coffee", "lettuce", "milk", "steak"]
# priceList = [3.50, 6.50, 3.75, 3.00, 4.40, 16.99]
# budget = 14.50
# print(groceryShopping(groceryList, priceList, budget))
# categoryList = ["Mexican"]
# restaurantList = [ ("Fogo de Chao", 4.8, "Brazilian"), ("El Rey", 4.5, "Mexican") ]
# print(restaurantRatings(categoryList, restaurantList))
# taList = [ ("Corinne", "pickles", 3), ("Michael", "pringles", 13), ("Kathleen", "trail mix", 21)]
# print(snackTime(taList))
# taList = [ ("Emily", "pretzels", 12), ("Michael", "celery", 4), ("Elizabeth", "hot cheetos", 1),("Emily", "fruit", 23), ("Corinne", "cookies", 9), ("Emily", "skittles", 22)]
# print(snackTime(taList))
# Ad-hoc manual check: should print "Latte" (strongest drink within budget).
drinks = [("Espresso", 75, 5.5),("Latte", 40, 4.0),("Frappuccino", 20, 3.5)]
budget = 4.5
print(coffeeBreak(drinks, budget))
| [
"[email protected]"
] | |
2fa6bf616d34e7f91f258fa297cc9b2f225ad2e0 | 41ad4dd833d1398248a5a9ec3d487b0819c00b9d | /jump7.py | f150ce896ea9517b94bf1d3c69410466915e3f84 | [] | no_license | teasunyehu/shiyanlou-code | 94afac3a1c243e10c453fd5111c7d9f4a900dfd0 | 0272b6efe694c2b68dd6468de4b3a39855d4feac | refs/heads/master | 2021-05-27T09:16:36.221171 | 2020-04-09T02:29:17 | 2020-04-09T02:29:17 | 254,244,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | a = 0
# Count upward through 1..100, staying silent on any number that is
# divisible by 7 or contains the digit 7 in the units or tens place.
# (The original `a == 7` test was redundant with `a % 7 == 0` and is
# folded into it; printed output is identical.)
while a < 100:
    a = a + 1
    if a % 7 == 0 or a % 10 == 7 or a // 10 == 7:
        continue
    print(a)
| [
"[email protected]"
] | |
ba9896d2388077bda603c2adcaae245266dab302 | ceaf22335d084eae5d8a30a96bc1e0b7e3ba6de7 | /cst_test.py | ca511ed7f0ef00923033fc6f66030cfc7cb71009 | [] | no_license | zhouhuaman/myhelloword | bb433ccf70f641cdc3118ae283a0e21675ecafd2 | 1bf843176866945d5994de89ad40623f4a924869 | refs/heads/master | 2023-06-23T11:43:59.749596 | 2021-07-22T02:57:31 | 2021-07-22T02:57:31 | 112,067,518 | 0 | 0 | null | 2017-11-26T09:00:27 | 2017-11-26T08:31:50 | null | UTF-8 | Python | false | false | 536 | py | import csv
open_file = False
# Prompt until a readable file name is given (Python 2: raw_input/print stmt).
while(not open_file):
    file_name = raw_input("what file?:")
    try:
        print file_name
        fobj = open(file_name,"rU")
        open_file = True
    except IOError:
        print "Error,try again"
# Read the whole CSV into a list of rows.
f_csv = csv.reader(fobj)
sheet = []
for row in f_csv:
    sheet.append(row)
print sheet
fobj.close()
# Overwrite one grade, then recompute row 2's average over columns 1..-2
# (divisor 3 assumes exactly three grade columns) and store it formatted.
sheet[2][2] = "52"
sum_flt = 0
for g in sheet[2][1:-1]:
    sum_flt += float(g)
avg = sum_flt/3
sheet[2][4] = '%.2f'%avg
# Write the modified sheet back out to a fixed file name.
fobj = open("csv_test.csv","w")
f_csv_w = csv.writer(fobj)
for row in sheet:
    f_csv_w.writerow(row)
fobj.close()
"[email protected]"
] | |
1660014fd32371318d387037c1082ab24fa41efb | 632d58bb602f379b78ee07b58baf27627fb496d3 | /numbers.py | 17fdc5d0d2221054e51630b18f07eeba3ea435a4 | [] | no_license | Ravi4teja/Python_classes | 152982f38c53c48edae4865a16a75e21ac3015c8 | 65f5944c52be005a7db7b6eb3cdd885691ce62b0 | refs/heads/main | 2023-07-03T03:02:26.696532 | 2021-08-10T16:57:28 | 2021-08-10T16:57:28 | 380,787,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,582 | py | num1 = 44
num2 = -33
num3 = 2.376
num4 = -84.34
#checking the type of a variable: ints vs floats
print(type(num1))
print(type(num3))
#incrementing by 1 (long form vs augmented assignment)
num1 = num1 + 1
num2 += 1
print(num1)
print(num2)
# Arithmetic Operators:
# Addition: +
# Subtraction: -
# Multiplication: *
# Division: /
# Floor Division: //
# Exponent: **
# Modulus: %
numm1 = 56
numm2 = 3
print(numm1 + numm2)
print(numm1 - numm2)
print(numm1 * numm2)
print(numm1 / numm2)
print(numm1 // numm2)
print(numm1 ** numm2)
print(numm1 % numm2)
# Comparison Operators:
# Equal: ==
# Not Equal: !=
# Greater Than: >
# Less Than: <
# Greater or Equal: >=
# Less or Equal: <=
print("--------------------")
#comparing numm1=56 and numm2=3
print(numm1 == numm2)
print(numm1 != numm2)
print(numm1 > numm2)
print(numm1 < numm2)
print(numm1 >= numm2)
print(numm1 <= numm2)
print("------------")
#Logical Operators:
# and
# or
# not
#False and True -> False (like 1*0)
print(numm1 == numm2 and numm1 > numm2)
#False or True -> True (like 1+0)
print(numm1 == numm2 or numm1 > numm2)
print(not True)
print(not numm1 >= numm2)
#getting the absolute value
print(abs(-44))
print(abs(44))
#rounding a decimal value to the nearest integer
print(round(5.6457))
#rounding a decimal value to the given number of decimals
print(round(5.6457, 2))
print(round(5.6457, 3))
var1 = "3444"
print(type(var1))
#TypeCasting
#changing the type of string(str) to integer(int)
var1 = int(var1)
print(type(var1))
# float(), str() work the same way
var2 = "something"
# var2 = int(var2)  # would raise ValueError: not a numeric string
var3 = 27
#changing the type of integer(int) to decimal(float)
var3 = float(var3)
print(type(var3))
| [
"[email protected]"
] | |
e52790419eb48a31225e703c28341c82aaa55339 | 45b64f620e474ac6d6b2c04fbad2730f67a62b8e | /Varsity-Final-Project-by-Django-master/.history/project/exams/views_20210402160641.py | 822b88dd698f64df5a52198751b98f1cc04951c4 | [] | no_license | ashimmitra/Final-Project | 99de00b691960e25b1ad05c2c680015a439277e0 | a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003 | refs/heads/master | 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | from django.shortcuts import render
from exams.models import Exam
def quiz
| [
"[email protected]"
] | |
732562b79c06697b174aaa20e6baf69619d2283e | 92d4109df95629d3d3452441b1b3c16138cd154d | /console.py | 77b1cc1fb2bd9a6f507e7a106efdf161721f2c71 | [] | no_license | wadaries/cli-ravel | ffebc5ef37a93bcf749c031316fc2d5e16d04bed | 810f79b3dfa6466e9b4e9c7eeff36e6e246c5e52 | refs/heads/master | 2020-12-25T05:37:05.012824 | 2016-02-20T22:03:43 | 2016-02-20T22:03:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | #!/usr/bin/env python
import cmd
import os
import psycopg2
import pprint
import re
import sys
import tabulate
import tempfile
from optparse import OptionParser
from mininet.topo import Topo
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.log import setLogLevel
import mndeps
from db import RavelDb
DB='mininet'
DBUSER='mininet'
class RavelConsole(cmd.Cmd):
    # Interactive shell (Python 2) bridging a running Mininet network and
    # the Ravel PostgreSQL database.
    prompt = "ravel> "
    intro = "RavelConsole: interactive console for Ravel."
    doc_header = "Commands (type help <topic>):"

    def __init__(self, mnet, db):
        # mnet: started Mininet instance; db: connected RavelDb wrapper
        self.mnet = mnet
        self.db = db
        cmd.Cmd.__init__(self)

    def do_m(self, line):
        # Without arguments: drop into the interactive Mininet CLI.
        # With arguments: run them as a one-shot Mininet script (the CLI
        # only accepts scripts from files, hence the temp file).
        if not line:
            CLI(self.mnet)
        else:
            temp = tempfile.NamedTemporaryFile(delete=False)
            temp.write(line)
            temp.close()
            CLI(self.mnet, script=temp.name)
            os.unlink(temp.name)

    def do_p(self, line):
        # Execute a SQL statement and pretty-print any result rows.
        # The second try absorbs statements without a result set (cursor
        # .description unusable after e.g. INSERT).
        # NOTE(review): the cursor/connection are never closed here -
        # confirm RavelDb.connect() manages that lifetime.
        cursor = self.db.connect().cursor()
        try:
            cursor.execute(line)
        except psycopg2.ProgrammingError, e:
            print e

        try:
            names = [row[0] for row in cursor.description]
            data = cursor.fetchall()
            print tabulate.tabulate(data, headers=names)
        except psycopg2.ProgrammingError:
            pass

    def help_m(self):
        print "syntax: m [mininet cmd]"
        print "-- run mininet command"

    def help_p(self):
        print "syntax: p [sql statement]"
        print "-- execute PostgreSQL statement"

    def do_EOF(self, line):
        "Quit Ravel console"
        sys.stdout.write('\n')
        return True

    def do_exit(self, line):
        "Quit Ravel console"
        return True
def parseArgs():
    """Parse command-line options for the Ravel console.

    Returns the optparse options object.  Exits via parser.error()
    (usage message, status 2) on stray positional arguments or when no
    topology was given.
    """
    desc = ( "Ravel console." )
    usage = ( '%prog [options]\n'
              '(type %prog -h for details)' )
    parser = OptionParser(description=desc, usage=usage)
    parser.add_option('--user', '-u', type='string', default=DBUSER,
                      help='postgresql username (default: %s)' % DBUSER)
    # Fixed copy-paste bug: this help text previously said "postgresql
    # username" even though the option sets the database name.
    parser.add_option('--db', '-d', type='string', default=DB,
                      help='postgresql database (default: %s)' % DB)
    parser.add_option('--topo', '-t', type='string', default=None,
                      help='mininet topology argument')
    options, args = parser.parse_args()
    if args:
        # Consistent with the missing-topology path below: report usage
        # and exit with an error status (was print_help() + bare exit()).
        parser.error("unexpected arguments: %s" % ' '.join(args))
    if not options.topo:
        parser.error("No topology specified")
    return options
if __name__ == "__main__":
opts = parseArgs()
topo = mndeps.build(opts.topo)
net = Mininet(topo)
net.start()
db = RavelDb(opts.db, opts.user)
db.load_topo(topo, net)
RavelConsole(net, db).cmdloop()
net.stop()
| [
"[email protected]"
] | |
4458f06e62fedb5ab9fd4048c7ac403fd89e80f3 | 0cc59d7106371cb2b5f2420f3cd103c902435f18 | /blog/migrations/0001_initial.py | 22f3f7a7a24088d1d104353670e9fc730467c22f | [] | no_license | omerolmez/django-examples | 3d4450d148ba55a6f69b65c5bfee6060646973c4 | 4685a6fc199fbcf8819ec2c70b453b6e202a6b23 | refs/heads/master | 2020-04-01T11:00:47.188790 | 2018-11-01T15:50:36 | 2018-11-01T15:50:36 | 153,142,532 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # Generated by Django 2.1.2 on 2018-10-25 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the blog app: creates the
    # Blog table with title/body/date and an uploaded image.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('pub_date', models.DateTimeField()),
                ('body', models.TextField()),
                # Uploads are stored under MEDIA_ROOT/images/.
                ('image', models.ImageField(upload_to='images')),
            ],
        ),
    ]
| [
"[email protected]"
] | |
731adf4ab422b9c3fa0ccc62c3ec54c02c6a655e | ba5c858da2515a8612c32c28392feeffeff00e80 | /DialogRE/GDPNet/constant.py | 9b21f0504e26b5a965e20759a5cd3020a2b9ad1a | [
"MIT"
] | permissive | maxxyh/dialogRE-eng-bi-lstm | 044a93b87fab18ab2889cfe964fe3e5e568e22fd | 2c0942b2f922a423dc130cfca72a8f9f2445237d | refs/heads/master | 2023-03-28T05:43:26.436950 | 2021-01-30T08:00:21 | 2021-01-30T08:00:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | # vocab
PAD_TOKEN = '<PAD>'  # padding placeholder token
PAD_ID = 0
UNK_TOKEN = '<UNK>'  # out-of-vocabulary token
UNK_ID = 1
# Special tokens placed at the front of every vocabulary, so their ids
# match PAD_ID/UNK_ID above.
VOCAB_PREFIX = [PAD_TOKEN, UNK_TOKEN]
# Named-entity labels -> integer ids.
NER_TO_ID = {'PER': 0, 'GPE': 1, 'ORG': 2, 'STRING': 3, 'VALUE': 4}
# Part-of-speech tags -> integer ids (ids 0/1 reserved for PAD/UNK).
POS_TO_ID = {PAD_TOKEN: 0, UNK_TOKEN: 1, 'NNP': 2, 'NN': 3, 'IN': 4, 'DT': 5, ',': 6, 'JJ': 7, 'NNS': 8, 'VBD': 9, 'CD': 10, 'CC': 11, '.': 12, 'RB': 13, 'VBN': 14, 'PRP': 15, 'TO': 16, 'VB': 17, 'VBG': 18, 'VBZ': 19, 'PRP$': 20, ':': 21, 'POS': 22, '\'\'': 23, '``': 24, '-RRB-': 25, '-LRB-': 26, 'VBP': 27, 'MD': 28, 'NNPS': 29, 'WP': 30, 'WDT': 31, 'WRB': 32, 'RP': 33, 'JJR': 34, 'JJS': 35, '$': 36, 'FW': 37, 'RBR': 38, 'SYM': 39, 'EX': 40, 'RBS': 41, 'WP$': 42, 'PDT': 43, 'LS': 44, 'UH': 45, '#': 46, 'pad': 47}
EVENT_TO_ID = {'process':0, 'perception_active':1, 'change_mind_or_sentiment':2, 'face_or_solve_problem':3, 'expressing_publicly':4, 'motion':5, 'social_event':6, 'placing_or_presence':7, 'giving_or_bringing':8, 'creating_or_damaging':9, 'getting_or_aiming':10, 'action':11, 'O':12} | [
"[email protected]"
] | |
e4925e939eb0f95b5b4e9b0e11ec5f1632c50582 | 64576924d59bf9d901caf8aa3875e4a399cee8cd | /08/5.py | 7a2856d6c153dbc56d82cd2002a6f5dcfa49f7ac | [] | no_license | drumgiovanni/schoolPythonProject | ca5df8be858aa4dfe77ad7706b0de67e40381b13 | 6eb2e87634dafd461d6161f05c127dc347bfa61f | refs/heads/master | 2021-09-06T08:52:57.101134 | 2018-02-04T15:08:24 | 2018-02-04T15:08:24 | 111,419,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | from tkinter import Frame, Label, StringVar, Button, LEFT, BOTTOM, TOP
import random
class LuckChallenger(Frame):
    # Tiny Tkinter lottery game: pressing A or B draws at random and the
    # result is shown in a large label ("あたり" = hit, "はずれ" = miss).
    def __init__(self, master=None):
        super().__init__(master)
        self.pack()
        self.master.title("LuckChallenger")
        frame = Frame(self)
        frame.pack(side=BOTTOM)
        # Both choice buttons share the same handler: the outcome is pure
        # chance, not which button was pressed.
        Button(frame, text="A", command=self.choose).pack(side=LEFT)
        Button(frame, text="B", command=self.choose).pack(side=LEFT)
        Button(frame, text="RESET", command=self.reset).pack(side=BOTTOM)
        Button(frame, text="QUIT", command=self.dismiss).pack(side=LEFT)
        self.disp = StringVar()
        Label(self, textvariable=self.disp, font=("Sans-serif", 48)).pack(side=TOP)
    def choose(self):
        # 1-in-4 chance of winning.
        rnd = random.randrange(4)
        if rnd == 1:
            self.disp.set("あたり")
        else:
            self.disp.set("はずれ")
    def reset(self):
        # Clear the result display.
        self.disp.set("")
    def dismiss(self):
        # Destroy the toplevel window, which ends mainloop().
        self.master.destroy()
LuckChallenger().mainloop() | [
"[email protected]"
] | |
090e20f258a589c2cc6fa8171f2a2faa373f8fbd | 4d25363a4265e7eee4d6f3ab9c936ef1fb5260c7 | /Differential Equations in Action/Lesson 1 - Houston We Have a Problem/2-1_LunarOrbit.py | b95331c1cc2666883e346ba938a5b9f0893a3528 | [] | no_license | Aegis-Liang/Python | 0d4f303bae27c0e9a5ad29d5f570e27b3211db72 | 9dfc840e48f26932b3dd9f1d93ff0bb84c41a09e | refs/heads/master | 2021-01-15T21:31:00.374119 | 2018-05-12T15:40:34 | 2018-05-12T15:40:34 | 99,874,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | # PROBLEM 1
#
# Modify the orbit function below to model
# one revolution of the moon around the earth,
# assuming that the orbit is circular.
#
# Use the math.cos(angle) and math.sin(angle)
# functions in order to accomplish this.
import math
import numpy
import matplotlib.pyplot
moon_distance = 384e6 # m
def orbit(radius=384e6, num_steps=50):
    """Return a (num_steps + 1, 2) array of x/y points on a circular orbit.

    Generalized from the original hard-coded version: *radius* defaults
    to the Earth-Moon distance in meters and *num_steps* to 50, so a
    plain orbit() call returns exactly what it did before.  The first
    and last rows both correspond to angle 0 == 2*pi, closing the loop
    for plotting.
    """
    x = numpy.zeros([num_steps + 1, 2])
    for i in range(num_steps + 1):
        angle = 2 * math.pi * i / num_steps
        x[i, 0] = radius * math.cos(angle)
        x[i, 1] = radius * math.sin(angle)
    return x
x = orbit()
def plot_me():
    # Plot the module-level orbit array `x`, with equal axis scaling so
    # the circular orbit is not visually distorted.
    matplotlib.pyplot.axis('equal')
    matplotlib.pyplot.plot(x[:, 0], x[:, 1])
    axes = matplotlib.pyplot.gca()
    axes.set_xlabel('Longitudinal position in m')
    axes.set_ylabel('Lateral position in m')
plot_me()
matplotlib.pyplot.show()
| [
"[email protected]"
] | |
a57a0e919340531dc670b3e13d1bc73133955128 | 06cda71707e5f4ac040ae89f63f00cdab48d2577 | /one-isp/generatePath.py | 5ec2f0f207e58cd0c1b83bb09c439d2998e7de95 | [] | no_license | YimengZhao/interdomain-TE | 9c4a1ef19549a39c8a7c9fecbd6a277c7d2073d4 | cc1a000c8c967a8608a0fa8bf740c5c200eb358e | refs/heads/master | 2020-03-08T11:44:08.136833 | 2018-04-04T18:54:33 | 2018-04-04T18:54:33 | 128,106,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,962 | py | # coding=utf-8
import networkx as nx
from paths import Path
def generatePathsPerIE(source, sink, topology, predicate, cutoff,
                       maxPaths, modifyFunc=None,
                       raiseOnEmpty=True):
    """
    Generates all simple paths between source and sink using a given predicate.
    :param source: the start node (source)
    :param sink: the end node (sink)
    :param topology: the topology on which we are operating
    :param predicate: the predicate that defines a valid path, must be a
    python callable that accepts a path and a topology, returns a boolean
    :param cutoff: the maximum length of a path.
    Helps to avoid unnecessarily long paths.
    :param maxPaths: maximum number of paths to return, by default no limit.
    :param modifyFunc: a custom function may be passed to convert a list of
    nodes, to a different type of path.
    For example, when choosing middleboxes, we use :py:func:`~predicates.useMboxModifier`
    to expand a list of switches into all possible combinations of middleboxes
    :param raiseOnEmpty: whether to raise an exception if no valid paths are detected.
    Set to True by default.
    :raise NoPathsException: if no paths are found
    :returns: a list of path objects
    :rtype: list
    """
    # NOTE(review): the `cutoff` parameter is accepted but never applied
    # below (nx.all_simple_paths is called without it) -- confirm intent.
    G = topology.getGraph()
    paths = []
    num = 0
    #maxPaths = 1  (leftover debug override, intentionally disabled)
    for p in nx.all_simple_paths(G, source, sink):
        # Without a modifyFunc, keep every node-list that the predicate
        # accepts, wrapped as a Path object.
        if modifyFunc is None:
            if predicate(p, topology):
                paths.append(Path(p))
                num += 1
        else:
            # modifyFunc may expand one node-list into several candidate
            # paths (a list) or produce a single converted path.
            np = modifyFunc(p, num, topology)
            if isinstance(np, list):
                for innerp in np:
                    if predicate(innerp, topology):
                        paths.append(innerp)
                        num += 1
            else:
                if predicate(np, topology):
                    paths.append(np)
                    num += 1
    if not paths:
        if raiseOnEmpty:
            print 'no paths between {} and {}'.format(source, sink)
            # NOTE(review): `exceptions` is never imported in this module,
            # so this raise line itself fails with a NameError -- confirm
            # which module defines NoPathsException.
            raise exceptions.NoPathsException("No paths between {} and {}".format(source, sink))
    # Keep only the maxPaths shortest paths (fewest nodes first).
    paths.sort(key=lambda x: x.getNodesNum(), reverse=False)
    paths = paths[0:maxPaths]
    return paths
def generatePath(ie_pairs, topology, predicate, cutoff, maxPaths=3, modifyFunc=None, raiseOnEmpty=True):
    """Compute valid paths for every ingress/egress pair.

    Returns a dict keyed by each (ingress, egress) pair from *ie_pairs*,
    mapping to the path list produced by generatePathsPerIE for it.
    """
    return dict(
        (pair, generatePathsPerIE(pair[0], pair[1], topology, predicate,
                                  cutoff, maxPaths, modifyFunc, raiseOnEmpty))
        for pair in ie_pairs)
def generatePathsPerTrafficClass(topology, trafficClasses, predicate, cutoff,
                                 maxPaths=3, modifyFunc=None,
                                 raiseOnEmpty=True):
    """Generate all simple paths for each traffic class.

    For every traffic class t, computes the valid paths between t.src and
    t.dst via generatePathsPerIE, passing the predicate, cutoff, path
    limit, optional modifyFunc and raiseOnEmpty straight through.

    :returns: a dict mapping each traffic class to its list of path objects
    :rtype: dict
    :raise NoPathsException: (from generatePathsPerIE) if a traffic class
        has no valid paths and raiseOnEmpty is set
    """
    return dict(
        (tc, generatePathsPerIE(tc.src, tc.dst, topology, predicate,
                                cutoff, maxPaths, modifyFunc, raiseOnEmpty))
        for tc in trafficClasses)
| [
"[email protected]"
] | |
ee3b8c954e8599f567e76609178a26390934ccc5 | 882e8ff2b1b86b9240fde27c5e67537915982b79 | /wwt_data_formats/place.py | d9c37b34f85bf6b1a084086e6013fcda76e2344d | [
"MIT"
] | permissive | astrodavid10/wwt_data_formats | 3350d54fc3d881451c27c668d56e88a74309667f | 6bcdffd8ac7a2791803043dcbcc527d510325971 | refs/heads/master | 2023-07-16T21:20:35.397289 | 2021-08-30T21:18:04 | 2021-08-30T21:18:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,715 | py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2019-2020 the .NET Foundation
# Licensed under the MIT License.
"""A place that a WWT user can visit.
"""
from __future__ import absolute_import, division, print_function
__all__ = '''
Place
'''.split()
from argparse import Namespace
from traitlets import Float, Instance, Int, Unicode, UseEnum
from . import LockedXmlTraits, XmlSer
from .abcs import UrlContainer
from .enums import Classification, Constellation, DataSetType
from .imageset import ImageSet
class Place(LockedXmlTraits, UrlContainer):
    """A place that can be visited."""
    # Each trait's tag(xml=...) metadata names the WTML attribute or
    # element it is serialized to/from.
    data_set_type = UseEnum(
        DataSetType,
        default_value = DataSetType.EARTH
    ).tag(xml=XmlSer.attr('DataSetType'))
    name = Unicode('').tag(xml=XmlSer.attr('Name'))
    # Equatorial coordinates (RA in hours, Dec in degrees).
    ra_hr = Float(0.0).tag(xml=XmlSer.attr('RA'))
    dec_deg = Float(0.0).tag(xml=XmlSer.attr('Dec'))
    # Geographic coordinates, in degrees.
    latitude = Float(0.0).tag(xml=XmlSer.attr('Lat'))
    longitude = Float(0.0).tag(xml=XmlSer.attr('Lng'))
    constellation = UseEnum(
        Constellation,
        default_value = Constellation.UNSPECIFIED
    ).tag(xml=XmlSer.attr('Constellation'))
    classification = UseEnum(
        Classification,
        default_value = Classification.UNSPECIFIED
    ).tag(xml=XmlSer.attr('Classification'))
    magnitude = Float(0.0).tag(xml=XmlSer.attr('Magnitude'))
    distance = Float(0.0).tag(xml=XmlSer.attr('Distance'))
    angular_size = Float(0.0).tag(xml=XmlSer.attr('AngularSize'))
    zoom_level = Float(0.0).tag(xml=XmlSer.attr('ZoomLevel'))
    rotation_deg = Float(0.0).tag(xml=XmlSer.attr('Rotation'))
    angle = Float(0.0).tag(xml=XmlSer.attr('Angle'))
    opacity = Float(100.0).tag(xml=XmlSer.attr('Opacity'))
    dome_alt = Float(0.0).tag(xml=XmlSer.attr('DomeAlt'))
    dome_az = Float(0.0).tag(xml=XmlSer.attr('DomeAz'))
    # Image sets attached to this place; see as_imageset() for the
    # foreground/plain precedence.
    background_image_set = Instance(ImageSet, allow_none=True).tag(xml=XmlSer.wrapped_inner('BackgroundImageSet'))
    foreground_image_set = Instance(ImageSet, allow_none=True).tag(xml=XmlSer.wrapped_inner('ForegroundImageSet'))
    image_set = Instance(ImageSet, allow_none=True).tag(xml=XmlSer.inner('ImageSet'))
    thumbnail = Unicode('').tag(xml=XmlSer.attr('Thumbnail'))
    description = Unicode('').tag(xml=XmlSer.text_elem('Description'))
    """
    A description of the place, using HTML markup.
    This field is not actually used in the stock WWT clients, but it is wired up
    and loaded from the XML.
    """
    annotation = Unicode('').tag(xml=XmlSer.attr('Annotation'))
    """
    Annotation metadata for the place.
    This field is only used in the web engine and web client app. The web client
    app expects this field to contain a comma-separated list of key-value pairs,
    where each pair is delimited with colons:
    .. code-block::
    key1:val1,key2:val2,key3:val3
    The webclient includes some unfinished support for this field to be used to
    create circular annotations with YouTube video links. If your WTML file will
    not be viewed in the webclient, you can use this field to convey arbitrary
    textual data to the WWT Web Engine JavaScript/TypeScript layer.
    """
    msr_community_id = Int(0).tag(xml=XmlSer.attr('MSRCommunityId'))
    """The ID number of the WWT Community that this content came from."""
    msr_component_id = Int(0).tag(xml=XmlSer.attr('MSRComponentId'))
    """The ID number of this content item on the WWT Communities system."""
    permission = Int(0).tag(xml=XmlSer.attr('Permission'))
    "TBD."
    xmeta = Instance(
        Namespace,
        args = (),
        help = 'XML metadata - a namespace object for attaching arbitrary text to serialize',
    ).tag(xml=XmlSer.ns_to_attr('X'))
    def _tag_name(self):
        # XML element name used when (de)serializing this object.
        return 'Place'
    def mutate_urls(self, mutator):
        # Apply *mutator* to every URL this place references: its own
        # thumbnail plus the URLs inside any attached image sets.
        if self.thumbnail:
            self.thumbnail = mutator(self.thumbnail)
        if self.background_image_set:
            self.background_image_set.mutate_urls(mutator)
        if self.foreground_image_set:
            self.foreground_image_set.mutate_urls(mutator)
        if self.image_set:
            self.image_set.mutate_urls(mutator)
    def as_imageset(self):
        """Return an ImageSet for this place if one is defined.
        Returns
        -------
        Either :class:`wwt_data_formats.imageset.ImageSet` or None.
        Notes
        -----
        If the :attr:`foreground_image_set` of this :class:`Place` is not
        None, it is returned. Otherwise, if its :attr:`image_set` is not
        None, that is returned. Otherwise, None is returned.
        """
        if self.foreground_image_set is not None:
            return self.foreground_image_set
        return self.image_set
| [
"[email protected]"
] | |
897f076718be403a31b350f26fa73d26170d02aa | 7727b95290484fb83674f3d03567e1f13b0b0828 | /scripts/VisualFeedback/imageFromVideo.py | 187b9181cbb65407c296ed6fe22724a7ce91ef0f | [] | no_license | Garfield-hr/ur_control | 09e60efd0bb7311305c6d4ce7bc173a6bd156b66 | 3bedf5a490232dee6619e542e43581314b765268 | refs/heads/main | 2023-06-28T16:42:54.774344 | 2021-08-03T08:53:13 | 2021-08-03T08:53:13 | 372,747,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,466 | py | import cv2 as cv
def jump_frame(frame):
    # Trackbar callback: seek the module-level capture to the given frame.
    global vc
    vc.set(cv.CAP_PROP_POS_FRAMES, frame)
    print(frame)  # echo the target frame for debugging
def cut_image():
    """Play 616-2.avi; pressing Esc saves the current frame as an image.

    Esc (key code 27) prompts for a file name and writes the frame under
    /home/hairui/Pictures/experiment/ before leaving the playback loop.
    """
    vc = cv.VideoCapture('/home/hairui/Videos/experiments/616-2.avi')
    frame_num = int(vc.get(cv.CAP_PROP_FRAME_COUNT))
    cv.namedWindow('video', cv.WINDOW_NORMAL)
    # NOTE(review): jump_frame seeks the module-level `vc`, not this
    # function's local capture, so the trackbar would jump the wrong
    # stream -- confirm intent before relying on the slider here.
    cv.createTrackbar('frame', 'video', 0, frame_num, jump_frame)
    if vc.isOpened():
        ret = True
        while ret:
            ret, img = vc.read()
            if ret:
                cv.setTrackbarPos('frame', 'video', int(vc.get(cv.CAP_PROP_POS_FRAMES)))
                cv.imshow('video', img)
                if cv.waitKey(33) == 27:
                    # Bug fix: this Python 2 script used input(), which
                    # eval()s the typed text; raw_input() returns it as a
                    # plain string, as the __main__ block already does.
                    str_name = raw_input('Please input the name of this image')
                    cv.imwrite('/home/hairui/Pictures/experiment/' + str_name, img)
                    break
def cut_video():
    # Play 616-1.avi; when Esc is pressed, copy every remaining frame
    # into 616-2.avi -- i.e. "cut" the tail of the video starting at the
    # current playback position.
    vc = cv.VideoCapture('/home/hairui/Videos/experiments/616-1.avi')
    frame_num = int(vc.get(cv.CAP_PROP_FRAME_COUNT))
    cv.namedWindow('video', cv.WINDOW_NORMAL)
    # NOTE(review): jump_frame operates on the module-level `vc`, not
    # this local capture -- confirm the trackbar is meant to seek here.
    cv.createTrackbar('frame', 'video', 0, frame_num, jump_frame)
    fourcc = cv.VideoWriter_fourcc('I', '4', '2', '0')
    # Frame size (1264, 1016) must match the source frames; mismatched
    # frames are silently dropped by VideoWriter.
    output_video = cv.VideoWriter("/home/hairui/Videos/experiments/616-2.avi", fourcc, 24, (1264, 1016))
    if vc.isOpened():
        ret = True
        while ret:
            ret, img = vc.read()
            if ret:
                cv.setTrackbarPos('frame', 'video', int(vc.get(cv.CAP_PROP_POS_FRAMES)))
                cv.imshow('video', img)
                if cv.waitKey(33) == 27:
                    # Esc: stream the rest of the file into the writer.
                    ret1, img = vc.read()
                    while ret1:
                        output_video.write(img)
                        ret1, img = vc.read()
                    # NOTE(review): output_video is never release()d, so
                    # the output container may be left unfinalized.
                    break
if __name__ == '__main__':
    # Interactive frame grabber for 618-1.avi: a trackbar mirrors the
    # playback position, and Esc prompts for a name and saves the
    # current frame as an image.
    vc = cv.VideoCapture('/home/hairui/Videos/experiments/618-1.avi')
    frame_num = int(vc.get(cv.CAP_PROP_FRAME_COUNT))
    cv.namedWindow('video', cv.WINDOW_NORMAL)
    cv.createTrackbar('frame', 'video', 0, frame_num, jump_frame)
    if vc.isOpened():
        ret = True
        while ret:
            ret, img = vc.read()
            if ret:
                cv.setTrackbarPos('frame', 'video', int(vc.get(cv.CAP_PROP_POS_FRAMES)))
                cv.imshow('video', img)
                if cv.waitKey(33) == 27:
                    str_name = raw_input('Please input the name of this image')
                    cv.imwrite('/home/hairui/Pictures/experiment/' + str_name, img)
break | [
"[email protected]"
] | |
b7ee5a71ed7bc8f1caed4f781d14e0358892c997 | 20da942bbb9ed21e078eb3da863772f211328774 | /modules/nationstates.py | 1834e751f801c8169424375f5428fa635a0442fe | [] | no_license | embolalia/phenny | ab91c76cc26cbc91655a2d9168909061845355bc | ef1a79876466b2a7cdd32d1bc1151f56cc02b989 | refs/heads/master | 2021-04-09T16:04:25.281765 | 2011-12-22T16:54:21 | 2011-12-22T16:54:21 | 2,703,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,076 | py | #!/usr/bin/env python
"""
wa.py - NationStates WA tools for Phenny
Copyright 2011, Edward D. Powell, embolalia.net
Licensed under the Eiffel Forum License 2.
http://inamidst.com/phenny/
"""
import MySQLdb, re
wa_db = 'WA'
wa_host = 'embolalia.net'
wa_user = 'bot'
wa_pass = 'abotabot'
db = MySQLdb.connect(host=wa_host, user=wa_user, passwd=wa_pass, db=wa_db)
cur = db.cursor()
def whats(phenny, input):
    """Looks up a NationStates-related abbreviation or WA resolution"""
    givenum = 1
    # Retrieve resolution number and council from a "GA#12"-style query.
    w, abbr = input.groups()
    resd = re.match(r'(GA|SC|UN)(#| )*(\d*)', abbr)
    if resd:
        givenum = 0
        # Synthesize a row shaped like an ABBRS record:
        # (council, number, abbreviation, expansion-text).
        result = [resd.group(1), resd.group(3), abbr, None]
        if result[0] == 'GA': result[0] = 'G'
        elif result[0] == 'SC': result[0] = 'S'
        else: result[0] = 'U'
    else:
        # Parameterized query: the previous string-concatenated SQL let
        # IRC users inject arbitrary SQL through the abbreviation text.
        cur.execute("SELECT * FROM ABBRS WHERE Abbr = %s", (abbr,))
        result = cur.fetchone()
    if result is None:
        phenny.say("Your guess is as good as mine, mate.")
    elif result[3] is not None:
        phenny.say(abbr + ' is ' + result[3])
    elif result[0] != 'G':
        # Was `is not 'G'`: identity comparison on strings only works by
        # accident of interning; use equality.
        phenny.say('Sorry, ' + input.nick + ', I don\'t have data on that council yet.')
    else:
        council, number = result[0], result[1]
        num = str(number)
        # Look up the full resolution record for this council and number.
        cur.execute("SELECT * FROM RESOLUTIONS WHERE Council = %s AND Number = %s",
                    (council, num))
        message = makemessage(abbr, council, num, cur.fetchone(), givenum)
        phenny.say(message)
whats.rule = ('$nick', ['whats', 'what\'s'], r'(.*)')
whats.example = '$nick, what\'s GA34\n$nick, whats CoCR?'
def makemessage(abbr, council, num, result, givenum):
    """Format a human-readable description of a resolution lookup.

    `result` is a RESOLUTIONS row laid out as (council, number, name,
    category, argument, author, coauthor, repeal-marker, ...), or
    None/empty when the lookup found nothing.  When `givenum` is truthy
    the council and number ("GA#34, ") are included.  Always returns the
    message string (the original fell through and returned None in the
    no-result branch).
    """
    if not result:
        return 'I don\'t have a result for that yet.'
    name = result[2]
    auth = result[5]
    coauth = result[6]
    # A NULL repeal marker means the resolution is still active.
    active = result[7] is None
    if council == 'G': council = 'GA'
    elif council == 'S': council = 'SC'  # was `council == 'SC'`, a no-op comparison
    else: council = 'UN'
    message = abbr + ' is '
    if givenum: message = message + council + '#' + num + ', '
    message = message + name
    if not active: message = message + ', a repealed resolution'
    message = message + ' by ' + auth
    if coauth: message = message + ' and ' + coauth
    return message
def authored(phenny, input):
    """.authored nation - Checks the resolution DB for resolutions authored by
    nation (or nation with any number of characters after. e.g, .authored unibot
    returns results for Unibot and Unibotian WA Mission."""
    phenny.say("Let me check.")
    name = input.group(2)
    # Parameterized LIKE queries: the previous string concatenation let
    # any IRC user inject SQL through the nation name.  The trailing '%'
    # keeps the original prefix-match behavior.
    cur.execute('SELECT COUNT(*) FROM RESOLUTIONS WHERE Author LIKE %s',
                (name + '%',))
    authored = cur.fetchone()[0]
    cur.execute('SELECT COUNT(*) FROM RESOLUTIONS WHERE Coauthor LIKE %s',
                (name + '%',))
    coauthored = cur.fetchone()[0]
    message = 'I see ' + str(authored) + ' resolutions'
    if coauthored > 0:
        message = message + ', plus ' + str(coauthored) + ' coauthorships'
    phenny.say(message + ' by ' + name)
authored.commands = ['authored']
authored.example = '.authored Unibot'
def sc(phenny, input):
    """Returns a link for the requested SC resolution."""
    index = int(input.group(2)) - 1  # the site lists resolutions zero-based
    phenny.say('http://www.nationstates.net/page=WA_past_resolutions/council=2/start={0}'.format(index))
sc.commands = ['sc']
sc.example = '.sc 3'
def ga(phenny, input):
    """Returns a link for the requested GA resolution."""
    index = int(input.group(2)) - 1  # the site lists resolutions zero-based
    phenny.say('http://www.nationstates.net/page=WA_past_resolutions/council=1/start={0}'.format(index))
ga.commands = ['ga']
ga.example = '.ga 132'
def un(phenny, input):
    """Returns a link for the requested NSUN historical resolution."""
    index = int(input.group(2)) - 1  # the site lists resolutions zero-based
    phenny.say('http://www.nationstates.net/page=UN_past_resolutions/council=0/start={0}'.format(index))
un.commands = ['un']
un.example = '.un 5'
| [
"[email protected]"
] | |
be5d4b84c3be8bdc84f67159477d3bbdc135afc4 | 2db7597686f33a0d700f7082e15fa41f830a45f0 | /Python/双指针问题/对撞对开指针/18.4Sum.py | 540b93f53f36c966de4bde8055481434fda747a8 | [] | no_license | Leahxuliu/Data-Structure-And-Algorithm | 04e0fc80cd3bb742348fd521a62bc2126879a70e | 56047a5058c6a20b356ab20e52eacb425ad45762 | refs/heads/master | 2021-07-12T23:54:17.785533 | 2021-05-17T02:04:41 | 2021-05-17T02:04:41 | 246,514,421 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020/4/16
# @Author : XU Liu
# @FileName: 18.4Sum.py
'''
1. 题目类型:
2. 题目要求与理解:
3. 解题思路:
sort 数组
四个数相加
两个数遍历
剩下两个数用左右指针
4. 输出输入以及边界条件:
input:
output:
corner case:
5. 空间时间复杂度
时间复杂度:O(N**3)
'''
class Solution:
    def fourSum(self, nums, target):
        """Return all unique quadruplets from nums summing to target.

        Each quadruplet is an ascending tuple, matching the original's
        return-element type (it collected tuples in a set).  The original
        enumerated all (i, j) pairs and deduplicated through a set; this
        sorts once and skips duplicate values at each level, which both
        removes the set and avoids revisiting identical combinations.
        The `List[int]` annotations were dropped: `typing.List` was never
        imported, so evaluating them raised NameError at class-definition
        time outside the LeetCode harness.

        Time: O(n^3).  Space: O(1) beyond the output.
        """
        nums.sort()
        n = len(nums)
        res = []
        for i in range(n - 3):
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # skip duplicate first elements
            for j in range(i + 1, n - 2):
                if j > i + 1 and nums[j] == nums[j - 1]:
                    continue  # skip duplicate second elements
                l, r = j + 1, n - 1
                while l < r:
                    s = nums[i] + nums[j] + nums[l] + nums[r]
                    if s == target:
                        res.append((nums[i], nums[j], nums[l], nums[r]))
                        l += 1
                        r -= 1
                        # Step past duplicates of the inner pair.
                        while l < r and nums[l] == nums[l - 1]:
                            l += 1
                        while l < r and nums[r] == nums[r + 1]:
                            r -= 1
                    elif s < target:
                        l += 1
                    else:
                        r -= 1
        return res
"[email protected]"
] | |
62e922bb562a9a8d23cb72bd2a904e56d95e7483 | 187012b06bfcf21cbb46073fd44de179ab912f1d | /tests/subject_tests.py | b2bfd0eae57056ce1df1778ad7b2ac8f0616806e | [] | no_license | frogwang92/PyAria | 11af3612e92868602d235c2af257460916a4a613 | 8832c30ba5aa1e8bd0bbce228641410b82c11cd9 | refs/heads/master | 2020-07-05T14:54:51.945253 | 2016-12-16T08:55:19 | 2016-12-16T08:55:19 | 73,889,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | import subject
class Observer(object):
    """Minimal subscriber whose notify() simply reports what it received."""
    def __init__(self):
        pass
    def notify(self, args):
        # Echo the payload delivered by the subject.
        print(f"obs notified, args: {args}")
class Engine(subject.Subject):
    # Example publisher: owns one Event and fires it through the
    # inherited Subject.update().
    def __init__(self):
        super(Engine, self).__init__()
        self.test_event = subject.Event()
    def update_event1(self):
        print("***event 1 updated with no argument***")
        self.update(self.test_event)
    def update_event1_with_args(self):
        # NOTE(review): the format string has one {} but three arguments,
        # so only "1" is printed even though (1, 2, 3) are passed to
        # subscribers below -- confirm whether "{} {} {}" was intended.
        print("***event 1 updated with args {}***".format(1, 2, 3))
        self.update(self.test_event, 1, 2, 3)
# Smoke test: wire two subscribers to test_event, fire it with and
# without arguments, then unsubscribe the bound method and fire again
# (only the lambda should still be notified).
engine = Engine()
ob = Observer()
engine.subscribe(engine.test_event, ob.notify)
engine.subscribe(engine.test_event, lambda args: print("notified to a lambda with args {}".format(args)))
engine.update_event1()
engine.update_event1_with_args()
print("unsubscribe ob.notify from engine.test_event")
engine.unsubscribe(engine.test_event, ob.notify)
engine.update_event1() | [
"[email protected]"
] | |
16bb9fa7dfc2b669f3e750919b9beb5ea8a50a14 | 9302eafa1ab9e78360e70bcad4f2047d0216e03d | /13_wordcount.py | 1bacbc83621c530b6e361372e023b68e46651294 | [] | no_license | raul-gomes/welcome_to_the_django | ad9ed34caecb7fb83c36f03594fe3e78372b692f | 225f98101b83a0fad41c81504cfd16af1696ee20 | refs/heads/main | 2023-01-02T09:32:51.975389 | 2020-10-18T01:02:59 | 2020-10-18T01:02:59 | 304,969,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | """
13. wordcount
Este desafio é um programa que conta palavras de um arquivo qualquer de duas
formas diferentes.
A. Lista todas as palavras por ordem alfabética indicando suas ocorrências.
Ou seja...
Dado um arquivo letras.txt contendo as palavras: A a C c c B b b B
Quando você executa o programa: python wordcount.py --count letras.txt
Ele deve imprimir todas as palavras em ordem alfabética seguidas
do número de ocorrências.
Por exemplo:
$ python wordcount.py --count letras.txt
a 2
b 4
c 3
B. Lista as 20 palavras mais frequêntes indicando suas ocorrências.
Ou seja...
Dado um arquivo letras.txt contendo as palavras: A a C c c B b b B
Quando você executa o programa: python wordcount.py --topcount letras.txt
Ele deve imprimir as 20 palavras mais frequêntes seguidas
do número de ocorrências, em ordem crescente de ocorrências.
Por exemplo:
$ python wordcount.py --topcount letras.txt
b 4
c 3
a 2
Abaixo já existe um esqueleto do programa para você preencher.
Você encontrará a função main() chama as funções print_words() e
print_top() de acordo com o parâmetro --count ou --topcount.
Seu trabalho é implementar as funções print_words() e depois print_top().
Dicas:
* Armazene todas as palavras em caixa baixa, assim, as palavras 'A' e 'a'
contam como a mesma palavra.
* Use str.split() (sem parêmatros) para fazer separar as palavras.
* Não construa todo o programade uma vez. Faça por partes executando
e conferindo cada etapa do seu progresso.
"""
import sys
from collections import Counter
# +++ SUA SOLUÇÃO +++
# Defina as funções print_words(filename) e print_top(filename).
def open_file(filename):
    """Read *filename* and return a Counter of its lower-cased words."""
    with open(filename, 'r') as handle:
        words = handle.read().lower().split()
    return Counter(words)
def print_words(filename):
    """Print every word in alphabetical order with its occurrence count."""
    counts = open_file(filename)
    for word, total in sorted(counts.items()):
        print('{}: {}'.format(word, total))
def print_top(filename):
    """Print the 20 most frequent words, most frequent first.

    Fixes an off-by-one in the original, whose `if index == 20: break`
    ran *after* printing, emitting 21 words.  Counter.most_common(20)
    yields at most 20 (word, count) pairs in descending count order.
    """
    counts = open_file(filename)
    for word, total in counts.most_common(20):
        print('{}: {}'.format(word, total))
# A função abaixo chama print_words() ou print_top() de acordo com os
# parâmetros do programa.
def main():
    """Dispatch to print_words or print_top based on the CLI arguments."""
    if len(sys.argv) != 3:
        print('Utilização: ./13_wordcount.py {--count | --topcount} file')
        sys.exit(1)
    option, filename = sys.argv[1], sys.argv[2]
    handlers = {'--count': print_words, '--topcount': print_top}
    if option not in handlers:
        print('unknown option: ' + option)
        sys.exit(1)
    handlers[option](filename)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d7cff6064fd3675250a054d0a97560cc60ad5a33 | a02a16d74e80de358b21c70c12b1e10975ab3a6d | /phase_4/views/resources.py | 452ef18011717ac1b8ee4ac6cb50712e096e9020 | [] | no_license | wentaoxu415/fizz_buzz | e0a182f6486fd79060895e262259a4ce776d9724 | 147ce15422e72b66c34fe54880aaf8ab6cf2cade | refs/heads/master | 2021-01-10T01:38:57.252322 | 2018-04-23T04:02:17 | 2018-04-23T04:02:17 | 47,909,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,875 | py | from flask.views import MethodView
import os

import arrow
from flask import abort, jsonify, redirect, render_template, request, url_for
from twilio import twiml
from twilio.util import RequestValidator
class ResourceIndex(MethodView):
    """GET handler: render the index page listing every Record."""
    def get(self):
        # NOTE(review): imported inside the method, presumably to dodge a
        # circular import with the app module -- confirm.
        from schedules import db
        from models.record import Record
        all_records = db.session.query(Record).all()
        return render_template('index.html', records=all_records)
class ResourceCall(MethodView):
    """POST handler: schedule an outbound call `delay` minutes from now."""
    def get_scheduled_time(self, sent_time, delay_min):
        # Shift the arrow timestamp forward by delay_min minutes and
        # convert to a stdlib datetime (the .datetime attribute) for the
        # task queue's eta parameter.
        scheduled_time = sent_time.replace(minutes=+delay_min)
        scheduled_time = scheduled_time.datetime
        return scheduled_time
    def get_sent_date(self, sent_time):
        # arrow token syntax: four-digit year, month, day.
        return sent_time.format('YYYY-MM-DD')
    def get_sent_timestamp(self, sent_time):
        return sent_time.format('HH:mm:ss')
    def post(self):
        from tasks import schedule_call
        phone_number = request.form.get('phoneNumber', None)
        sent_time = arrow.utcnow()
        delay_min = int(request.form.get('delay', None))
        scheduled_time = self.get_scheduled_time(sent_time, delay_min)
        sent_date = self.get_sent_date(sent_time)
        sent_timestamp = self.get_sent_timestamp(sent_time)
        # eta hands the absolute execution time to the task queue.
        schedule_call.apply_async(args=[phone_number, sent_date, sent_timestamp, delay_min], eta=scheduled_time)
        return jsonify({'message': 'Call incoming!'})
class ResourceOutboundCall(MethodView):
    """Twilio webhook: greet the callee and gather the digits they key in."""
    def post(self):
        # Only respond to requests carrying a valid Twilio signature.
        if validate_signature():
            phone_number = request.values['Called']
            sent_date = request.args['sent_date']
            sent_timestamp = request.args['sent_timestamp']
            delay_min = request.args['delay_min']
            resp = twiml.Response()
            resp.say("Hello! Welcome to the telephone fizz buzz game!")
            # NOTE(review): the action URL is built by raw concatenation
            # without URL-encoding the query parameters -- confirm the
            # values can never contain reserved characters.
            with resp.gather(timeout=10, finishOnKey="*", action="https://schedule-fizzbuzz.herokuapp.com/handlekey?phone_number="+str(phone_number)+"&sent_date="+str(sent_date)+"&sent_timestamp="+str(sent_timestamp)+"&delay_min="+str(delay_min), method="POST") as g:
                g.say("Please enter your number and then press star.")
            return str(resp)
        else:
            return
class ResourceHandleKey(MethodView):
    """Twilio webhook hit after digit entry: persist it, then play fizz buzz."""
    def get_fizz_buzz(self, resp, digits):
        #generate a range of numbers leading up to the digits entered
        my_digits = (x for x in range(1, int(digits)+1))
        for i in my_digits:
            if i % 3 == 0:
                if i % 5 == 0:
                    resp.say("Fizz Buzz")  # divisible by both 3 and 5
                else:
                    resp.say("Fizz")       # divisible by 3 only
            elif i % 5 == 0:
                resp.say("Buzz")           # divisible by 5 only
            else:
                resp.say(str(i))
        return resp
    def post(self):
        from schedules import db
        from models.record import Record
        from datetime import datetime
        digits_pressed = request.values.get('Digits', None)
        caller_id = request.values.get('CallSid', None)
        phone_number = request.values.get('Called', None)
        sent_date = request.args['sent_date']
        sent_timestamp = request.args['sent_timestamp']
        # Rebuild the original send time from the query-string pieces.
        sent_time = sent_date + " " + sent_timestamp
        sent_time = datetime.strptime(sent_time, "%Y-%m-%d %H:%M:%S")
        delay_min = request.args['delay_min']
        # Persist the interaction before answering (record_link is filled
        # in later by the recording callback).
        new_record = Record(caller_id, phone_number, sent_time, int(delay_min), int(digits_pressed), None)
        db.session.add(new_record)
        db.session.commit()
        resp = twiml.Response()
        resp.say("You've pressed" + digits_pressed)
        resp.say("Now, let's start our fizz buzz game!")
        resp = self.get_fizz_buzz(resp, digits_pressed)
        resp.say("That's it! Thanks for playing! Good bye!")
        return str(resp)
class ResourceRecording(MethodView):
    """Twilio recording callback: attach the recording URL to its Record."""
    def post(self):
        from schedules import db
        from models.record import Record
        caller_id = request.values['CallSid']
        link = request.values['RecordingUrl']
        # NOTE(review): no handling for an unknown CallSid -- first()
        # returning None would raise AttributeError below; confirm Twilio
        # can only call back for known calls.
        record = Record.query.filter_by(caller_id=caller_id).first()
        record.record_link = link
        db.session.commit()
        return
def validate_signature():
    """Verify that the current request genuinely came from Twilio.

    Compares the X-Twilio-Signature header against a signature computed
    from the request URL + POST params with our auth token.  Aborts the
    request with 401 when the header is missing or invalid; returns
    True otherwise.
    """
    validator = RequestValidator(os.environ.get("TWILIO_AUTH_TOKEN"))
    if 'X-Twilio-Signature' not in request.headers:
        abort(401)
    else:
        my_url = request.url
        if my_url.startswith('http://'):
            # Twilio signs the public https URL while we see http behind
            # TLS termination.  Rewrite only the scheme: the previous
            # my_url.replace("http", "https") replaced *every* "http"
            # substring, corrupting URLs containing one in path or query.
            my_url = 'https://' + my_url[len('http://'):]
        params = request.form
        twilio_signature = request.headers['X-Twilio-Signature']
        if not validator.validate(my_url, params, twilio_signature):
            abort(401)
    return True
| [
"[email protected]"
] | |
81b19ded9589da061696c626c395682a5e23a71d | 8719599ea52299e0568fad18df2312981009bed8 | /prepare/create_hdf5_trainext.py | 43043361e77530ff40f1caa92d6b725d7ad00c14 | [
"MIT"
] | permissive | tiancheng-zhi/ms-powder | 674bc76f9ffe3fe95142d516438d351ed55c361f | 0d4ce21e811e65ca0c225d635a3aa70fe872d33a | refs/heads/master | 2021-10-13T13:49:14.732364 | 2021-10-10T04:50:54 | 2021-10-10T04:50:54 | 178,933,590 | 25 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | import h5py
import numpy as np
import cv2
from pathlib import Path
if __name__ == '__main__':
    # Pack the per-scene .npz / .png pairs (one folder per light source)
    # into a single HDF5 training file.
    data_path = Path('../data/trainext/')
    real_path = Path('../real/')
    n_scenes = 64
    height = 160
    width = 280
    n_channels = 38
    lights = ['EiKOIncandescent250W', 'IIIWoodsHalogen500W',
              'LowelProHalogen250W', 'WestinghouseIncandescent150W']
    scenes_per_light = n_scenes // len(lights)

    h5f = h5py.File(str(Path(real_path / ('trainext.hdf5'))), 'w')
    dset_im = h5f.create_dataset('im', (n_scenes, height, width, n_channels), dtype='float32')
    dset_label = h5f.create_dataset('label', (n_scenes, height, width), dtype='uint8')
    for i in range(n_scenes):
        light_idx, scene_idx = divmod(i, scenes_per_light)
        light = lights[light_idx]
        idx = str(scene_idx).zfill(2)
        # Each scene stores its RGB+NIR and SWIR bands separately; stack
        # them along the channel axis to build the full 38-channel cube.
        scene = np.load(data_path / light / 'scene' / (idx + '_scene.npz'))
        im = np.concatenate((scene['rgbn'].astype(np.float32),
                             scene['swir'].astype(np.float32)), axis=2)
        label = cv2.imread(str(data_path / light / 'label' / (idx + '_label.png')),
                           cv2.IMREAD_GRAYSCALE)
        dset_im[i, :, :, :] = im
        dset_label[i, :, :] = label
    h5f.close()
| [
"[email protected]"
] | |
cec7e9242ae9d503fe19e9d929ef7b73027674e0 | 96a1d2ef91c2085b741cb8d9222553fd25246701 | /bag/views.py | dd40a8329c5322abfce4c585706409c5306a08db | [] | no_license | Code-Institute-Submissions/fitness3 | d364d392f9df273b65b304cda6671e043dacf7ef | c2d71dd7b08412d20be8a6c4486603c940e1fbe0 | refs/heads/master | 2023-02-08T13:16:46.373286 | 2020-12-31T22:53:15 | 2020-12-31T22:53:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,890 | py | from django.shortcuts import render, redirect, reverse, HttpResponse, get_object_or_404
from django.contrib import messages
from products.models import Product
# Create your views here.
def view_bag(request):
    """Render the shopping-bag contents page."""
    template = 'bag/bag.html'
    return render(request, template)
def add_to_bag(request, item_id):
    """Add a quantity of the specified product to the shopping bag.

    Sized products are stored as {'items_by_size': {size: qty}}; unsized
    products are stored as a plain integer quantity. The updated bag is
    written back to the session and the user is redirected to the page
    they came from.
    """
    product = get_object_or_404(Product, pk=item_id)
    quantity = int(request.POST.get('quantity'))
    redirect_url = request.POST.get('redirect_url')
    size = None
    if 'product_size' in request.POST:
        size = request.POST['product_size']
    bag = request.session.get('bag', {})

    # (Removed leftover debug print("if")/print("else") statements that
    # polluted the server log on every request.)
    if size:
        if item_id in list(bag.keys()):
            if size in bag[item_id]['items_by_size'].keys():
                bag[item_id]['items_by_size'][size] += quantity
                messages.success(request, f'Updated size {size.upper()} {product.name} quantity to {bag[item_id]["items_by_size"][size]}')
            else:
                bag[item_id]['items_by_size'][size] = quantity
                messages.success(request, f'Added size {size.upper()} {product.name} to your bag')
        else:
            bag[item_id] = {'items_by_size': {size: quantity}}
            messages.success(request, f'Added size {size.upper()} {product.name} to your bag')
    else:
        if item_id in list(bag.keys()):
            bag[item_id] += quantity
            messages.success(request, f'Updated {product.name} quantity to {bag[item_id]}')
        else:
            bag[item_id] = quantity
            messages.success(request, f'Added {product.name} to your bag')

    request.session['bag'] = bag
    return redirect(redirect_url)
def adjust_bag(request, item_id):
    """Adjust the quantity of the specified product to the specified amount"""
    product = get_object_or_404(Product, pk=item_id)
    quantity = int(request.POST.get('quantity'))
    size = request.POST['product_size'] if 'product_size' in request.POST else None
    bag = request.session.get('bag', {})

    if size:
        # Sized product: update or remove just this size's entry.
        if quantity > 0:
            bag[item_id]['items_by_size'][size] = quantity
            messages.success(request, f'Updated size {size.upper()} {product.name} quantity to {bag[item_id]["items_by_size"][size]}')
        else:
            del bag[item_id]['items_by_size'][size]
            if not bag[item_id]['items_by_size']:
                # Last size gone - drop the product entirely.
                bag.pop(item_id)
            messages.success(request, f'Removed size {size.upper()} {product.name} from your bag')
    elif quantity > 0:
        bag[item_id] = quantity
        messages.success(request, f'Updated {product.name} quantity to {bag[item_id]}')
    else:
        bag.pop(item_id)
        messages.success(request, f'Removed {product.name} from your bag')

    request.session['bag'] = bag
    return redirect(reverse('view_bag'))
def remove_from_bag(request, item_id):
    """Remove the item from the shopping bag"""
    try:
        product = get_object_or_404(Product, pk=item_id)
        size = request.POST.get('product_size')
        bag = request.session.get('bag', {})

        if size:
            # Drop only this size; remove the product once no sizes remain.
            del bag[item_id]['items_by_size'][size]
            if not bag[item_id]['items_by_size']:
                bag.pop(item_id)
            messages.success(request, f'Removed size {size.upper()} {product.name} from your bag')
        else:
            bag.pop(item_id)
            messages.success(request, f'Removed {product.name} from your bag')

        request.session['bag'] = bag
        return HttpResponse(status=200)
    except Exception as e:
        # Called via AJAX - report the failure as a 500 rather than a page.
        messages.error(request, f'Error removing item: {e}')
        return HttpResponse(status=500)
| [
"[email protected]"
] | |
c56db88c7cfd6bc59d54dd79fe54c84237d82434 | 48638367bf80d0a66dc620c91311fa826d251361 | /clients/beers.py | 2e1016b618e8f79e248682cd3a69b98e6c0786b3 | [] | no_license | batetopro/beers | f3587ba77c0a9cd64adcef026ec067b3145cb52d | fd35e2bb6d1501110bf978ae51fff5910dfc631e | refs/heads/master | 2022-11-20T22:57:52.676312 | 2020-07-16T13:03:18 | 2020-07-16T13:03:18 | 277,605,808 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,698 | py | from datetime import datetime
from flask import current_app as app
import requests
from beers.settings import Settings
class BeerClient:
    """Thin HTTP client wrapping the beers REST API."""

    def __init__(self, api_url=None):
        """Remember the API host, defaulting to the configured server name."""
        self.api_url = api_url or Settings.FLASK_SERVER_NAME

    def list(self):
        """Return the JSON list of all beers."""
        resp = requests.get('http://{}/api/beers'.format(self.api_url))
        if resp.status_code != 200:
            raise ValueError('Received not supported status code.')
        return resp.json()

    def get(self, sku):
        """Return one beer by SKU, or None when it does not exist.

        In testing mode two fixed SKUs are served from canned data so the
        web layer can be exercised without a live API.
        """
        if app.config['TESTING']:
            canned = {
                'TESTING_SKU': ('Test beer', 2.5),
                'ANOTHER_TESTING_SKU': ('Another test beer', 1.25),
            }
            if sku in canned:
                name, price = canned[sku]
                return {
                    'id': 1,
                    'name': name,
                    'sku': sku,
                    'price': price,
                    'image': None,
                    'time_created': datetime.utcnow(),
                    'time_modified': None,
                }
            return None

        resp = requests.get('http://{}/api/beers/{}'.format(self.api_url, sku))
        if resp.status_code == 404:
            return None
        if resp.status_code != 200:
            raise ValueError('Received not supported status code.')
        return resp.json()

    def create(self, token, sku, name, price, image):
        """POST a new beer; returns (ok, payload) for 201/400 responses."""
        headers = {'Authorization': token}
        payload = {'name': name, 'sku': sku, 'price': price, 'image': image}
        resp = requests.post('http://{}/api/beers'.format(self.api_url),
                             data=payload, headers=headers)
        if resp.status_code == 400:
            return False, resp.json()
        if resp.status_code == 401:
            raise ValueError('You are not authorized to create beers.')
        if resp.status_code == 201:
            return True, resp.json()
        raise ValueError('Received not supported status code.')

    def modify(self, token, sku, name, price, image):
        """PUT updated fields for an existing beer and return the payload."""
        headers = {'Authorization': token}
        payload = {'name': name, 'price': price, 'image': image}
        resp = requests.put('http://{}/api/beers/{}'.format(self.api_url, sku),
                            data=payload, headers=headers)
        if resp.status_code == 204:
            return resp.json()
        if resp.status_code == 401:
            raise ValueError('You are not authorized to update beers.')
        if resp.status_code == 404:
            raise ValueError('Beer does not exists.')
        raise ValueError('Received not supported status code.')

    def delete(self, token, sku):
        """DELETE a beer by SKU; True on success."""
        headers = {'Authorization': token}
        resp = requests.delete('http://{}/api/beers/{}'.format(self.api_url, sku),
                               headers=headers)
        if resp.status_code == 204:
            return True
        if resp.status_code == 401:
            raise ValueError('You are not authorized to delete beers.')
        if resp.status_code == 404:
            raise ValueError('Beer does not exists.')
        raise ValueError('Received not supported status code.')
"[email protected]"
] | |
a92ff6aed160b8ba81e659701cf2093c1ef42302 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/streamcomputer/models/Namespace.py | 7401dbf1cd24e0d952a524b8a717f8651ba9d04a | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,818 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Namespace(object):
    # Data-transfer model for a stream-computing namespace record.
    # This file is auto generated by the jdcloud code generator (see the
    # file header) - keep hand edits minimal, they are lost on regeneration.

    def __init__(self, id=None, name=None, pods=None, type=None, typeValue=None, deleted=None, createTime=None, updateTime=None, userName=None, status=None, sourceId=None, resourceId=None, podsUpdateTime=None):
        """
        :param id: (Optional)
        :param name: (Optional)
        :param pods: (Optional)
        :param type: (Optional)
        :param typeValue: (Optional)
        :param deleted: (Optional)
        :param createTime: (Optional)
        :param updateTime: (Optional)
        :param userName: (Optional)
        :param status: (Optional)
        :param sourceId: (Optional)
        :param resourceId: (Optional)
        :param podsUpdateTime: (Optional)
        """
        # All fields are optional and stored as-is; `id` and `type`
        # intentionally mirror the API field names even though they shadow
        # builtins.
        self.id = id
        self.name = name
        self.pods = pods
        self.type = type
        self.typeValue = typeValue
        self.deleted = deleted
        self.createTime = createTime
        self.updateTime = updateTime
        self.userName = userName
        self.status = status
        self.sourceId = sourceId
        self.resourceId = resourceId
        self.podsUpdateTime = podsUpdateTime
| [
"[email protected]"
] | |
0431fc4c60c2e3808d81f4546cc607af77e8a283 | c4c7fa5a04b083ad6ca38933a1b261e627cdcc98 | /CNN/CNN.py | becd9dda6acd739352cf79d1feccc380f4dab699 | [] | no_license | MasatoNagashima/Lissajous_Curves | 636ac5a2001122cd26e7ff8c166ab5c76b0844a4 | a7b047e7796c8b5206c4f0dbf30cdcdd02eb2a80 | refs/heads/master | 2023-06-24T00:28:59.459382 | 2021-07-21T09:15:56 | 2021-07-21T09:15:56 | 388,049,518 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,221 | py | import numpy as np
import matplotlib.pyplot as plt
import torch
#data
def normalization(data, indataRange, outdataRange):
    """Linearly map *data* from indataRange onto outdataRange (as float32)."""
    in_lo, in_hi = indataRange
    out_lo, out_hi = outdataRange
    unit = (data.astype(np.float32) - in_lo) / (in_hi - in_lo)
    return unit * (out_hi - out_lo) + out_lo
def getLissajousMovie(total_step, num_cycle, imsize, circle_r, x_mag, y_mag, vmin=-0.9, vmax=0.9):
    """Render a Lissajous curve as a movie of single-dot frames.

    total_step frames are produced over num_cycle periods; x_mag/y_mag set
    the two axis frequencies and circle_r the dot radius in pixels.
    Returns (imgs, seq): the frame stack normalized to [vmin, vmax] and the
    matching (x, y) coordinate sequence normalized the same way.
    """
    from PIL import Image, ImageDraw

    t = np.linspace(0, 2.0 * np.pi * num_cycle, total_step)
    x = np.cos(t * x_mag)
    y = np.sin(t * y_mag)

    frames = []
    for step in range(total_step):
        # Map the unit-circle coordinates into pixel space: 40% of the
        # image size as radius, centered in the frame.
        px = (x[step] * (imsize * 0.4)) + imsize / 2
        py = (y[step] * (imsize * 0.4)) + imsize / 2
        frame = Image.new("RGB", (imsize, imsize), "white")
        ImageDraw.Draw(frame).ellipse(
            (px - circle_r, py - circle_r, px + circle_r, py + circle_r), fill=128)
        frames.append(np.expand_dims(np.asarray(frame), 0))

    imgs = np.vstack(frames)
    imgs = normalization(imgs.astype(np.float32), [0, 255], [vmin, vmax])
    seq = normalization(np.c_[x, y].astype(np.float32), [-1.0, 1.0], [vmin, vmax])
    return imgs, seq
# Generate a 300-frame 1:3 Lissajous movie (32x32 pixels, values in [-1, 1])
# and reorder it to channels-first (N, C, H, W) for the conv net.
Image, Sequence = getLissajousMovie(total_step=300, num_cycle = 1, imsize=32, circle_r=3, x_mag=1, y_mag=3,vmin=-1,vmax=1)
Image_sample = Image.transpose(0,3,1,2)
# 250/50 train/validation split of the frames, served in shuffled batches of 10.
train_dataset, val_dataset = torch.utils.data.random_split(Image_sample, [250, 50])
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=10, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=10, shuffle=True)
#learning
import torch
import torch.nn as nn
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset
class ConvAutoencoder(nn.Module):
    """Convolutional autoencoder for 3x32x32 frames with a 20-d bottleneck."""

    def __init__(self):
        super(ConvAutoencoder, self).__init__()
        # Encoder convolutions: 3x32x32 -> 8x15x15 -> 16x7x7 -> 20x5x5.
        self.enc_conv = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=8, kernel_size=4, stride=2), nn.LeakyReLU(),
            nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=2), nn.LeakyReLU(),
            nn.Conv2d(in_channels=16, out_channels=20, kernel_size=3, stride=1), nn.LeakyReLU(),
        )
        # Encoder MLP: flattened 20*5*5 features down to the 20-d code.
        self.enc_lnr = nn.Sequential(
            nn.Linear(20 * 5 * 5, 100), nn.Tanh(),
            nn.Linear(100, 50), nn.Tanh(),
            nn.Linear(50, 20), nn.Tanh(),
        )
        # Decoder MLP: 20-d code back up to the 20*5*5 feature map.
        self.dec_lnr = nn.Sequential(
            nn.Linear(20, 50), nn.Tanh(),
            nn.Linear(50, 100), nn.Tanh(),
            nn.Linear(100, 20 * 5 * 5), nn.Tanh(),
        )
        # Decoder transposed convolutions: 20x5x5 -> 16x7x7 -> 8x15x15 -> 3x32x32.
        self.dec_conv = nn.Sequential(
            nn.ConvTranspose2d(in_channels=20, out_channels=16, kernel_size=3, stride=1), nn.LeakyReLU(),
            nn.ConvTranspose2d(in_channels=16, out_channels=8, kernel_size=3, stride=2), nn.LeakyReLU(),
            nn.ConvTranspose2d(in_channels=8, out_channels=3, kernel_size=4, stride=2), nn.LeakyReLU(),
        )

    def forward(self, in_img):
        """Encode *in_img* to the bottleneck and decode it back to an image."""
        feat = self.enc_conv(in_img)
        batch, ch, h, w = feat.shape
        code = self.enc_lnr(feat.view(batch, ch * h * w))
        decoded = self.dec_lnr(code)
        out_img = self.dec_conv(decoded.view(batch, ch, h, w))
        return out_img
def train_net(n_epochs, train_loader, net, optimizer_cls = optim.Adam, loss_fn = nn.MSELoss(), device = "cuda:0"):
    """Train *net* as an autoencoder on the batches of *train_loader*.

    Args:
        n_epochs: number of passes over the training data.
        train_loader: iterable yielding input batches (targets = inputs).
        net: the network to optimize; it is moved onto *device*.
        optimizer_cls: optimizer factory, instantiated with lr=0.001.
        loss_fn: reconstruction loss.
        device: "cpu" or "cuda:0".

    Returns:
        List containing the mean batch loss of every epoch.
    """
    losses = []
    optimizer = optimizer_cls(net.parameters(), lr = 0.001)
    net.to(device)
    for epoch in range(n_epochs):
        running_loss = 0.0
        n_batches = 0
        net.train()
        for XX in train_loader:
            XX = XX.to(device)
            optimizer.zero_grad()
            XX_pred = net(XX)
            loss = loss_fn(XX, XX_pred)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            n_batches += 1
        # Average over the real batch count. The original divided by the
        # last enumerate index (`running_loss / i`), which under-counted by
        # one and raised ZeroDivisionError for single-batch loaders.
        epoch_loss = running_loss / max(n_batches, 1)
        losses.append(epoch_loss)
        if epoch % 1000 == 0:
            # First epoch truncates the log file; later epochs append.
            mode = "w" if epoch == 0 else "a"
            with open("step_loss.txt", mode) as t:
                t.write("epoch:{}, train_loss:{}\n".format(epoch, epoch_loss))
            print("epoch", epoch, ": ", epoch_loss)
    return losses
# Train the autoencoder on the Lissajous frames.
net = ConvAutoencoder()
losses = train_net(n_epochs = 10000, train_loader = train_loader, net = net)
#check: reconstruct one training batch and save an input/output image pair
img_num = 5
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# NOTE(review): `idx` is computed but never used below (img_num is used
# instead) - looks like leftover code.
idx = int(np.random.random() * Image.shape[0])
for train_image in train_loader:
    # Run a single batch through the network, then move everything back to
    # the CPU as (N, H, W, C) numpy arrays for plotting.
    train_Image = train_image.to(device)
    pred_Image = net.forward(train_Image)
    train_Image = train_Image.permute(0,2,3,1)
    pred_Image = pred_Image.permute(0,2,3,1)
    train_Image = train_Image.cpu().detach().numpy()
    pred_Image = pred_Image.cpu().detach().numpy()
    break
plt.figure(figsize=(5,5))
# Rescale from the network's [-0.9, 0.9] range into [0.1, 0.9] for imsave.
train_Img = normalization(train_Image[img_num].astype(np.float32), [-0.9, 0.9], [0.1, 0.9])
plt.imsave("train_data.png", train_Img)
plt.figure(figsize=(5,5))
pred_Img = normalization(pred_Image[img_num].astype(np.float32), [-0.9, 0.9], [0.1, 0.9])
plt.imsave("test_data.png", pred_Img)
"[email protected]"
] | |
c0a7f4fd9d1be8d04249ec15c739c154577d96e7 | b1aab1ec361d7ac27f58a4ed1b73a28ffbd4ee75 | /omniWheelCareRobot/rosCode/src/carebot_navigation/nodes/paper.py | 4beb44481ea4005d54e55eb745db80d3c43d2b1f | [] | no_license | nkc3g4/ROS_Service_Robot | b98ad7443cba59a827d538cbf873c871923f3333 | c7aa68b628af522fd87181a2dc983bead98fb96c | refs/heads/master | 2023-03-15T21:11:08.067278 | 2023-03-13T08:16:59 | 2023-03-13T08:16:59 | 206,118,089 | 3 | 0 | null | 2023-03-13T08:17:00 | 2019-09-03T16:05:25 | HTML | UTF-8 | Python | false | false | 10,334 | py | #!/usr/bin/env python2
################################################
# Copyright(c): 2016-2018 www.corvin.cn
################################################
# Author: corvin
################################################
# Description:
# four destination postion auto navigation.
################################################
# History:
# 20180413: init this file.
################################################
import rospy
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist, PoseWithCovariance
from std_msgs.msg import Header
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from random import sample
from math import pow, sqrt
from iot_modules.msg import IOTnet
import time
import os
class PositionNav():
    """Drives the robot through a fixed waypoint tour via move_base.

    All of the tour logic runs inside __init__: it publishes an initial
    pose estimate for AMCL, then sends each goal of `sequeue` to the
    move_base action server; after each named station it runs the
    trash-arm shell script and signals the IoT network.
    """
    def __init__(self):
        rospy.init_node('position_nav_node', anonymous=True)
        rospy.on_shutdown(self.shutdown)
        # How long in seconds should the robot pause at each location?
        self.rest_time = rospy.get_param("~rest_time", 3)
        # Are we running in the fake simulator?
        self.fake_test = rospy.get_param("~fake_test", False)
        # Goal state return values
        goal_states = ['PENDING', 'ACTIVE', 'PREEMPTED',
                       'SUCCEEDED', 'ABORTED', 'REJECTED',
                       'PREEMPTING', 'RECALLING', 'RECALLED',
                       'LOST']
        # Set up the goal locations. Poses are defined in the map frame.
        # An easy way to find the pose coordinates is to point-and-click
        # Nav Goals in RViz when running in the simulator.
        #
        # Pose coordinates are then displayed in the terminal
        # that was used to launch RViz.
        locations = dict()
        locations['one-1'] = Pose(Point(-0.6353,-0.1005,0.0), Quaternion(0.0,0.0,0.9793,0.20249))
        locations['one'] = Pose(Point(-1.4373,0.2436,0.0), Quaternion(0.0,0.0,0.9764,0.2159))
        locations['two-1'] = Pose(Point(-0.6353,-0.1005,0.0), Quaternion(0.0,0.0,0.9793,0.20249))
        locations['two'] = Pose(Point(-0.3821,-0.5335,0.0), Quaternion(0.0,0.0,-0.8500,0.5267))
        locations['three-1'] = Pose(Point(-0.1248,0.4022,0.0), Quaternion(0.0,0.0,0.7374,0.67542))
        locations['three'] = Pose(Point(-0.8292,1.0313,0.0), Quaternion(0.0,0.0,0.9744,0.2243))
        locations['four-1'] = Pose(Point(-0.1248,0.4022,0.0), Quaternion(0.0,0.0,0.7374,0.67542))
        locations['four-2'] = Pose(Point(0.5078,0.1495,0.0), Quaternion(0.0,0.0,0.9818,0.1898))
        locations['four'] = Pose(Point(0.4435,0.3268,0.0), Quaternion(0.0,0.0,0.5583,0.8296))
        locations['initial'] = locations['one']
        # 2018.8.6 backhome code
        # locations['back'] = initial_pose
        # Publisher to manually control the robot (e.g. to stop it)
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self.IOTnet_pub = rospy.Publisher('/IOT_cmd', IOTnet, queue_size=10)
        self.initial_pub = rospy.Publisher('initialpose', PoseWithCovarianceStamped, queue_size=10)
        # Subscribe to the move_base action server
        self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
        rospy.loginfo("Waiting for move_base action server...")
        # Wait 60 seconds for the action server to become available
        self.move_base.wait_for_server(rospy.Duration(60))
        rospy.loginfo("Connected to move base server")
        # A variable to hold the initial pose of the robot to be set by
        # the user in RViz
        # NOTE(review): this *local* is never reassigned afterwards; the
        # subscriber callback stores into self.initial_pose, so the wait
        # loop and the distance update below appear to never observe a new
        # pose - confirm against the intended behavior.
        initial_pose = PoseWithCovarianceStamped()
        # Variables to keep track of success rate, running time,
        # and distance traveled
        n_locations = len(locations)
        n_goals = 0
        n_successes = 0
        i = 0
        distance_traveled = 0
        start_time = rospy.Time.now()
        running_time = 0
        location = ""
        last_location = ""
        sequeue=['four-2' ,'four']
        # Get the initial pose from the user
        rospy.loginfo("*** Click the 2D Pose Estimate button in RViz to set the robot's initial pose...")
        #rospy.wait_for_message('initialpose', PoseWithCovarianceStamped)
        self.last_location = Pose()
        rospy.Subscriber('initialpose', PoseWithCovarianceStamped, self.update_initial_pose)
        # Seed AMCL with the known start pose ('one') and a fixed covariance.
        setpose = PoseWithCovarianceStamped(Header(0,rospy.Time(),"map"), PoseWithCovariance(locations['initial'], [0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06853891945200942]))
        self.initial_pub.publish(setpose)
        # Make sure we have the initial pose
        rospy.sleep(1)
        # NOTE(review): header.stamp is a rospy Time, so comparing it with
        # the empty string looks questionable - verify this loop can ever
        # iterate more than once.
        while initial_pose.header.stamp == "":
            rospy.sleep(1)
        rospy.sleep(1)
        # locations['back'] = Pose()
        rospy.loginfo("Starting position navigation ")
        # Begin the main loop and run through a sequence of locations
        while not rospy.is_shutdown():
            # If we've gone through the all sequence, then exit
            if i == n_locations:
                rospy.logwarn("Now reach all destination, now exit...")
                rospy.signal_shutdown('Quit')
                break
            # Get the next location in the current sequence
            location = sequeue[i]
            # Keep track of the distance traveled.
            # Use updated initial pose if available.
            if initial_pose.header.stamp == "":
                distance = sqrt(pow(locations[location].position.x -
                                    locations[last_location].position.x, 2) +
                                pow(locations[location].position.y -
                                    locations[last_location].position.y, 2))
            else:
                rospy.loginfo("Updating current pose.")
                distance = sqrt(pow(locations[location].position.x -
                                    initial_pose.pose.pose.position.x, 2) +
                                pow(locations[location].position.y -
                                    initial_pose.pose.pose.position.y, 2))
                initial_pose.header.stamp = ""
            # Store the last location for distance calculations
            last_location = location
            # Increment the counters
            i += 1
            n_goals += 1
            # Set up the next goal location
            self.goal = MoveBaseGoal()
            self.goal.target_pose.pose = locations[location]
            self.goal.target_pose.header.frame_id = 'map'
            self.goal.target_pose.header.stamp = rospy.Time.now()
            # Let the user know where the robot is going next
            rospy.loginfo("Going to: " + str(location))
            # Start the robot toward the next location
            self.move_base.send_goal(self.goal) #move_base.send_goal()
            # Allow 5 minutes to get there
            finished_within_time = self.move_base.wait_for_result(rospy.Duration(300))
            # Map to 4 point nav: cur_position is 1..4 for the named
            # stations, -1 for intermediate waypoints.
            cur_position = -1
            position_seq = ['one','two','three','four']
            if str(location) in position_seq:
                cur_position = position_seq.index(str(location))+1
            # Check for success or failure
            if not finished_within_time:
                self.move_base.cancel_goal() #move_base.cancle_goal()
                rospy.loginfo("Timed out achieving goal")
            else:
                state = self.move_base.get_state() #move_base.get_state()
                if state == GoalStatus.SUCCEEDED:
                    rospy.loginfo("Goal succeeded!")
                    n_successes += 1
                    distance_traveled += distance
                    rospy.loginfo("State:" + str(state))
                    # At a named station, run the trash arm and notify the
                    # IoT network, then wait for the arm cycle to finish.
                    if cur_position!=-1:
                        os.system("/home/sz/scripts/./arm_trash.sh")
                        self.IOTnet_pub.publish(5)
                        rospy.sleep(12)
                else:
                    rospy.loginfo("Goal failed with error code: " + str(goal_states[state]))
                    # The arm routine also runs on failure at a station.
                    if cur_position != -1:
                        os.system("/home/sz/scripts/./arm_trash.sh")
                        self.IOTnet_pub.publish(5)
                        rospy.sleep(12)
            # if cur_position != -1:
            # os.system("/home/sz/scripts/./arm_trash.sh")
            # How long have we been running?
            running_time = rospy.Time.now() - start_time
            running_time = running_time.secs / 60.0
            # Print a summary success/failure, distance traveled and time elapsed
            rospy.loginfo("Success so far: " + str(n_successes) + "/" +
                          str(n_goals) + " = " +
                          str(100 * n_successes/n_goals) + "%")
            rospy.loginfo("Running time: " + str(trunc(running_time, 1)) +
                          " min Distance: " + str(trunc(distance_traveled, 1)) + " m")
            rospy.sleep(self.rest_time)

    def update_initial_pose(self, initial_pose):
        """Subscriber callback: remember the latest RViz pose estimate."""
        self.initial_pose = initial_pose

    def shutdown(self):
        """Cancel the active goal and stop the base before the node exits."""
        rospy.loginfo("Stopping the robot...")
        self.move_base.cancel_goal()
        rospy.sleep(2)
        self.cmd_vel_pub.publish(Twist())
        rospy.sleep(1)
def trunc(f, n):
    # Truncates a float f to n decimal places without rounding.
    # Done arithmetically: int() drops the fraction toward zero, so both
    # positive and negative values are truncated (not floored). The
    # original string-slicing approach raised ValueError for values whose
    # repr uses scientific notation (e.g. 1e-08).
    factor = 10.0 ** n
    return int(f * factor) / factor
if __name__ == '__main__':
    # Run the waypoint tour; a ROS shutdown (Ctrl-C) ends the spin cleanly.
    try:
        PositionNav()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("AMCL position navigation finished.")
| [
"[email protected]"
] | |
072d4ae4b97300e00616bfcfe2bf5f5bda776c67 | 4cb5bc85920617f0c99a0f417ab8cc8ca974dda6 | /controller/modules/UsageReport.py | b12acd5bef504060b4b8b5584f89f1986e426fe7 | [
"MIT"
] | permissive | saumitraaditya/modifiedControllers | 57d73be1a14859ad3d419dc1bb5f06ae4418db51 | ffd5078c64df2aa983c49be5a7f230196afe6ec3 | refs/heads/master | 2020-03-16T22:32:18.588196 | 2018-05-11T13:27:49 | 2018-05-11T13:27:49 | 133,042,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,238 | py | # ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import datetime
import hashlib
import threading
try:
import simplejson as json
except ImportError:
import json
import urllib.request as urllib2
from controller.framework.ControllerModule import ControllerModule
class UsageReport(ControllerModule):
    """Periodically collects anonymized usage statistics and posts them to
    the configured stat server.

    Access to ``self._stat_data`` is guarded by ``self.lck`` because
    ``timer_method`` (timer thread) and ``create_report`` (CBT completion)
    race on it.
    """

    def __init__(self, cfx_handle, module_config, module_name):
        super(UsageReport, self).__init__(cfx_handle, module_config, module_name)
        # _stat_data holds the assembled report plus two flags:
        #   ready           - a report is staged and waiting to be sent
        #   pending_request - a SIG_QUERY_REPORTING_DATA CBT is in flight
        self._stat_data = {"ready": False, "pending_request": False}
        # Start in the past so the first timer tick triggers a report.
        self.submit_time = datetime.datetime(2015, 1, 1, 0, 0)
        self.lck = threading.Lock()

    def initialize(self):
        self.register_cbt("Logger", "LOG_INFO", "{0} Loaded".format(self._module_name))

    def process_cbt(self, cbt):
        """Dispatch completed CBTs; only reporting-data responses are used."""
        if cbt.op_type == "Response":
            if cbt.request.action == "SIG_QUERY_REPORTING_DATA":
                if not cbt.response.status:
                    self.register_cbt("Logger", "LOG_WARNING",
                                      "CBT failed {0}".format(cbt.response.data))
                    self.free_cbt(cbt)
                    return
                # create_report frees the CBT itself on success.
                self.create_report(cbt)
            else:
                self.free_cbt(cbt)
        else:
            self.req_handler_default(cbt)

    def timer_method(self):
        """Timer tick: submit a ready report, or request fresh data.

        The lock is held only while inspecting/updating the shared state
        and is always released. (The original acquired the lock and fell
        through without releasing it whenever a request was still pending,
        dead-locking the module on the next tick.)
        """
        cur_time = datetime.datetime.now()
        data = None
        need_request = False
        with self.lck:
            if self._stat_data["ready"]:
                data = self._stat_data["data"]
                self._stat_data = {"ready": False, "pending_request": False}
            elif not self._stat_data["pending_request"] and cur_time > self.submit_time:
                self._stat_data["pending_request"] = True
                need_request = True
        # Perform the slow work (network / CBT registration) off-lock.
        if data is not None:
            self.submit_report(data)
            self.submit_time = datetime.datetime.now()
        elif need_request:
            self.request_report()

    def terminate(self):
        pass

    def request_report(self):
        """Ask the Signal module for the current reporting data."""
        self.register_cbt("Signal", "SIG_QUERY_REPORTING_DATA")

    def create_report(self, cbt):
        """Anonymize the reporting data and stage it for submission."""
        nid = self._cm_config["NodeId"]
        report_data = cbt.response.data
        for overlay_id in report_data:
            # Hash every identifying field; only digests leave the node.
            report_data[overlay_id] = {
                "xmpp_host": hashlib.sha1(report_data[overlay_id]["xmpp_host"].encode("utf-8")).hexdigest(),
                "xmpp_username": hashlib.sha1(report_data[overlay_id]["xmpp_username"].encode("utf-8")).hexdigest(),
            }
        stat = {
            "NodeId": hashlib.sha1(nid.encode("utf-8")).hexdigest(),
            "Time": str(datetime.datetime.now()),
            "Model": self._cfx_handle.query_param("Model"),
            "Version": self._cfx_handle.query_param("IpopVersion")
        }
        stat.update(report_data)
        with self.lck:
            self._stat_data["data"] = stat
            self._stat_data["ready"] = True
            self._stat_data["pending_request"] = False
        self.free_cbt(cbt)

    def submit_report(self, report_data):
        """POST the JSON report to the stat server; failures are logged only."""
        data = json.dumps(report_data).encode('utf8')
        self.register_cbt("Logger", "LOG_INFO", "data at submit report {0}".format(data))  # for debugging
        url = None
        try:
            url = "http://" + self._cm_config["StatServerAddress"] + ":" + \
                  str(self._cm_config["StatServerPort"]) + "/api/submit"
            req = urllib2.Request(url=url, data=data)
            req.add_header("Content-Type", "application/json")
            res = urllib2.urlopen(req)
            if res.getcode() == 200:
                log = "succesfully reported status to the stat-server {0}\n"\
                      "HTTP response code:{1}, msg:{2}"\
                      .format(url, res.getcode(), res.read())
                self.register_cbt("Logger", "LOG_INFO", log)
            else:
                self.register_cbt("Logger", "LOG_WARNING", "stat-server error code: {0}".format(res.getcode()))
                # The original used a bare `raise` here, which itself errors
                # out - there is no active exception to re-raise.
                raise ValueError("unexpected stat-server response code")
        except Exception as error:
            log = "statistics report failed to the stat-server ({0}).Error: {1}".format(url, error)
            self.register_cbt("Logger", "LOG_WARNING", log)
| [
"[email protected]"
] | |
18148aac1e2cf76a8922cb4bf5e3b01d452bc36d | da6fd64b34a99eb1f40558354c13e657d77e110b | /TMalign_index.py | 301908d0c88527e3c9d5740cfc6b7b8ef1387cff | [] | no_license | uagaug/DeNovoHeterodimers | 35566625df94220a963d8b0ac6160f6e0f4f2295 | 0614c43f1b0500accef84cf24521cfde1464e61b | refs/heads/master | 2020-03-16T05:27:54.672811 | 2018-12-15T00:10:23 | 2018-12-15T00:10:23 | 132,532,839 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | #!/usr/local/bin/python2.7
#get the alignment regions for TMalign of heptads
import os
from os import system,popen
import itertools
from sys import argv
import math
import numpy as np
import re
# import csv
# import matplotlib.pyplot as plt
#find indicies of all the letters, and deduce indicies of the starting points
string=argv[1]
repeat=int(argv[2])
hits=[]
vec=[]
final=[]
for i in range(len(string)-1):
if ((string[i] != "-") and i==0): #take care of this case: SSSSSSS-----------
hits.append(1)
if ((string[i] == "-") and (string[i+1] != "-")):
#print i
hits.append(i+2)
if ((string[i] != "-") and ((string[i+1] == "-")) or i==len(string)-2): #after or: take care of this case: ------SSSSSSS
#print i
hits.append(i+2)
#print len(hits)
for i in range((len(hits))/2):
#print range(hits[2*i],hits[2*i+1])
vec.extend(range(hits[2*i],hits[2*i+1]))
#print len(vec)
final.append(vec[0])
final.append(vec[repeat])
final.append(vec[repeat*2])
final.append(vec[repeat*3])
out=""
for i in final:
out+=str(i)
out+=","
ind=out[0:-1]
print ind | [
"[email protected]"
] | |
6b2624156474b14ccd1d1f09d73956b62ae4b0be | 3343c4e998f7748f6a5f9fb198baa798ede3cee0 | /python/video processing/chap11/header/paint_init.py | ae5a5a82e6242d6553fec55ab830174272aa20b9 | [] | no_license | jwo29/prj | c8bf028d63c255ba95814cdaf5a1303ee8c19256 | 6e7d5cefa36ae47ffea167e21f2ff926eda6008f | refs/heads/main | 2023-08-30T03:07:36.730416 | 2021-10-17T16:39:19 | 2021-10-17T16:39:19 | 406,631,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | """
그림판 프로그램 > 그리기 상수, 일반 명령 상수, 팔레트 관련 상수 정의
"""
DRAW_RECTANGLE = 0 #사각형 그리기
DRAW_CIRCLE = 1 # 원 그리기
DRAW_ECLIPSE = 2 # 타원 그리기
DRAW_LINE = 3 # 직선 그리기
DRAW_BRUSH = 4 # 브러시 그리기
ERASE = 5 # 지우개
OPEN = 6 # 열기 명령
SAVE = 7 # 저장 명령
PLUS = 8 # 밝게 하기 명령
MINUS = 9 # 어둡게 하기 명령
CREAR = 10 # 지우기 명령
COLOR = 11 # 색상 아이콘
PALETTE = 12 # 색상팔레트
HUE_IDX = 13 # 색상인덱스
# 전역 변수
mouse_mode, draw_mode = 0, 0 # 그리기 모드, 마우스 상태
pt1, pt2, Color = (0, 0), (0, 0), (0, 0, 0) # 시작 좌표, 종료 좌표
thickness = 3 # 선 두께
| [
"[email protected]"
] | |
7869e3b06a1fe31f15537a98e1d56fe63d6bfa77 | 58141d7fc37854efad4ad64c74891a12908192ed | /config/hawaiikai/node_013.py | 029989029620006785eb18681f78af4d8275b9fa | [] | no_license | stanleylio/fishie | b028a93b2093f59a8ceee4f78b55a91bb1f69506 | 0685045c07e4105934d713a0fd58c4bc28821ed6 | refs/heads/master | 2022-08-14T13:08:55.548830 | 2022-07-29T01:32:28 | 2022-07-29T01:32:28 | 30,433,819 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | name = 'Water Level'
location = 'Hawaii Kai, Miloli\'i'
google_earth_link = '#'
note = 'Ultrasonic tide gauge (XBee). 1Hz measurements; each transmission is average of 60 measurements. Firmware us11c, hardware v5.0.'
latitude = 21.287222
longitude = -157.717500
conf = [
{
'dbtag':'d2w',
'unit':'mm',
'description':'Distance from sensor to water surface',
'lb':301,
'ub':4999,
'interval':180,
},
{
'dbtag':'Vs',
'unit':'V',
'description':'Solar panel voltage',
'lb':0,
'ub':7.5,
'interval':180,
},
{
'dbtag':'idx',
'description':'Sample index',
'lb':24*60,
'interval':180,
},
]
if '__main__' == __name__:
for c in conf:
print('- - -')
for k, v in c.items():
print(k, ':', v)
import sys
sys.path.append('../..')
from os.path import basename
from storage.storage2 import create_table
create_table(conf, basename(__file__).split('.')[0].replace('_', '-'))
| [
"[email protected]"
] | |
1a965afb565c818f80100ac349ddad05542bc987 | 9d4bde3947edc11ba87d06068581c6a91696e8e6 | /chapter_04/property_tax.py | 93861eceff88c33d202efb0d679faab2fd36f7a3 | [] | no_license | zanariah8/Starting_Out_with_Python | e6d8c6cbd187043160c6408fc4ac5f47c35e7c57 | 92751fde462683c9d62934409fa469ccddc1d519 | refs/heads/master | 2020-04-23T20:38:13.712144 | 2019-02-18T20:24:15 | 2019-02-18T20:24:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | # this program displays property taxes
tax_factor = 0.0065 # represents the tax factor
# get the first lot number
print("Enter the property lot number ")
print("or enter 0 to end.")
lot = int(input("Lot number: "))
# continue processing as long as the user
# does not enter lot number 0
while lot != 0:
# get the property value
value = float(input("Enter the property value: "))
# calculate the property tax
tax = value * tax_factor
# display the tax
print("Property tax: $", format(tax, ",.2f"), sep="")
# get the next lot number
print("Enter the next lot number or enter 0 to end: ")
lot = int(input("Lot number: ")) | [
"[email protected]"
] | |
78ee1babe2ac3fb5103e70a8b6f52a367f0ce41b | 2e3ab830367b0d2e0fe5020c440cb83b685bd0af | /day12/maoyan/maoyan/spiders/maoyan_spider.py | d859eb00c29712f746d713869dcda9aba72216a1 | [] | no_license | KD-huhu/py_spider | 9dfde678b814b9dcfd2452f8fb1266eeb3f7ce26 | e62dea6d2a7315d2160f44f45c4b368d85cea770 | refs/heads/master | 2022-06-10T10:31:20.646414 | 2020-05-08T08:59:52 | 2020-05-08T08:59:52 | 261,740,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | # -*- coding: utf-8 -*-
import scrapy
from maoyan.items import MaoyanItem
class MaoyanSpiderSpider(scrapy.Spider):
name = 'maoyan_spider'
# allowed_domains = ['www']
start_urls = []
# 分页做法
for i in range(10):
base_url = 'https://maoyan.com/board/4?offset=%s' % (i*10)
start_urls.append(base_url)
def parse(self, response):
# 测试下载器下载好的response有没有数据
# print(response.text)
# 提取数据
# 实例化定义好的item,注意要将该包导入
item = MaoyanItem()
'''
response对象的xpath方法:不需要使用lxml在转
responsex.xpath('xpath表达式')--返回值:[selector对象]
从selector中提取数据的方法:
responsex.xpath('xpath表达式').extract_first()-就是一个字符串
responsex.xpath('xpath表达式').extract()---返回值是list,list里面是所有的字符串内容
'''
dd_list = response.xpath('//div[@class="main"]/dl/dd')
for dd in dd_list:
movie_title = dd.xpath('.//p[@class="name"]/a/@title').extract_first()
# print(movie_title)
movie_actor = dd.xpath('.//p[@class="star"]/text()').extract_first().strip()
date = dd.xpath('.//p[@class="releasetime"]/text()').extract_first()
scores = dd.xpath('.//p[@class="score"]/i/text()').extract()
detail = dd.xpath('.//p[@class="name"]/a/@href').extract_first()
# print(scores)
scores = ''.join(scores)
item['movie_title'] = movie_title
item['movie_actor'] = movie_actor
item['date'] = date
item['detail'] = detail
item['scores'] = scores
# print(item)
yield item | [
"[email protected]"
] | |
0378ca63bfa27a31cc8356cbe802518a596d4281 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5738606668808192_1/Python/Cerezo/2016QC-L.py | c1a1c48dc2ac38b824498c9448e21f605f802895 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | from math import sqrt
def merge(ev, od):
return ''.join(item for tup in zip(ev,od) for item in tup)
def Next(N, J):
coins = []
# generate jamcoins with 6 | sum a_i and 21 | sum (-)^i a_i
# automatically divisble by 3, 2, 3, 2, 7, 2, 3, 2, 3, unless sum a_i divisible by 30 (then 5 replaces 7)
# so let sum a_i = 6 and sum (-)^i a_i = 0. (easy to obtain more jamcoins with different conditions)
# as first and last digits are determined need to put 2 ones in the even slots and 2 ones in the odd slots
# there are 15 choose 2 possibilities for either, and 11025 > 500
aux = []
n = (N-2)/2-1 # 32 -> 14
for i in xrange(n):
for j in xrange(n-i):
aux.append(i*'0' + '1' + j*'0' + '1'+ (n-1-i-j)*'0')
for a in aux:
for b in aux[:5]: #can replace 5 by n
coins.append('1'+merge(a,b)+'1')
return (coins[:J], [3, 2, 3, 2, 7, 2, 3, 2, 3]) #okay for J < (n choose 2)**2
input = open(r'./C-large.in')
input.readline()
N, J = (int(n) for n in input.readline().strip().split())
#N, J = 32, 500
sol = Next(N, J)
tofile = True
if tofile:
with open(r'./outputC-L.txt', 'w') as output:
output.write('Case #1: \n')
for s in sol[0]:
output.write(s)
x = ' ' + ' '.join(str(j) for j in sol[1])+'\n'
output.write(x)
else:
for s in sol[0][:20]:
print s,
print sol[1]
| [
"[email protected]"
] | |
8c6af40819bd160ac8b36e5b40e2fac79332cb14 | bfca0915e74089d878ac8cd7dd7c85b785676cbd | /user.py | 88f0bcec4f47ae1006f4b9d58dc3c4e6b2845040 | [
"MIT"
] | permissive | Lourine/Password-Locker | c950464f508b0b073e6aae1f41864ed60899f5f0 | abacf65f15f09e778a74a506999735ee23ba589f | refs/heads/master | 2022-12-13T14:16:24.981747 | 2020-09-07T14:22:35 | 2020-09-07T14:22:35 | 292,849,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,023 | py | import pyperclip
class User:
"""
User class that generates new instances of a User
"""
user_list = [] #empty User List
def __init__(self, user_name, password):
self.user_name = user_name
self.password = password
def save_user(self):
"""
Save User Method saves a new user to the user_List
"""
User.user_list.append(self)
def delete_user(self):
'''
delete_user method deletes a saved user from the user_list
'''
User.user_list.remove(self)
@classmethod
def display_user(cls):
return cls.user_list
@classmethod
def user_exist(cls,user_name):
'''
Method that checks if a user exists from the user list.
Args:
string: User_name to search if it exists
Returns :
Boolean: True or false depending if the user exists
'''
for user in cls.user_list:
if user.user_name == user_name:
return True
return False
#Create the 2nd class
class Credentials():
"""
Create credentials class to help create new objects of credentials
"""
credentials_list = [] #empty credential list
def __init__(self,account,userName, password):
"""
method that defines user credentials to be stored
"""
self.account = account
self.userName = userName
self.password = password
def save_credentials(self):
"""
method to store a new credential to the credentials list
"""
Credentials.credentials_list.append(self)
def delete_credentials(self):
"""
delete_credentials method that deletes an account credentials from the credentials_list
"""
Credentials.credentials_list.remove(self)
@classmethod
def find_credential(cls, account):
"""
Method that takes in a account_name and returns a credential that matches that account_name.
"""
for credential in cls.credentials_list:
if credential.account == account:
return credential
@classmethod
def if_credential_exist(cls, account):
"""
Method that checks if a credential exists from the credential list and returns true or false depending if the credential exists.
Args:
account: accountName to search if it exists
Returns :
Boolean: True or false depending if the credential exists
"""
for credential in cls.credentials_list:
if credential.account == account:
return True
return
@classmethod
def display_credentials(cls):
"""
Method that returns all items in the credentials list
"""
return cls.credentials_list
@classmethod
def copy_credential(cls,account):
credential_found = Credentials.find_credential(account)
pyperclip.copy(credential_found.account)
| [
"[email protected]"
] | |
0dc91839dcead6fd88a1235c61ea296a0e3aa682 | e162a2993d32bec5e479a30572e691ef958c5274 | /fiscalite/wizard/etat_report_wizard.py | 0df56e8f092843c5b6da2d7bdcb5dfa64ef307b7 | [] | no_license | ly2ly/declaration-ci | 221990a7e8eb92f17c1c308b992661563c99eba7 | 41deef161b5cab3b19794a9639b67a32ecf7db4a | refs/heads/master | 2020-03-22T02:26:33.226824 | 2018-07-01T23:50:59 | 2018-07-01T23:50:59 | 139,370,660 | 2 | 0 | null | 2018-07-01T23:44:58 | 2018-07-01T23:44:57 | null | UTF-8 | Python | false | false | 589 | py |
from openerp.osv import osv, fields, orm
class etat_report_file(osv.osv_memory):
_name = 'etat.report.file'
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(etat_report_file, self).default_get(cr, uid, fields, context=context)
res.update({'file_name': context.get('file_name','ETAT DES TAXES DEDUTIBLES')+'.xls'})
if context.get('file'):
res.update({'file':context['file']})
return res
_columns = {
'file':fields.binary('File', filters='*.xls'),
'file_name':fields.char('File Name'),
} | [
"[email protected]"
] | |
6434bbf86cc1e36917716979de56e2265cda788e | 0186c056063e614fede210e242901dff9f16f64d | /pybo/urls.py | 516e4af8ba10eb77fc1f56f8a606bb2705240de8 | [] | no_license | DA-97/Django_Practice | 323606e3e2e9a13ea86101312aa89ef86acd6ba9 | d6dbdfd97f9165534f76437e169c079864f865cc | refs/heads/master | 2023-05-10T04:13:16.267700 | 2021-06-11T09:16:19 | 2021-06-11T09:16:19 | 375,540,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | from django.urls import path
from .views import base_views, question_views, answer_views, comment_views
app_name = 'pybo'
urlpatterns = [
path('', base_views.index, name='index'),
path('<int:question_id>/', base_views.detail, name='detail'),
path('question/create/', question_views.question_create, name='question_create'),
path('question/modify/<int:question_id>/', question_views.question_modify, name='question_modify'),
path('question/delete/<int:question_id>/', question_views.question_delete, name='question_delete'),
path('answer/create/<int:question_id>/', answer_views.answer_create, name='answer_create'),
path('answer/modify/<int:answer_id>/', answer_views.answer_modify, name='answer_modify'),
path('answer/delete/<int:answer_id>/', answer_views.answer_delete, name='answer_delete'),
path('comment/create/question/<int:question_id>/', comment_views.comment_create_question, name='comment_create_question'),
path('comment/modify/question/<int:comment_id>/', comment_views.comment_modify_question, name='comment_modify_question'),
path('comment/delete/question/<int:comment_id>/', comment_views.comment_delete_question, name='comment_delete_question'),
path('comment/create/answer/<int:answer_id>/', comment_views.comment_create_answer, name='comment_create_answer'),
path('comment/modify/answer/<int:comment_id>/', comment_views.comment_modify_answer, name='comment_modify_answer'),
path('comment/delete/answer/<int:comment_id>/', comment_views.comment_delete_answer, name='comment_delete_answer'),
] | [
"[email protected]"
] | |
368ba6e4f4eac521b082474e8135226995779545 | fa21bc693911f21224fd6017b986407f236859de | /setup.py | 32679b1b16e7ae7619e5b0b9d144388b30b103d3 | [] | no_license | cessor/blockhosts | 1e6e5d220bc99498f8a9f893518f4f7d9c0b4563 | 5ed28cb54333cc4662c08d226d2b8425bdce0286 | refs/heads/master | 2020-03-18T08:39:04.963792 | 2018-07-21T14:54:16 | 2018-07-21T14:54:16 | 134,521,165 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | #!/usr/bin/env python
from distutils.core import setup
setup(
name='blockhosts',
version='1.0',
description='Blocks Hosts',
author='Johannes Hofmeister',
author_email='[email protected]',
url='https://github.com/cessor/blockhosts/',
packages=['blockhosts'],
tests_require=[
'nose',
'kazookid'
],
scripts=['scripts/blockhosts',
'scripts/block',
'scripts/unblock',
'scripts/blockhosts.bat',
'scripts/block.bat',
'scripts/unblock.bat']
)
| [
"[email protected]"
] | |
8f5a220f757b3631c5b6e287696fadd3c2a710a4 | f3df657de524f61370be256b9d1f35c6fd48ec65 | /OA/urls.py | 0c6c3daefcb3dc6b94f64c2ed31c471fecc84205 | [] | no_license | caiwenshi/oa | b97cfa0e4f6c90732f72e50d534661f19a5b8b70 | 5d0690ebfb935eedddaf3415c831019e6a2ff830 | refs/heads/master | 2021-05-13T21:26:48.522716 | 2018-01-06T07:18:00 | 2018-01-06T07:18:00 | 116,461,075 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^login$', views.login, name='login'),
url(r'getemployeeinfo/(.+)', views.process_get_employee_info, name='process_get_employee_info'),
url(r'getprojectinfo/(.+)', views.process_get_project_info, name='process_get_project_info'),
url(r'^loginapp$', views.loginapp, name='loginapp'),
]
| [
"[email protected]"
] | |
d86f9f29a7a306dccaa447abfb3f5d50184286c1 | dde9442399c78414c05f7f36803c861638065ca3 | /Comprehensions-Exercise/Capitals.py | e993ebdc1e709ca710b5f2339a4a29758b83b07f | [] | no_license | Vigyrious/python_advanced | 6778eed9e951b5a11b22f6c6d8ea5b160c3aa00d | 67db470e78b194aea1f9a35283d5a88b0f6ab94c | refs/heads/main | 2023-03-23T12:24:59.688699 | 2021-03-12T20:53:04 | 2021-03-12T20:53:04 | 347,192,305 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | countries = [i for i in input().split(", ")]
capitals = [i for i in input().split(", ")]
[print(f"{k} -> {capitals[i]}") for (i,k) in enumerate(countries)] | [
"[email protected]"
] | |
f0e6b87a9d4066e0b2e897ad0ea6fd098fd80122 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/servicebus/azure-servicebus/tests/mgmt_tests/test_mgmt_subscriptions.py | 4d64962b000f91e8a91eb2927f911d1ea20dad4a | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 15,492 | py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import logging
import pytest
import datetime
import msrest
from azure.servicebus.management import ServiceBusAdministrationClient, SubscriptionProperties
from utilities import get_logger
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from devtools_testutils import AzureMgmtTestCase, CachedResourceGroupPreparer
from servicebus_preparer import (
CachedServiceBusNamespacePreparer,
ServiceBusNamespacePreparer
)
from mgmt_test_utilities import clear_topics
_logger = get_logger(logging.DEBUG)
class ServiceBusAdministrationClientSubscriptionTests(AzureMgmtTestCase):
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_subscription_create_by_name(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_topics(mgmt_service)
topic_name = "topic_testaddf"
subscription_name = "sub_testkkk"
try:
mgmt_service.create_topic(topic_name)
mgmt_service.create_subscription(topic_name, subscription_name)
subscription = mgmt_service.get_subscription(topic_name, subscription_name)
assert subscription.name == subscription_name
assert subscription.availability_status == 'Available'
assert subscription.status == 'Active'
finally:
mgmt_service.delete_subscription(topic_name, subscription_name)
mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_subscription_create_with_subscription_description(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_topics(mgmt_service)
topic_name = "iweidk"
subscription_name = "kdosako"
try:
mgmt_service.create_topic(topic_name)
mgmt_service.create_subscription(
topic_name,
subscription_name=subscription_name,
auto_delete_on_idle=datetime.timedelta(minutes=10),
dead_lettering_on_message_expiration=True,
default_message_time_to_live=datetime.timedelta(minutes=11),
enable_batched_operations=True,
lock_duration=datetime.timedelta(seconds=13),
max_delivery_count=14,
requires_session=True
)
subscription = mgmt_service.get_subscription(topic_name, subscription_name)
assert subscription.name == subscription_name
assert subscription.auto_delete_on_idle == datetime.timedelta(minutes=10)
assert subscription.dead_lettering_on_message_expiration == True
assert subscription.default_message_time_to_live == datetime.timedelta(minutes=11)
assert subscription.enable_batched_operations == True
assert subscription.lock_duration == datetime.timedelta(seconds=13)
assert subscription.max_delivery_count == 14
assert subscription.requires_session == True
finally:
mgmt_service.delete_subscription(topic_name, subscription_name)
mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_subscription_create_duplicate(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_topics(mgmt_service)
topic_name = "dqkodq"
subscription_name = 'kkaqo'
try:
mgmt_service.create_topic(topic_name)
mgmt_service.create_subscription(topic_name, subscription_name)
with pytest.raises(ResourceExistsError):
mgmt_service.create_subscription(topic_name, subscription_name)
finally:
mgmt_service.delete_subscription(topic_name, subscription_name)
mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_subscription_update_success(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_topics(mgmt_service)
topic_name = "fjrui"
subscription_name = "eqkovc"
try:
topic_description = mgmt_service.create_topic(topic_name)
subscription_description = mgmt_service.create_subscription(topic_description.name, subscription_name)
# Try updating one setting.
subscription_description.lock_duration = datetime.timedelta(minutes=2)
mgmt_service.update_subscription(topic_description.name, subscription_description)
subscription_description = mgmt_service.get_subscription(topic_name, subscription_name)
assert subscription_description.lock_duration == datetime.timedelta(minutes=2)
# Now try updating all settings.
subscription_description.auto_delete_on_idle = datetime.timedelta(minutes=10)
subscription_description.dead_lettering_on_message_expiration = True
subscription_description.default_message_time_to_live = datetime.timedelta(minutes=11)
subscription_description.lock_duration = datetime.timedelta(seconds=12)
subscription_description.max_delivery_count = 14
# topic_description.enable_partitioning = True # Cannot be changed after creation
# topic_description.requires_session = True # Cannot be changed after creation
mgmt_service.update_subscription(topic_description.name, subscription_description)
subscription_description = mgmt_service.get_subscription(topic_description.name, subscription_name)
assert subscription_description.auto_delete_on_idle == datetime.timedelta(minutes=10)
assert subscription_description.dead_lettering_on_message_expiration == True
assert subscription_description.default_message_time_to_live == datetime.timedelta(minutes=11)
assert subscription_description.max_delivery_count == 14
assert subscription_description.lock_duration == datetime.timedelta(seconds=12)
# assert topic_description.enable_partitioning == True
# assert topic_description.requires_session == True
finally:
mgmt_service.delete_subscription(topic_name, subscription_name)
mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_subscription_update_invalid(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_topics(mgmt_service)
topic_name = "dfjfj"
subscription_name = "kwqxc"
try:
topic_description = mgmt_service.create_topic(topic_name)
subscription_description = mgmt_service.create_subscription(topic_name, subscription_name)
# handle a null update properly.
with pytest.raises(AttributeError):
mgmt_service.update_subscription(topic_name, None)
# handle an invalid type update properly.
with pytest.raises(AttributeError):
mgmt_service.update_subscription(topic_name, Exception("test"))
# change the name to a topic that doesn't exist; should fail.
subscription_description.name = "iewdm"
with pytest.raises(HttpResponseError):
mgmt_service.update_subscription(topic_name, subscription_description)
subscription_description.name = subscription_name
# change the name to a topic with an invalid name exist; should fail.
subscription_description.name = ''
with pytest.raises(msrest.exceptions.ValidationError):
mgmt_service.update_subscription(topic_name, subscription_description)
subscription_description.name = topic_name
# change to a setting with an invalid value; should still fail.
subscription_description.lock_duration = datetime.timedelta(days=25)
with pytest.raises(HttpResponseError):
mgmt_service.update_subscription(topic_name, subscription_description)
subscription_description.lock_duration = datetime.timedelta(minutes=5)
finally:
mgmt_service.delete_subscription(topic_name, subscription_name)
mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_subscription_delete(self, servicebus_namespace_connection_string):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_topics(mgmt_service)
topic_name = 'test_topicgda'
subscription_name_1 = 'test_sub1da'
subscription_name_2 = 'test_sub2gcv'
mgmt_service.create_topic(topic_name)
mgmt_service.create_subscription(topic_name, subscription_name_1)
subscriptions = list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 1
mgmt_service.create_subscription(topic_name, subscription_name_2)
subscriptions = list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 2
description = mgmt_service.get_subscription(topic_name, subscription_name_1)
mgmt_service.delete_subscription(topic_name, description.name)
subscriptions = list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 1 and subscriptions[0].name == subscription_name_2
mgmt_service.delete_subscription(topic_name, subscription_name_2)
subscriptions = list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 0
mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_subscription_list(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_topics(mgmt_service)
topic_name = 'lkoqxc'
subscription_name_1 = 'testsub1'
subscription_name_2 = 'testsub2'
mgmt_service.create_topic(topic_name)
subscriptions = list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 0
mgmt_service.create_subscription(topic_name, subscription_name_1)
mgmt_service.create_subscription(topic_name, subscription_name_2)
subscriptions = list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 2
assert subscriptions[0].name == subscription_name_1
assert subscriptions[1].name == subscription_name_2
mgmt_service.delete_subscription(topic_name, subscription_name_1)
mgmt_service.delete_subscription(topic_name, subscription_name_2)
subscriptions = list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 0
mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_subscription_list_runtime_properties(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_topics(mgmt_service)
topic_name = 'dkoamv'
subscription_name = 'cxqplc'
mgmt_service.create_topic(topic_name)
subs = list(mgmt_service.list_subscriptions(topic_name))
subs_infos = list(mgmt_service.list_subscriptions_runtime_properties(topic_name))
assert len(subs) == len(subs_infos) == 0
mgmt_service.create_subscription(topic_name, subscription_name)
subs = list(mgmt_service.list_subscriptions(topic_name))
subs_infos = list(mgmt_service.list_subscriptions_runtime_properties(topic_name))
assert len(subs) == 1 and len(subs_infos) == 1
assert subs[0].name == subs_infos[0].name == subscription_name
info = subs_infos[0]
assert info.accessed_at_utc is not None
assert info.updated_at_utc is not None
assert info.active_message_count == 0
assert info.dead_letter_message_count == 0
assert info.transfer_dead_letter_message_count == 0
assert info.transfer_message_count == 0
mgmt_service.delete_subscription(topic_name, subscription_name)
subs_infos = list(mgmt_service.list_subscriptions_runtime_properties(topic_name))
assert len(subs_infos) == 0
mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_subscription_get_runtime_properties_basic(self, servicebus_namespace_connection_string):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_topics(mgmt_service)
topic_name = 'dcvxqa'
subscription_name = 'xvazzag'
mgmt_service.create_topic(topic_name)
mgmt_service.create_subscription(topic_name, subscription_name)
sub_runtime_properties = mgmt_service.get_subscription_runtime_properties(topic_name, subscription_name)
assert sub_runtime_properties
assert sub_runtime_properties.name == subscription_name
assert sub_runtime_properties.created_at_utc is not None
assert sub_runtime_properties.accessed_at_utc is not None
assert sub_runtime_properties.updated_at_utc is not None
assert sub_runtime_properties.active_message_count == 0
assert sub_runtime_properties.dead_letter_message_count == 0
assert sub_runtime_properties.transfer_dead_letter_message_count == 0
assert sub_runtime_properties.transfer_message_count == 0
mgmt_service.delete_subscription(topic_name, subscription_name)
mgmt_service.delete_topic(topic_name)
def test_subscription_properties_constructor(self):
with pytest.raises(TypeError):
SubscriptionProperties("randomname")
| [
"[email protected]"
] | |
142f5650121718ffc18771ecb419ce37ffd1262e | 2a82fc809097eae07e4eb036fbbaae5f652025db | /Task-3.py | 2706c1b444a44a02f22d870c8e22e1fed087ffbb | [] | no_license | Ashish7783073120/MyCaptainTestPython | 697c9ad18c152c366b5a153023987ca76f8b03c8 | 530091eb46019876860b4186b5a0ec5c4a69abc3 | refs/heads/master | 2020-07-12T04:23:34.853603 | 2019-08-27T15:11:06 | 2019-08-27T15:11:06 | 204,717,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | # Python-beginners-tasks
def longest_word(words):
word_len = []
for x in words:
word_len.append((len(x), x))
word_len.sort()
return word_len[-1][1]
print(longest_word(["Ashish", "Kumar", "Bhuwaniya"]))
| [
"[email protected]"
] | |
bdd82bc7c06b92fde32cac64864d082cf2160e38 | 1fde4cf8f0c611d5424f1cc5d041974e8239edd7 | /frame_process.py | 490779ac05e060a27928d749b8174aa77f2dabbc | [
"Apache-2.0"
] | permissive | nmabhi/Webface | 1ab445ec797b46b05abe5218c20a68501393768b | 8833751f34973a9cb19df8e73313232c2eb426d4 | refs/heads/master | 2021-01-01T04:14:21.969211 | 2017-08-21T10:02:44 | 2017-08-21T10:02:44 | 97,146,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,159 | py | import os
import sys
fileDir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(fileDir, "..", ".."))
import argparse
import cv2
import imagehash
import json
from PIL import Image
import numpy as np
import os
import StringIO
import urllib
import base64
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
from sklearn.manifold import TSNE
from sklearn.svm import SVC
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import openface
import pprint
import pickle
import json
from numpy import genfromtxt
import requests
from flask import Flask,request,jsonify
# Flask application object serving this module's HTTP endpoints.
app = Flask(__name__)

# Model directories: dlib landmark predictor and the pretrained OpenFace torch net
# both live under <this file's dir>/models.
modelDir = os.path.join(fileDir, 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')

# Command-line configuration for model paths and runtime options.
parser = argparse.ArgumentParser()
parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
                    default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
                    default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
                    help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
# NOTE(review): argparse `type=bool` is almost always a bug — bool("False") is True,
# so any explicit value passed on the command line makes this flag truthy.
# Consider action='store_true' instead; left unchanged here to preserve the CLI.
parser.add_argument('--unknown', type=bool, default=False,
                    help='Try to predict unknown people')
parser.add_argument('--port', type=int, default=9000,
                    help='WebSocket Port')
args = parser.parse_args()

# Face aligner (dlib landmarks) and OpenFace embedding network, initialized once
# at import time from the parsed command-line arguments.
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
                              cuda=args.cuda)
def loadModel():
    """Load the pickled classifier model from ``model.pkl`` next to this file.

    Returns:
        The unpickled classifier object (presumably an SVM — confirm with
        the training script that produced ``model.pkl``).

    Raises:
        IOError: if ``model.pkl`` does not exist.

    Note:
        ``pickle.load`` executes arbitrary code from the file; only load
        model files from a trusted source.
    """
    # os.path.join instead of string concatenation keeps the path portable.
    with open(os.path.join(fileDir, 'model.pkl'), 'rb') as f:
        return pickle.load(f)
def loadOfflineModel():
    """Load the offline-trained (label encoder, classifier) pair.

    Reads ``Feature_dir/classifier.pkl`` relative to this file.

    Returns:
        tuple: ``(le, clf)`` — the label encoder and the classifier.

    Raises:
        IOError: if the pickle file does not exist.
    """
    with open(os.path.join(fileDir, 'Feature_dir', 'classifier.pkl'), 'rb') as f:
        if sys.version_info[0] < 3:
            (le, clf) = pickle.load(f)
        else:
            # Pickles written under Python 2 need latin1 to decode binary
            # (e.g. numpy) payloads when loaded under Python 3.
            (le, clf) = pickle.load(f, encoding='latin1')
    # Single exit point instead of the original duplicated returns.
    return (le, clf)
def loadPeople():
    """Load the pickled people registry from ``people.pkl`` next to this file.

    Returns:
        The unpickled object (presumably a collection of known person
        names/ids — confirm against the code that writes ``people.pkl``).

    Raises:
        IOError: if ``people.pkl`` does not exist.
    """
    # os.path.join instead of string concatenation keeps the path portable.
    with open(os.path.join(fileDir, 'people.pkl'), 'rb') as f:
        return pickle.load(f)
# Eagerly load the offline-trained label encoder and classifier at import
# time so request handlers can use them directly. The alternative loaders
# (online model, people registry) are currently disabled.
#classifier=loadModel()
(le,clf) = loadOfflineModel()
#people=loadPeople()
dataURL="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAA0JCgsKCA0LCgsODg0PEyAVExISEyccHhcgLikxMC4pLSwzOko+MzZGNywtQFdBRkxOUlNSMj5aYVpQYEpRUk//2wBDAQ4ODhMREyYVFSZPNS01T09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT0//wAARCAEsAZADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD06iiigAooooAqXKdazpUrWnGRWdKOatbAZ0i4NQsKuSrVZ1pWArtUbVMwqJhUgRmmGnmmGkA00004000gGmkNKaaaBCUhpTSGgBM0UUlAC5pM0UUCClzTc0tADgaUUzNLmmA8U4VHmnA0APBp4NRg04GmBKDUimoQaeDQBYQ1KpqsrVKrUxFuOQgg1dt7ko+4Ac9qy1apkkxTuBuJe56qKkF0hrEWX3p4m96dkF2bJukAppvEHasnzvemGX3p2QXZrm9HYVGbv3rKMvvSecadkLU02uc1E0ue9UPOPrSeaad0KxbaSoXcVCZKaXpNlWHOaiY0FqYWqWMaxqJjT2NRsakZ2+R60FgO9Zv2g46003B9aLDNIyIO9MM6Csxpj60wzH1o0AvyzhqquwNVzN700zVVxDpOarOKlL+9RMQaAIWFRMKnao2FSBXIqM1OwqJhSAiNNNPYUwikA00004000gG0hpaTvQAlJS0lABRSUUCFozSUUALS5puaKAHU4UwUuaYDwacDTAaUUxEoNPBqEGng0ATKakVqrhqeGpgWA1PDVXVqeGoAsB6XfVcNS7qYFjfSF6g30m+i4E++k3VBvpC9AybfQXqAvSF6AJjJSGSoC9NLUATmSml6gL0hegZOWppaod9N30gN8S0hl96r7qN1IonMlML1FupC1FxEu800vUZakLUXAk3Ubqh3Um6i4E26mk1Fvo30XEOaomFO3UhNAEbCo2FStUZpARkUw1IaYRSAYabTjTTQAlJS02gAozSUZoELmkpKM0ALmjNJmigBwNKDTQaXNMB4NLmowaXNAiQGnA1FmlzTAmBpwaoQacDQBMGxTw1Vw1LupgWN1G6oN1Lu96AJt1JuqLdSbqAJt1NLVFuo3UASFqTfURakLUDJS1NLVEWpC1AEhamluKYTSFqBj91IWqPdRmkBtbqN1RbqXNIok3Um6mbqTNMB5akJpmaM0CHE0hNNzSE0AKTSZpCaaTSEP3Um6mZpM0ASFqaTTM0ZoAU0w0pNNJoENNNNONNNIBppppTTTQAU2lNNZlVSzEADkk0ALSZ96z59VhQfJlvwqhNqszH5cKMdAKaQG/mjNct9sl3ZMjHvye9TxXs+44mKjr1p8ojo80uay4ru
T7oaNiACcvVuK5VgC6smRxuosBZz70uaYGB6UuaQD80uaZRmgCQGl3VHmlzTAk3UbqjzRmgCXdRuqLNBNAEu6k3VHuo3UAPLUbveo80ZoAeWpCaj3UE0DH5pM80zdSZ5oAeTSZpmaM0DHE4ozTM0A0hmxmjNNzRmgY7NGabmjNAh2aTNJmkzQA7NJmkJpKAFzSGkzSE0CFNNNBpCaAAmmk0UlIQZozTTSZoAXNIaTNJmgANNJpSaz7+/EQMcJzJ3PZf/AK9MCS7vY7f5fvPj7o7fWsW5u5rhjub5f7o6VDLLyS3LHqarlz2Jp2AcSq9SS1RlwT0pCKaRTAfuHalzUYHNOFAEiyEHI61at76RVMZw4IwARnFUgRQrc0BY6C1vFOcHAzyDz+tX1cHjPPeuXgk2MckjjjHrWhDeOi75DuAbqOooaEbeaM1BFMksYZDmpc1IDs0ZpuaM0CH5ozTM0ZoAfmkzTc0maYx+aCaZnmjNADs8UZpmaM+tADs0mabmkzRcY4mjPFMzRmgB2aM+9MzRmkMfmlpmeKUHNIDWzS5puaKZQ6jNNzRmgQ6kzSZopALmkoooAKTNBpM0CDikNFIaAA000pppoAQmkNKaaaBAaaTSmmk0AUtQuzChSP757+lYzHYu49cVZvpQ8xJxjt70y0tWu9ztxGgyxqtgSuZxyxzU8NqzjJ4q4lupcsBwelWAgUUnItRM82oAqE2/Jx0rRc8cVXbg0uYfKUmixUTDFXXNVmFCYrEJ5pQakCZzRtqrisMyRU0MpjYHg+uaj2HNLtNO4rGtp90gxHwDn861A1cqkhilB9K6K0nE8IcHkcGkyWizmjNNzRmkIdmkzSZpM0AOzRmm5ozQMdmjNMzRmgB2aCaZnmjNADqQmmk0UAOzSZFNzRmgY7NGaYXUdSKb5i+opDJwTS5qDzV9akU5Gc0AjXBpaYDS0FDqKQUtAhaKSigAozRRQISkpaSgApKWm0CA000ppDQA00hpaQ0ANNVr2Xyrdm/CrBrG1ublIweOppoDPdvNnwOnattAbXTxEgH7z759qxLUjzMmtlpA8Y9qUmXFDVGaHwFpFpkpJFQWVZn+b2qDJIOaklU81AcigBHPvURpzGmE81SAUGgU2lFMQ8Yp4AqKng0CI504yKuaLNiZoieGGR9RVWY/LUdnL5V3G/YNzVIiSOnJopuaWkQLmgmm5ooAdnjrSZptGaBjs0Z5puaTNADs0ZppNGeKAFzRmm5ooAXNIzYGaSq15LshODyeBSGVJpQ8hOT6Uzf6E81H3+nSnRr5kgFUMvWq/KDk/N61oCq8C96sCpYzVzSimA04UDHCikpaBDqKSigQtJRRQAUUUUCEpKWkNADTSU4000ANNIaWmmgQ01z+u8XSD1XP610B5rn9fz9qj/3P600BRgb58CtmBS0eT0FYtsu6UAVu70t7cbvvHtUyNYidBUUjgVBLfIOhqrJeBqmzKuSyOKhcioTMD3pC+R1p2C46kA5pm6jfTES8UcVD5nvR5g9aYEp60A1H5opyupPWkAk54qKAbpkB7sKlmHyE+lR2o3XMQ9WFUiJHTg0Zpo+tLmgzFzSZpM0ZoGLmkzSZozQAuaM03NGaAFzSZozSZoAXNGaTpSZpDFrLvJN8xHYVemfZEzH0rLyScnGaaGgxgd6t2acbu54FVkDM4HetO3QDtwOlNjLCDAAqVRk0wVNGO9QwLoPFPBqJTTxTGPFOpopwoEKKKBRQIWiiloEJRS0lACGkxTqQigBpFNxTz0ppoAYaaaeaYaBDTWLrEBmu4ck7cYP61tGsm/vIy5RThl7+tO40rmTBJFazMZG3nttFMuLpp3yoamXULIVk/hk5HtS4CKF9qLdS0yFkk78fjUZR/wDJqycHrxULAA8GmhXGEMoyQaAxqQSEDGeKR1UrlRg0WC43NJv7U0ZJNJnJxikUKWNNJNT+WqfeGTTwygY2qPwppCbKp3Ac0qsc0+Q5PQfhTQqn2oET
b90RB9KXT+b2LHrmmKz27AgKynsyhh+taOn3CSzAfZ4Yz2MYI5/OgTNTPFLmm5ozSJFzSUhNJnNIB2aM0lJmmA7NFNBpc0AKaSikpAFBNFNc4UmgZR1CXJCDoOTVPPvSyN5khY96EXcwA6mqQy1aISd3rwK0412qBVa2QDAHQVbHtSYEiDJxU44piDA+tOBqGNFhTUimoUaplNUMeKeKaBTwKBBS0tLigQAUYpwFLigQzFLinYpcUwI8UYp+KQigCMimEVKRTCKAIjTSKkIphFAhhFcrexlLqZTzhjXVGsTWIMXHmAcOv6ikVFkN9DvsFIHKAGqjxb1DL3Ga2VUGJQRn5RWdsNtL5T/cJyjdvpSTLsZ7xMpOSarMDnrWzNEGXNUHh54FUmLlKnJPWpT8sfvViO23HBqJ4/MuFiXkZ5NFwsLbqVQOyZB7HvVeT5Zc4xmt6aJVjU44xWPdqDkgdKSepbjoQtIxNMLNmnDDLnvSY5qrmdhSRt6fN60LnNFOUc0BYfLzCPXdxVjS0JuQeygk1Gg3uB2Tkn3rQ0yLZAZD1c/pSBl7NGaSkzQQLSUmeaKQC5opuaWgBaM03NFADs0ZptFADqq3r4j255arGcVl3UokmPoOBTGiPHuKsW0eTu/AVXA3EAVp20eAB6UwuWIlCrip41yaaoqdRge9Sxi1HPKIYi569vrT6zL6bzJdg+6v86kaNVTip0aqwp6GqKLyNUq4qojVOjUySwBShaYrVKppiDbS7alUA1IEzQIr7aXZVgRe1OER9KLAVdtNK1cMJ9KY0WKdgKhWo2FWmQ1Ey0rCKzCmMKmYVGRQBERVS+i823OPvLyKuNUbUgRnHKov0FMdUmTZIoI7g1NKP0qA/Kc8VButSnJA8Z2xP8vo/P61VlMo6rH+BNXZ5O9UJZOaAsNQlmPmNhfRe9WbeNWl3BQAOAKpRAyTKvqa1kaKLHzChjSH3X+pA9Kx5xwav3d2DkJ+FUC27OaSG9iptxyKUN6inMNrUoI9KszsM49aeoJ6cD1pwANPQc4oCxNAgCEAdsCtRECRqg6KMVQtwTKqgdDWgSO9MiQtJ3pvmL60bx2NIkXNFNLj1pNwPegB1FIrBhkGigBaKSigBaKSjNAEdzJ5cRI6npWWBVi9l3ShB0Wq4YnAApoZPbIS+70rWiG1QKqWyYwPSrq0NgTRrk1NUScLT93FTcCG7l8qEgH5m4FZeMmpbifzpSR90cCkABppDNQGniolanikWTo1To1VVNTI1MRbQ1KpqsjVMppiLSNViN6pqc1OhpiNG2USSKpzycYBxWvBp8SrmRSW781gROQeK6SxuBcQAk/MvDVV9AsL9jt/+eY/M1FNp1vIhCJtbscmrlFTcLHL3tnJbON44PpVGRa6zUYVmtGBxkcj61zMqHmq3JZRcVC1WZBioHpAQNUbVK1RNSEV5171nzk9q0pvu1l3BxmoZtB6FOYnHNUpOvFWpWzUSICcmhFMbDG2dw4qORZxIRu6Vb3AcVDIeDgYoAgdjgZqJnOeKHbJqMk5p2FceWLUoNNU0tAEgJqSPrUQNTxCgDQs1wparDfcJqOAbYhT3J8s0GTKFzO0e0KMk1B9rl/uipbiNndSoyAKi8h/SgBDdy+i1ZjdmjVnHOM1XNs5I4q0F2qFHPQUAWI+EFPpqjApaBC0ZpKKACmSuEjLegp9RTxGUABsDvQMzCSzEt1JqW3Tc+ewqf7Ef7/6VPDb7MZOadxk8K7VFToMmo1FTqMCpYD6q302yPywfmb+VWGYIpZjgCsmVzLIXPc0kCGg54qVTUYp61YzTxinqaCvWmjioLJlNSKahU09TVIktI3Sp0aqanFTI1MC4rVMjVUV6lV6BFxHqzDcGNgwxx2PSs4PUgeqEbUeqTqoG4HHtTzqsuOo/KsTzKDL70xGpNqMkq4dsj6VTdwxNVDJTGkNO4iWQA1VkWnmWmM4NICs3FRtU74NQsKTEQvyDWTddTWu1Zl4uHPp
UMuDMxhzTGbHAp8zY7VVclulI0JdwHLHimPOMYUD61GIs/fc/hRJCi8KxNOwXIiwznHNMyM0pQ9jTCp70xMd9KUGo+exp4460CHrnNW7Vd0gFVU5rSsE4LkewpDZdFB5pKKDMTaPSk2j0pTRSAbgelLgelLRmmISlzSUCkAtGaKKBhS0lKKAFFOApBTqAJI1yeelS02MYGKbPIIkLn8PrUsZVvpsnyl7cmqlIWLOWJyT1oFUkMeOtOHHWmindqYGyRTGFSsKYRUljFODzUoNRkULx1piJwalVqrqaeDTEWVapVeqitUoamItB6cHqsGpQ9MRZD0b6rh6N1FxE5emF6jLU0mgB7NTC1NJppNFwHFqaTmmk02gRKIs28kuM7RwvrXPfbJLmUo4AGMjHrXVqnyRRDqoyfx61ha5YJZXKXVuMRyDlfQ0DWjMadagVasyOrcjv2pFAIqTQrMCKhcn3q84GeRUTlQpFFxlEtzSdae4BPApKYhKKWjgUAPjHIrZtWjaEeWeBwfrWIJNvSrmkOd8g9eaBM1KKKKkgKKKKAENFLSYoASjFLijFACdKAKXtRQAUooooAdT0GTn0qOpk4WkMkXrWdey+ZLgH5Vq1cuUhZl69KzOtCGKKcBmminCqGOGKZ5m58DpTZpNo2jrUSsM5poR1TCmEcVanhaNip//AF1ARUFEJHrTD1qVhUbCgAVvenhqh+lKM0wJw2O9SBqrDPrTwTTEWA9O3e9QDNOzgZ7UCJs0u6o4v3sgRTkmriWZJP3iB3xinZiK+acI5GGQpx6ngVbFsy5CjaByxrOu5JZZPvkKOF5qlG4mOdlT7zAUxMynKSxqueSxxVbypnkwCxUcsRzipBLG/wC6woQdTTcbAXvJIQsAhUDg561QivsXKrK2ADzgcYqW5lzAqxghm4H+NZkgCk/3/wCKlHUGdNafM7Mev8P0rI1mdJbRtidG6n64q5ps3+jeWx/eRjIPrVZrNpreZAABk4LfnTtZivocnISje1NEpHSrlzArIdmSfWsx8qSD1okiosnNxUTy5qInNNNRYq44tSbqbSUWC4/dSZptBNAXBjVrTWKTBh0PBqkeTV+3Bi2OehqorUls6VraN8iCQl1GSpqqVIOD1qjqEr+cJEJ2Mo5HrTrVmmYhXIZRls8CjkEi3RQpDL8rZpazegxtITgU40jdqVwEB46UuT6UlGTQAfhQSR2ozRQAzzKN59KrE4JFKHIoAtoxJqdWNVIGzVkdKAFl+eJlx1FZnfFaYrOmXbKw96EMQUM2F4pBQ3IqgKxBLZJPNPVBTSGz1p8aFnAJNWgPRpolkTDVlTwtE2D07Gtoiq88YdSCKzKMRhUbCrM0ZR9pqBhzQBFilApxHNPSNnOFFADAtWbaynuCPLQ4/vHgVoWVhCkfmT4Yn7uegq3NexxoEQHngbR0p2YioNPit4WknJcjgAHAzWbLEruPlyPY1Nquos0qwxptVRzk96zxeMPvMV9cCtYxsibmzpkRRnZVVcDGetXV4CA7mbGcZ4rKtruMWgUSuzMckdMCrsVzBKdpl2N1C54qZXYIe8ZlgcNIFDfMTjgCqJijGcLvQ9OxqS7umkm+zRNvjXmRl7n0p9uI5CecKvUGhXQig2m+dmS3kZG9GYiqEweN/LkyWHtzXTuFZRlRjsKoXgH3WXf6EdVo57hYxYmuQGaJSxPC7h0pYrS7WXfPGoGCxJPWrLzNbNz95vuvnio5UuJo98kpAPU54IovYCxYKftsa9eSfwrWlUfaMHksufbj/wDXWbpbxm/VY23qEJz3zjFXb2dIhG7nc6t9xfT3pSbbA5u6UR3MkfoxwB6VlXsGcso5HWtLXbsLc5DKCV+6vasVrhpDznBrR7CRWORR1q3c26rbRTRtksDvX096qYqGWgwKTFLQaQxuaQ0ppY4nlJ2g4HU+lADF++M1p21vJcRkbcYGQTwBVVdkPKcsOjGo5LiV/vOxz6mmhNGpab5I5g7B40YK
D2qV3SGIMoDBP4R3+tN8PBLkT2bkDeNy/WnzWEkMjRyEZB9+a03I2I11ITP88ax9srWhaNHLuWYHIGQy/wCFZiWI89MDehPzKOorXhgSZS8ZwF4KHgkfWokkO4ksBU/I24fkfyqBs1ZSWF1KvJt25GHOCPxqhdXjRNswhA6Y5BrPkGS0VTjvwX/eKAvqtXx5LAFZevQMuKTg0AyinFCp9R7U09KVgKUoIkb60gFSzj5x7imAUDJIDg1aU5NU1OGq2nSgCUGqd4uJQ3qKtr0qG8XMQPoaEIp+1KKQU4VQyPZ8596mRQMYFJjvT1HFO4HdQXKy8HhvSnSDis0Kd2VOCKsifCYbk1JRSuTulPtxVcjNTkFmwOSarujSSbAdqjlmNUo3C5dNmlum+5ILHog/rS2+JnKopwoycdqjlI+zh8hjjCgnJOKoteSuxhUlQfvAcVrGKsLqa4mWSTapySMAegqTyNp33DbExgEcCsfDAYjPPrVm1xyLmUkdj1xSa7CNFUsohuBXcf4yOao30Vpct87gP2bHB+tWlgt1X5MzL7dqSb7LGuWAx6VF2FkYb7IpfL5Y9sVHdXHlRFYwd7dWPYVeubuNv3aRgqejHqKz5LGc5ZlJT9a1iQxlrfS267I8Nnk5retZoZYkjlAWU8sT29gaxYIFVgxGMdBVkI2ST1PX2okkPXobrGWJRj589PUCqN5cgAJbk7z19apRX8kBd87o1GAD/SiLZey5iJEhOWb+6KjltqwuU5IzJK6HABHPfNUQZYjsVmYA/dOeK6aOGCCVwhJO0ZPUnrXP3rMZJCuRlielOLuJ6FvSLgpdkhPLbYQXNZ+p6u0paK2+WPJ+bu1VTK6RSqCcsuP1qjnP1oaGIzFiSxyfemo2G4p+3nJFRsME0AXrk5RCvBKjIqiRgkVLEzP8ucnHGajYEk560nsNCUmaWgLlgB1JxUFAiBsk9BVuNNke9jkY4AqueG2DsafNKdgUHoMVVhMhc+ZISKjZOamRdoB9aeyfLmgRFaXElpcJNESrKe1dZJdxalZC4hIEsfDqeuK5GRcNRHI8TBkYg+1MDoEkaKQSbRwc561rG6jMKyooy3QAc5rlku9x+cbT6rWjp1w25kJDAjKfX0pyV0IvzQxToZ5XEch/hHXP9axp4X5V156gilnmknO9zh16Y7URXGfkm69jQk7DK8VvJJIqKpya07e08ssk/nhl6FRwKuwWhtoDK6AucbfpV+2crhVBCjk5PU0nIW5SjWAKCUCZ4+fIpZLcMuY4uQOcNmrl0yrGZLg5bsvpWBJHIXLqRuJ6A9KErgS3UDhd+07RwTVXkVatZ5JHa2cFtw79aILKWYygfNt6djUuA7lT+LNW4zxVaWN4m2uCDU8J4FZ2KLC8Uky7omHtQD704UCMsU8ClZcSlQOhwK1YPDurzQCaOwlKEZBxjP4VQGXxThinTRSQTNFMhSRTgqRgimimB2Kp8tRspJwBk1aIwKlt4Sq+dxu7Z7e9BRXNuLeMFvmlP8Iqpdo+3eV2jFaeFYlvTliazpZvtLOr8KOg9aqLsJmX5jqpC8n0PNWbawa5TcDtb+JvT2qWysWkZyDhQcZrbaFVVVjG0r0NU5WFYwpbeS1wrrwe/rUDuqrvY/L2AroWxIhjkAHqx6Vz2pQKHzAd0S9/enF3FexCmp3EZKwfKh6ipNpk/eGQvnqM1nFWzx1qeHdHh2J+lU0gL8Eak/u13Zq/DbNCPMb5/wDZ64+lRWksbjMYETY5X1q/EynkElu+aybaHuVnsY7n94uEk9B/Ws2+R7f92w5PUjvW3KRncvBH8Xb/AOvWPquoLGxgwshxyR3pxbZLVtjIJknkEMQySeBW5YRW9pb7SwaTPzkc5NZGnLulc47VdhBKEgfxH+dVLsJalpr1RO+xP4R149a5y4uHdz0HJrWZcXGPVTWLcrtnkUdmNOKQMiEhEoLKDUd3akbpIx8vcelPA56VryWzt5fyEBzg8deKGM58
D9yoP3gajmTa9a91pVwkmY48qB61l3DFZCChU9CDUAQKdrg1PGBu5GQfWq5PNSqxZcEUWHcJE2kleVqSzCq5mcZEfQep5x/KmmQiMqeRTwMQoAPvHJFTYdyqzZkyfXNDnkAj3okXDkU1+T+FUIm+Xapz+FSBS6EqvSoAPkBrQsULqVAJJ9KAKMyHg7TUYU1qT28n2PJjPynGfxxVEow6qaaQCxxkx78ZAPJqUDypFlQ4weRUtmP3LgjuKdLCVcEjKE4o2YAW3S7yB83WrJ09ov3soGDytFkkBBHzNKhxgjP4itmAfaUAuEyF6L/Wm5WEULK4mRhuyyDgKe1TS6g0GZPLCqvQHvVhreONicEoOp9Kxb3dczHymJjHAzSVmxkzagLqTdKdp7DsKlGcdmz0IrKeB4uoq9Yl7ZfOfr6HpVNJCCVZI7jCkqynOfSrsG04kJxuOCc1DcRlovtA/EVGLsJGEaPIHIqdx2Ny4t4b2Ah8ZHRh1FYIRonKN1Bq3BqKYCGMoCfvDtVW/bZe+YGDRvjkdqhxY7kq9KetRr061IvSswIHdra6SaMgMpDAkZ5FbD+MNXeIIJ0XjGVQA1j3gOxT6GqtUtik7E80zzytJKxZ2OSSeTTPrTB9KfmgV7neBcnnoKbJOUIA5HoKkkOyHI71SnJjTcvX1NWkBPcMTB8p2l+CPaqJT+6OlSSEiGMg8seaLtzHbkpwfWhgi5aLthjMZz3Yds1NJKoQljjHY1laVNIu1Qx2k9Ksa0xESkcFTxRYCG7uGuP3C8R98VHGQv7pwCDwM96apwgxV60iSVcuM4GaewmZd1p/2VPOwSD0HcVnyNxW/ITMzCQ7gGx+FZ00Eccr7V4B4FUncWxQQuCG5GDwa17O587KSqXYD5ccDNVtPt47qf8AegkDnAq9sAuFRfkUKcBeKTGZ2p3N6xW3HC9eOtZhtpRyy8+5ramiU3TuxYkcDJqtIAe3eqWiJvcdpVrJ5UjkAAnrVqO1k+zqcqM8/nViBFXTxtGMg1IVB2rjiolIEiobPM6Fm/hPT8Kxr2CJL2UdcN610IjTzmyM4UdfrWPeIouJCFHWnBikjPUKuQqE59BW1J5jRW7BQPmU8mszqRWz1tIB/u0S2BbiiKR5iCwAx2FY9zaxyC4R8Nh2xkdK6IAb8+1YLj95c/75rNMqxzjQKwyBipLe2ZiFyM54zTgOBUkZIkXHqK0Yi1LpMaSLuc+W/GcdKpSJvkOzoo25+nFdQ4BgkU9Otc5OgV3jUkKW7VNxoozxlHORkE8GonUcCtKcBrHkdMY/OqA600BLbW4ljIzzWtZR7JQIuFK9SKh0vhZMe1XrPnbnsMUmwQ4RO1pOhYfxdvxrDcnsa6QADzh2P+Armm+9TiwLmnjKTb0yuB/Orktuojco3bpUGmf6uX8K0yoO4EDBFKT1EY13DJbXaSxtgkZyKuW+r7VKSxbXP8a9Pyp2oxL9ngbnOPX6VnbRnHOKq11qNGvc3DeUIYkOx+C2c5qoIJlALRuP+A1f0uGOWzw65wSKuNhLVWUDOOtK9gWpRtLP/ltcAbByAeh+tVtS2NiRPlTOAv8AWpXleW5RXPynqvY81X1HgSAdFK49qHJjSsTMUihDTN+8x8oHSs+VzdOW2/MewpkkjyPljk0trK0UwdcZU8U1oBGpKOWJOAO9PgfzcowyGNalzHG9vvaNSW5PFUbaNY75FUfK2QR+FUthXLCxPCoWT8DUi1LAPNR/MyduQPapoYIyuSKwa1GUpxuiYe1Z1bVzGiqNoxWKev40IdxwNOFMHU04HmgD/9k="
def processFrame(dataURL):
    """Run the face-recognition pipeline on one base64-encoded JPEG frame.

    ``dataURL`` must be a ``data:image/jpeg;base64,...`` data URI.  The
    image is decoded, mirrored, converted to the channel order dlib and
    openface expect, and every detected face is aligned, embedded with the
    Torch network and classified with the offline model ``(le, clf)``.

    NOTE: this module is Python 2 (print statements, StringIO).

    :param dataURL: data URI containing the JPEG image to analyse.
    :return: list of dicts, one per detected face, with keys
             'identity', 'left', 'right', 'top', 'bottom', 'confidence'.
    """
    head = "data:image/jpeg;base64,"
    assert(dataURL.startswith(head))
    imgdata = base64.b64decode(dataURL[len(head):])
    # Rebuild a file-like object so PIL can open the decoded bytes.
    imgF = StringIO.StringIO()
    imgF.write(imgdata)
    imgF.seek(0)
    img = Image.open(imgF)
    # Flip horizontally -- presumably to undo client-side webcam
    # mirroring; TODO confirm against the frontend.
    buf = np.fliplr(np.asarray(img))
    print buf.shape
    # Swap channels 0 and 2 (RGB <-> BGR) so the detector sees the
    # ordering it was trained with.
    rgbFrame = np.zeros(buf.shape, dtype=np.uint8)
    rgbFrame[:, :, 0] = buf[:, :, 2]
    rgbFrame[:, :, 1] = buf[:, :, 1]
    rgbFrame[:, :, 2] = buf[:, :, 0]
    # NOTE(review): computed but never used afterwards.
    annotatedFrame = np.copy(buf)
    identities = []
    # One bounding box per face found in the frame.
    bbs = align.getAllFaceBoundingBoxes(rgbFrame)
    for bb in bbs:
        faces={}
        landmarks = align.findLandmarks(rgbFrame, bb)
        alignedFace = align.align(args.imgDim, rgbFrame, bb,
                                  landmarks=landmarks,
                                  landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        if alignedFace is None:
            # Alignment can fail (e.g. partial faces); skip those.
            continue
        # Perceptual hash of the aligned face; previously used to cache
        # embeddings for already-seen faces.
        phash = str(imagehash.phash(Image.fromarray(alignedFace)))
        # Face embedding from the Torch network.
        rep = net.forward(alignedFace)
        # Offline model: pick the class with the highest probability.
        identity=clf.predict_proba(rep).ravel()
        maxI = np.argmax(identity)
        name = le.inverse_transform(maxI)
        confidence = identity[maxI]
        # NOTE(review): bl/tr are computed but never used (leftover from
        # a drawing/annotation step).
        bl = (bb.left(), bb.bottom())
        tr = (bb.right(), bb.top())
        faces['identity']=name
        faces['left']=bb.left()
        faces['right']=bb.right()
        faces['top']=bb.top()
        faces['bottom']=bb.bottom()
        faces['confidence']=confidence
        identities.append(faces)
    return identities
@app.route("/deleteModel", methods=['GET'])
def deleteMode():
    """Delete the persisted model/people/image caches so training can
    start fresh.  Returns ``{"success": true}`` as JSON.
    """
    # TODO: make these paths configurable instead of hard-coding the EC2
    # home directory.
    cache_files = ['/home/wlpt836/webface/model.pkl',
                   '/home/wlpt836/webface/people.pkl',
                   '/home/wlpt836/webface/images.pkl']
    for path in cache_files:
        try:
            os.remove(path)
        except OSError:
            # A missing cache file is fine -- the goal is simply that
            # none of them exist afterwards.  Previously a single missing
            # file made the endpoint return a 500.
            pass
    return jsonify({"success": True})
@app.route("/api",methods=['POST'])
def api():
    # Main recognition endpoint.  Expects a JSON body {"url": "<image url>"};
    # downloads the image, wraps it in a base64 data URI and returns the
    # detected faces as JSON.
    content = request.get_json()
    req=content["url"]
    print req
    # NOTE(review): no error handling -- a missing "url" key or a failed
    # download surfaces as a 500.
    response_image = requests.get(req)
    print response_image
    # Rebuild a data URI in the exact format processFrame() expects.
    uri = ("data:" +
        response_image.headers['Content-Type'] + ";" +
        "base64," + base64.b64encode(response_image.content))
    return jsonify(processFrame(uri))
# Listen on all interfaces; app.run() blocks, so it must stay last.
app.run(host='0.0.0.0',port=8000)
#print processFrame(dataURL) | [
"[email protected]"
] | |
6bd8e351549e1cca30848b20b4a86f5949209d5b | afc4b25060e4f2f2a42080bcf2ec948128fb22e5 | /src/blog/urls.py | 47fab9faed284758d4c295af308552c97c464ec8 | [
"MIT"
] | permissive | samims/blog-api-tuto | 38c9530e4bf1e3a57ef45827a54ef51e1ad1e46a | 586cca3bbd43838de9ee3f30d8b6f95217644891 | refs/heads/master | 2023-01-20T12:31:53.627618 | 2019-11-06T07:26:13 | 2019-11-06T07:26:13 | 162,914,395 | 0 | 0 | MIT | 2023-08-11T07:29:46 | 2018-12-23T18:23:19 | JavaScript | UTF-8 | Python | false | false | 1,656 | py | """blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from accounts.views import (login_view, register_view, logout_view)
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Comment pages (HTML views).
    url(r'^comments/', include("comments.urls", namespace='comments')),
    # Account management.
    url(r'^register/', register_view, name='register'),
    url(r'^login/', login_view, name='login'),
    url(r'^logout/', logout_view, name='logout'),
    # Posts live at the site root.  NOTE(review): this '^' include sits
    # before the api/ patterns; resolution only falls through to them
    # when nothing in posts.urls matches -- confirm posts.urls has no
    # catch-all that would shadow the API routes.
    url(r'^', include("posts.urls", namespace='posts')),
    # REST API endpoints.
    url(r'^api/comments/', include("comments.api.urls", namespace="comments-api")),
    url(r'^api/posts/', include("posts.api.urls", namespace='posts-api')),
    # url(r'^posts/$', "<appname>.views.<function_name>"),
]
# Serve static and media files directly in development only; production
# should hand these to the web server.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
4a94954de8a7e6b418c50d5c8b725d578b55bc2e | 25540d76f6d47443027b01dd892116122b895587 | /km92/maliarenkoma/main.py | 4d83b31a8dfd9ff81810496d2d25d3cd7adef4e0 | [] | no_license | igortereshchenko/pandasplotly3 | 64c88bbf3a19f0a159fb8a473c81ced79c172cc3 | bc5410562873f3094e4baca0c595e9a13b586290 | refs/heads/master | 2022-12-24T13:29:03.976827 | 2019-12-26T15:36:13 | 2019-12-26T15:36:13 | 229,225,081 | 2 | 22 | null | 2022-09-23T22:32:24 | 2019-12-20T08:41:15 | HTML | UTF-8 | Python | false | false | 1,847 | py | import os
import pandas as pd
from bq_helper import BigQueryHelper
import plotly.graph_objs as go
from plotly.offline import plot
# Service-account credentials used by BigQueryHelper.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "keys.json"
# Cap on the number of rows pulled from BigQuery.
LIMIT = 100
bq_assistant = BigQueryHelper('bigquery-public-data', 'nhtsa_traffic_fatalities')
# Maneuver records from the 2016 NHTSA traffic-fatality public dataset.
QUERY = """
SELECT state_number, vehicle_number, driver_maneuvered_to_avoid, driver_maneuvered_to_avoid_name
FROM `bigquery-public-data.nhtsa_traffic_fatalities.maneuver_2016`
LIMIT {}
""".format(LIMIT)
df: pd.DataFrame = bq_assistant.query_to_pandas(QUERY)
# --- Chart 1: bar chart of record counts per state. ---
group_by_state = df.groupby(["state_number"])
bar_data = group_by_state["state_number"].count()
bar = go.Bar(
    x=bar_data.index,
    y=bar_data.values,
)
bar_layout = dict(
    title='Accidents',
    xaxis=dict(title='state', ),
    yaxis=dict(title='number'),
)
plot(dict(data=[bar], layout=bar_layout))
# --- Chart 2: per-state counts for three selected vehicle numbers. ---
by_vehicles = [
    df[df['vehicle_number'] == 5],
    df[df['vehicle_number'] == 1],
    df[df['vehicle_number'] == 9],
]
scatter_data = [el.groupby(["state_number"])["state_number"].count() for el in by_vehicles]
sc_1 = go.Scatter(
    x=scatter_data[0].index,
    y=scatter_data[0].values,
    name="vehicle id 5"
)
sc_2 = go.Scatter(
    x=scatter_data[1].index,
    y=scatter_data[1].values,
    name="vehicle id 1"
)
sc_3 = go.Scatter(
    x=scatter_data[2].index,
    y=scatter_data[2].values,
    name="vehicle id 9"
)
scatter_layout = dict(
    title='Num Accidents by vehicle_id in state',
    xaxis=dict(title='state', ),
    yaxis=dict(title='number'),
)
plot(dict(data=[sc_1, sc_2, sc_3], layout=scatter_layout))
# --- Chart 3: pie chart of maneuver-type frequencies. ---
group_by_accident_name = df.groupby(["driver_maneuvered_to_avoid_name"])["driver_maneuvered_to_avoid_name"].count()
plot(go.Figure(data=[go.Pie(labels=group_by_accident_name.index, values=group_by_accident_name.values)]))
| [
"[email protected]"
] | |
f15ca18e6fbc3fa18d367fc0d3e26030ca778a74 | da5972abb7e9f802d060a7b2840b9c385f44dab5 | /api.py | 02dfd15553f81019d3e1bfa25ced1911e328366b | [] | no_license | marcguo/tinder_bot | cecc18f80f6647fda4732b4f3adbcbb8a54072b0 | f85b5e93d4947dfaf296deaef72a8293d70cb067 | refs/heads/master | 2022-11-24T19:31:04.221219 | 2020-08-06T20:45:16 | 2020-08-06T20:45:16 | 285,662,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,327 | py | '''
API's are based on https://github.com/fbessez/Tinder.
'''
# To perform RESTful API requests.
import requests
# To assemble and parse JSON objects.
import json
from user import *
import config
from location import *
from log import *
# Common headers used for all API requests.
# Common headers sent with every Tinder API request.  The X-Auth-Token
# authenticates the account (see config.py); the User-Agent mimics the
# official iOS client.
HEADERS = {'x-auth-token': config.X_AUTH_TOKEN ,
           'user-agent': 'Tinder/11.4.0 (iPhone; iOS 12.4.1; Scale/2.00)',
           'content-type': 'application/json'}
def get_rec_list():
    '''
    Gets a list of recommended users to interact with.
    @rtype list
    @return A list of User objects built from the /user/recs API GET
    request; an empty list when no recommendations could be fetched.
    '''
    url = 'https://api.gotinder.com/user/recs'
    raw_response = requests.get(url, headers = HEADERS)
    response = raw_response.json()
    try:
        # A 'status' of 401 means the auth token is stale or invalid;
        # there is no point continuing, so the whole process exits.
        if 401 == response['status']:
            print('401 (Unauthorised) returned from the API call.')
            log('401 (Unauthorised) returned from the API call.')
            exit()
    except:
        # NOTE(review): this bare except also fires when the response has
        # no 'status' key at all, and still exits the process -- confirm
        # that successful responses always carry 'status' before relying
        # on this path.
        print(response)
        log(response)
        exit()
    try:
        # This is a list that contains all rec'd users.
        results = response['results']
    except:
        print('Could not get rec list.')
        log('Could not get rec list.')
        print(response)
        log(response)
        return []
    # Wrap each raw record in a User object.
    rec_user_list = []
    for result in results:
        id = result['_id']
        name = result['name']
        distance = result['distance_mi']
        rec_user = User(id, name, distance)
        rec_user_list.append(rec_user)
    return rec_user_list
def get_match_list(count):
    '''
    Gets a list of matched users to interact with.
    @type count: integer
    @param count: Count of matches to get at a time.
    @rtype list
    @return A list of User objects built from the /v2/matches API GET
    request.
    '''
    url = 'https://api.gotinder.com/v2/matches?count={}'.format(str(count))
    raw_response = requests.get(url, headers = HEADERS)
    response = raw_response.json()
    matches = response['data']['matches']
    match_list = []
    for match in matches:
        # Each match wraps the other user's profile under 'person'.
        # A leftover debug loop (printing the keys and returning early)
        # previously made this function always return None.
        match_detail = match['person']
        id = match_detail['_id']
        name = match_detail['name']
        # NOTE(review): 'distance_mi' may be absent on some match
        # records -- confirm against the live API before relying on it.
        distance = match_detail['distance_mi']
        matched_user = User(id, name, distance)
        match_list.append(matched_user)
    return match_list
def get_user(id):
    '''
    Gets a specific user's profile.
    @type id: string
    @param id: User ID.
    @rtype dict
    @return The parsed profile JSON returned by the API.
    '''
    url = 'https://api.gotinder.com/user/{}'.format(id)
    raw_response = requests.get(url, headers = HEADERS)
    response = raw_response.json()
    print(response)
    # Return the profile so callers can use it instead of only seeing it
    # printed (previously the parsed response was discarded).
    return response
def like_user(id):
    '''
    Likes (swipes right on) a specific user.
    @type id: string
    @param id: User ID.
    @rtype dict
    @return The API response JSON (previously parsed but discarded).
    '''
    url = 'https://api.gotinder.com/like/{}'.format(id)
    raw_response = requests.get(url, headers = HEADERS)
    response = raw_response.json()
    return response
def change_location(lat, lon):
    '''
    Changes my location to a specified location.
    @type lat: string
    @param lat: Latitude value.
    @type lon: string
    @param lon: Longitude value.
    @rtype dict
    @return The API response JSON (previously parsed but discarded).
    '''
    url = 'https://api.gotinder.com/user/ping'
    data = {'lat': lat, 'lon': lon}
    raw_response = requests.post(url, headers = HEADERS, data = json.dumps(data), verify = True)
    response = raw_response.json()
    return response
def travel(lat, lon):
    '''
    Travels/Sets the current location to a specified location.
    @type lat: string
    @param lat: Latitude value.
    @type lon: string
    @param lon: Longitude value.
    @rtype dict
    @return The API response JSON (previously parsed but discarded).
    '''
    url = 'https://api.gotinder.com/passport/user/travel'
    data = {'lat': lat, 'lon': lon}
    raw_response = requests.post(url, headers = HEADERS, data = json.dumps(data), verify = True)
    response = raw_response.json()
    return response
def message(id, message):
    '''
    Sends a message to a matched user.
    @type id: string
    @param id: Match ID.
    @type message: string
    @param message: The message text to send.
    @rtype dict
    @return The API response JSON.
    '''
    url = 'https://api.gotinder.com/user/matches/{}'.format(id)
    data = {"message": message}
    # verify=True: TLS certificate verification was disabled here
    # (verify=False), which would let a man-in-the-middle read the auth
    # token.  Every other request in this module verifies certificates.
    raw_response = requests.post(url, headers = HEADERS, data = json.dumps(data), verify = True)
    response = raw_response.json()
    print(response)
    return response
def get_geocode(address):
    '''
    Gets the latitude and longitude of an address via the Google
    Geocoding API.
    @type address: string
    @param address: Address of the city/place to query the geocoding of.
    @rtype Location
    @return A Location object containing the location info (latitude,
    longitude...), or False when the address cannot be resolved (kept
    for backward compatibility with callers that truth-test the result).
    '''
    url = 'https://maps.googleapis.com/maps/api/geocode/json?address={}&key={}'.format(address, config.GOOGLE_API_KEY)
    raw_response = requests.get(url)
    response = raw_response.json()
    # 'results' is empty (status ZERO_RESULTS) or missing entirely when
    # the address is invalid or the API rejects the request; the old
    # bare-index/except only covered the empty-list case.
    results = response.get('results') or []
    if not results:
        print('The address given is invalid.')
        return False
    location = results[0]['geometry']['location']
    return Location(str(location['lat']), str(location['lng']))
| [
"[email protected]"
] | |
936932bb79fdd1481f0240df944bcbdd104de800 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /qYrTDRY7AN2RHxvXg_16.py | ac0bec8269966da1b71c214e4852827b65159799 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | """
Consider a right triangle. Its area and hypotenuse are known.
Create a function that returns the two missing sides. The first input is the
area and the second input is the hypotenuse. Return your answer as a list (the
shorter side first). If there is no such right triangle, return `"Does not
exist"`.
### Examples
f(3, 6) ➞ [1.015, 5.914]
f(30, 12) ➞ [5.675, 10.574]
f(30, 10) ➞ "Does not exist"
### Notes
Round your answer to three decimal places.
"""
def f(a, h):
    """Return the two legs of a right triangle with area ``a`` and
    hypotenuse ``h``.

    The legs x >= y satisfy x*y = 2a and x**2 + y**2 = h**2, which gives
    the closed form x, y = (s +/- d) / 2 with s = sqrt(h**2 + 4a) and
    d = sqrt(h**2 - 4a).  Such a triangle exists only when a > 0 and
    h**2 >= 4a; this replaces the original step-and-refine numeric
    search, which was slow and only approximate.

    Returns the shorter side first, both rounded to three decimal
    places, or the string "Does not exist" when no such right triangle
    exists.
    """
    import math
    if a <= 0 or h * h < 4 * a:
        return "Does not exist"
    s = math.sqrt(h * h + 4 * a)
    d = math.sqrt(h * h - 4 * a)
    longer = (s + d) / 2
    shorter = (s - d) / 2
    return [round(shorter, 3), round(longer, 3)]
| [
"[email protected]"
] | |
3d3905f020430a87a54b4cd3c13ed122189a9fa6 | 63e2bed7329c79bf67279f9071194c9cba88a82c | /SevOneApi/python-client/swagger_client/models/top_n_settings_v1.py | 4f8ef27b278476432101e66db4523ed6741bfad7 | [] | no_license | jsthomason/LearningPython | 12422b969dbef89578ed326852dd65f65ab77496 | 2f71223250b6a198f2736bcb1b8681c51aa12c03 | refs/heads/master | 2021-01-21T01:05:46.208994 | 2019-06-27T13:40:37 | 2019-06-27T13:40:37 | 63,447,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,480 | py | # coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.aggregation_selection_setting import AggregationSelectionSetting # noqa: F401,E501
from swagger_client.models.result_limit_setting_v1 import ResultLimitSettingV1 # noqa: F401,E501
from swagger_client.models.top_n_setting_v1 import TopNSettingV1 # noqa: F401,E501
from swagger_client.models.units_setting import UnitsSetting # noqa: F401,E501
from swagger_client.models.work_hours_setting import WorkHoursSetting # noqa: F401,E501
class TopNSettingsV1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'aggregation_selection_setting': 'AggregationSelectionSetting',
'result_limit_setting': 'ResultLimitSettingV1',
'top_n_setting': 'TopNSettingV1',
'units_setting': 'UnitsSetting',
'work_hours_setting': 'WorkHoursSetting'
}
attribute_map = {
'aggregation_selection_setting': 'aggregationSelectionSetting',
'result_limit_setting': 'resultLimitSetting',
'top_n_setting': 'topNSetting',
'units_setting': 'unitsSetting',
'work_hours_setting': 'workHoursSetting'
}
def __init__(self, aggregation_selection_setting=None, result_limit_setting=None, top_n_setting=None, units_setting=None, work_hours_setting=None): # noqa: E501
"""TopNSettingsV1 - a model defined in Swagger""" # noqa: E501
self._aggregation_selection_setting = None
self._result_limit_setting = None
self._top_n_setting = None
self._units_setting = None
self._work_hours_setting = None
self.discriminator = None
if aggregation_selection_setting is not None:
self.aggregation_selection_setting = aggregation_selection_setting
if result_limit_setting is not None:
self.result_limit_setting = result_limit_setting
if top_n_setting is not None:
self.top_n_setting = top_n_setting
if units_setting is not None:
self.units_setting = units_setting
if work_hours_setting is not None:
self.work_hours_setting = work_hours_setting
@property
def aggregation_selection_setting(self):
"""Gets the aggregation_selection_setting of this TopNSettingsV1. # noqa: E501
:return: The aggregation_selection_setting of this TopNSettingsV1. # noqa: E501
:rtype: AggregationSelectionSetting
"""
return self._aggregation_selection_setting
@aggregation_selection_setting.setter
def aggregation_selection_setting(self, aggregation_selection_setting):
"""Sets the aggregation_selection_setting of this TopNSettingsV1.
:param aggregation_selection_setting: The aggregation_selection_setting of this TopNSettingsV1. # noqa: E501
:type: AggregationSelectionSetting
"""
self._aggregation_selection_setting = aggregation_selection_setting
@property
def result_limit_setting(self):
"""Gets the result_limit_setting of this TopNSettingsV1. # noqa: E501
:return: The result_limit_setting of this TopNSettingsV1. # noqa: E501
:rtype: ResultLimitSettingV1
"""
return self._result_limit_setting
@result_limit_setting.setter
def result_limit_setting(self, result_limit_setting):
"""Sets the result_limit_setting of this TopNSettingsV1.
:param result_limit_setting: The result_limit_setting of this TopNSettingsV1. # noqa: E501
:type: ResultLimitSettingV1
"""
self._result_limit_setting = result_limit_setting
@property
def top_n_setting(self):
"""Gets the top_n_setting of this TopNSettingsV1. # noqa: E501
:return: The top_n_setting of this TopNSettingsV1. # noqa: E501
:rtype: TopNSettingV1
"""
return self._top_n_setting
@top_n_setting.setter
def top_n_setting(self, top_n_setting):
"""Sets the top_n_setting of this TopNSettingsV1.
:param top_n_setting: The top_n_setting of this TopNSettingsV1. # noqa: E501
:type: TopNSettingV1
"""
self._top_n_setting = top_n_setting
@property
def units_setting(self):
"""Gets the units_setting of this TopNSettingsV1. # noqa: E501
:return: The units_setting of this TopNSettingsV1. # noqa: E501
:rtype: UnitsSetting
"""
return self._units_setting
@units_setting.setter
def units_setting(self, units_setting):
"""Sets the units_setting of this TopNSettingsV1.
:param units_setting: The units_setting of this TopNSettingsV1. # noqa: E501
:type: UnitsSetting
"""
self._units_setting = units_setting
@property
def work_hours_setting(self):
"""Gets the work_hours_setting of this TopNSettingsV1. # noqa: E501
:return: The work_hours_setting of this TopNSettingsV1. # noqa: E501
:rtype: WorkHoursSetting
"""
return self._work_hours_setting
@work_hours_setting.setter
def work_hours_setting(self, work_hours_setting):
"""Sets the work_hours_setting of this TopNSettingsV1.
:param work_hours_setting: The work_hours_setting of this TopNSettingsV1. # noqa: E501
:type: WorkHoursSetting
"""
self._work_hours_setting = work_hours_setting
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TopNSettingsV1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
    """Render the model as a pretty-printed string of its dict form."""
    model_dict = self.to_dict()
    return pprint.pformat(model_dict)
def __repr__(self):
    """Make ``repr()`` and ``print()`` show the pretty-printed model."""
    text = self.to_str()
    return text
def __eq__(self, other):
    """Two settings objects are equal iff both are TopNSettingsV1 and
    carry identical attribute dicts."""
    return isinstance(other, TopNSettingsV1) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Logical negation of ``__eq__``."""
    equal = self == other
    return not equal
| [
"[email protected]"
] | |
afa2882b1eba013364b4905e4587d9b9659673af | b4c28049f1e3bc367d523f84f3816178e9e1d468 | /20170114/20170114.py | a70b49fd3c810e42076cb52f10b49cef44730f2b | [
"MIT"
] | permissive | JaeGyu/PythonEx_1 | 5f602274727722ddc352fcdd7b5f41b73d8aa784 | e67053db6ca7431c3dd66351c190c53229e3f141 | refs/heads/master | 2020-05-22T05:43:59.902893 | 2017-09-02T06:54:57 | 2017-09-02T06:54:57 | 50,916,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | print("-"*60)
def main():
print("hello")
v = [i for i in range(10)]
print(v)
v = [lambda x : x+1 for i in range(10)]
print(v)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d7e1b702380217cdbf7e50d8d1f9c47f9d6dc0c7 | 8a04dffe7a45af171fa1f70bc1b7ae19a5cf14d9 | /venv/lib/python3.7/_weakrefset.py | f19be5dd359b9caf49c8ee8392c935fb388a1928 | [] | no_license | UmangKumar1/webscraper | 5f896f5c20172e952fe50f608f8457568c4086f0 | 67f690e167a9c7a0c297bebfec8891c34c1d6f38 | refs/heads/master | 2020-06-18T11:12:53.797857 | 2019-07-10T23:59:47 | 2019-07-10T23:59:47 | 196,284,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | /Users/mehakkumar/miniconda3/lib/python3.7/_weakrefset.py | [
"[email protected]"
] | |
3d88a4a543b65debc1577203dd10287d92deac00 | b1e48374dcf0ea7f71e03663e457980ad5968333 | /python/email/urls.py | 2facb411fee41084496424f3c8be21557d6fce7c | [] | no_license | nlake44/sample-apps | 306ad3773ce66bc2d212a181f8cdd8bf88172ef5 | 619eff9f14f0416ff2cb3ee8efd1838f46337339 | refs/heads/master | 2021-01-15T19:39:57.064088 | 2014-01-06T03:33:36 | 2014-01-06T03:33:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# URL routing table: maps request paths to view callables (old-style
# Django `patterns()` API).
urlpatterns = patterns('',
    # Site root is served by the sendmail app's `root` view.
    (r"^$","sendmail.views.root"),
    # Example:
    # (r'^testapp/', include('testapp.foo.urls')),

    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
)
| [
"[email protected]"
] | |
acfa537b578f6d76d25592188fdff1cef1a4d366 | 472943bcd41060072a8785e755102426521d175e | /9_plot_average_delays.py | 797be945a9c3680c9cff29972c0a172807da5ba0 | [] | no_license | Aausuman/Thesis | 8fef255a36123bdf3f5dbc090110e79ddc39fce9 | 8f2f5df32b0d9440007ffdcc77ae5136cc4b56e0 | refs/heads/master | 2023-01-24T20:06:33.659151 | 2020-11-01T21:02:47 | 2020-11-01T21:02:47 | 269,708,193 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
import matplotlib.pyplot as plt
# Initialising the Spark environment (Local machine)
conf = SparkConf().setMaster("local[*]").setAppName("Average_Delays")
sc = SparkContext(conf=conf)
sqc = SQLContext(sc)
raw_records = sc.textFile("/Users/aausuman/Documents/Thesis/Average_Delays_Databricks/Avg_Delays.csv")
# Function to extract fields from our comma separated data files
def pre_process(record):
fields = record.split(",")
return fields
# Importing and processing our dataset
records_rdd = raw_records.map(pre_process)
records_df = records_rdd.toDF(schema=["LineID", "Avg_Delay", "Date", "Day"])
# Remapping records into an RDD by LineID as Key
mapped_records_rdd = records_df.rdd.map(lambda x: (int(x["LineID"]), [(int(x["LineID"]), float(x["Avg_Delay"]), \
str(x["Date"]), str(x["Day"]))]))
# Grouping by LineID
reduced_byLineID_list = mapped_records_rdd.reduceByKey(lambda a, b: a + b).collect()
for lineID in reduced_byLineID_list:
if lineID[0] == 747:
x_axis = [day[2] for day in lineID[1]]
y_axis = [day[1] for day in lineID[1]]
# print(y_axis)
# print(x_axis)
plt.title("Average Delay of line ID = " + str(lineID[0]))
# plt.scatter(x_axis, y_axis, color='darkblue', marker='x')
plt.plot(y_axis)
plt.xlabel("Days")
plt.ylabel("Average Delay")
plt.grid(True)
plt.legend()
plt.show()
| [
"[email protected]"
] | |
1df7d9c4e2a2e778fe39a48af7db88135044a33d | 6885d145acec4027eb2527e1c23fe3530a0bcdbc | /mycoffeed/core/models.py | 780b792dda36217011dad32f5b5208a68c475f04 | [] | no_license | pamelamirelly/Coffeed | dbe8cd0d73010dc3af49fbc26b37eafdd4ab3c54 | 686606692cc3395ae2f2a0aac51d2260301e7594 | refs/heads/master | 2016-09-11T04:48:38.133076 | 2015-07-24T07:08:21 | 2015-07-24T07:08:21 | 39,566,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py | from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.db.models import Avg
from geoposition.fields import GeopositionField
import os
import uuid
# Star rating for a review; 0 means "no rating".
RATING_CHOICES = (
    (0, 'None'),
    (1, '*'),
    (2, '**'),
    (3, '***'),
    (4, '****'),
    (5, '*****'),
)

# Simple boolean amenity flags (bathrooms, alcohol, outdoor, food).
YESNO_CHOICES = (
    (0, 'No'),
    (1, 'Yes')
)

# Graded quantity for seating/outlets.
PLURAL_CHOICES = (
    (0, 'None'),
    (1, 'Minimal'),
    (2, 'Some'),
    (3, 'Ample')
)

# Wifi availability/quality.
WIFI_CHOICES = (
    (0, 'None'),
    (1, 'Spotty'),
    (2, 'Strong')
)

# Coffee quality scale.
COFFEE_CHOICES = (
    (0, 'None'),
    (1, 'Truck Stop'),
    (2, 'Good'),
    (3, 'Really Good'),
    (4, 'Great'),
)
# Create your models here.
def upload_to_location(instance, filename):
    """Build a unique storage path under ``uploads/`` for an uploaded file.

    Side effect: sets ``instance.title`` to the uploaded file's stem, so
    the object is named after the original file.

    Fixes the original ``filename.split('.')`` logic, which truncated
    multi-dot names ("my.photo.jpg" lost ".photo") and produced a bogus
    double extension for files with no extension at all.

    :param instance: model instance the file is attached to (title is set).
    :param filename: the original client-side filename.
    :return: relative path 'uploads/<uuid4><ext>'.
    """
    base, ext = os.path.splitext(filename)
    # uuid4 guarantees distinct storage names for same-named uploads.
    unique_name = "%s%s" % (uuid.uuid4(), ext)
    instance.title = base
    return os.path.join('uploads/', unique_name)
class Location(models.Model):
    """A venue (cafe etc.) with amenity ratings, a photo and a map position."""
    title = models.CharField(max_length=300)
    description = models.TextField(null=True, blank=True)
    address = models.TextField(null=True, blank=True)
    hours = models.TextField(null=True, blank=True)
    # Uploaded photo; upload_to_location renames the file and sets `title`.
    image_file = models.ImageField(upload_to=upload_to_location, null=True, blank=True)
    create_at = models.DateTimeField(auto_now_add=True)
    # Amenity ratings, keyed to the *_CHOICES tuples defined above.
    wifi = models.IntegerField(choices=WIFI_CHOICES, null=True, blank=True)
    seating = models.IntegerField(choices=PLURAL_CHOICES, null=True, blank=True)
    outlets = models.IntegerField(choices=PLURAL_CHOICES, null=True, blank=True)
    bathrooms = models.IntegerField(choices=YESNO_CHOICES, null=True, blank=True)
    coffee = models.IntegerField(choices=COFFEE_CHOICES, null=True, blank=True)
    alcohol = models.IntegerField(choices=YESNO_CHOICES, null=True, blank=True)
    outdoor = models.IntegerField(choices=YESNO_CHOICES, null=True, blank=True)
    food = models.IntegerField(choices=YESNO_CHOICES, null=True, blank=True)
    position = GeopositionField(null=True, blank=True)

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        return reverse(viewname="location_list", args=[self.id])

    # Rating helper.
    def get_average_rating(self):
        """Return the mean review rating truncated to int, or None when
        the location has no reviews yet."""
        average = self.review_set.all().aggregate(Avg('rating'))['rating__avg']
        # aggregate() yields None for an empty queryset; compare identity
        # with `is None` (fixes the original E711 `== None`).
        return None if average is None else int(average)

    # Review helper.
    def get_reviews(self):
        """All Review rows pointing at this location."""
        return self.review_set.all()
class Review(models.Model):
    """A user's star rating and optional text review of a Location."""
    location = models.ForeignKey(Location)
    user = models.ForeignKey(User)
    description = models.TextField(null=True, blank=True)
    # Star rating 0-5; 0 means "None" per RATING_CHOICES.
    rating = models.IntegerField(choices=RATING_CHOICES, null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
| [
"[email protected]"
] | |
ea95c7db4fb850cc6077673dd32f9a4f378b59e3 | 2bbf5e5e002f7a76a063f70fde58151d09e7a155 | /routepang/category/LocationCategory.py | abcfd231bfc1aec8c910ff11e749d5e70eebb29b | [] | no_license | jongwonleee/routepang_cwroller | a68de6b946763d84bf6f455e7f4f8b69dff6f076 | 8b7e453562719455b847e7dec7eca16c04d4f64a | refs/heads/master | 2022-03-13T10:15:58.933765 | 2019-11-18T08:31:00 | 2019-11-18T08:31:00 | 224,374,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,035 | py | # TODO ---------- 카테고리 목록 ------------
# UNKNOWN : 0
# ATTRACTION : 1
# FOOD : 2
# ACTIVITY : 3
# SHOPPING : 4
# TRAFFIC : 5
# RELIGIOUS : 6
# MEDICAL : 7
# PUBLIC : 8
# LODGE : 9
# ENTERTAINMENT : 10
# UTILITY : 11
# SERVICE : 12
class LocationCategory:
    """Maps Google Places "type" strings to Routepang category numbers.

    See the legend comment above the class: 0 = UNKNOWN ... 12 = SERVICE.
    """

    def __init__(self):
        # Google Places type string -> Routepang category number.
        self.category = {
            "accounting" : 0,
            "airport" : 5,
            "amusement_park" : 3,
            "aquarium" : 3,
            "art_gallery" : 3,
            "atm" : 11,
            "bakery" : 2,
            "bank" : 11,
            "bar" : 2,
            "beauty_salon" : 12,
            "bicycle_store" : 4,
            "book_store" : 4,
            "bowling_alley" : 10,
            "bus_station" : 5,
            "cafe" : 2,
            "campground" : 3,
            "car_dealer" : 4,
            "car_rental" : 11,
            "car_repair" : 0,
            "car_wash" : 0,
            "casino" : 10,
            "cemetery" : 0,
            "church" : 6,
            "city_hall" : 8,
            "clothing_store" : 4,
            "convenience_store" : 4,
            "courthouse" : 8,
            "dentist" : 7,
            "department_store" : 4,
            "doctor" : 7,
            "drugstore" : 4,
            "electrician" : 0,
            "electronics_store" : 4,
            "embassy" : 11,
            "fire_station" : 0,
            "florist" : 4,
            "funeral_home" : 0,
            "furniture_store" : 4,
            "gas_station" : 11,
            "grocery_or_supermarket" : 4,
            "gym" : 3,
            "hair_care" : 12,
            "hardware_store" : 4,
            "hindu_temple" : 6,
            "home_goods_store" : 4,
            "hospital" : 7,
            "insurance_agency" : 0,
            "jewelry_store" : 4,
            "laundry" : 11,
            "lawyer" : 0,
            "library" : 1,
            "light_rail_station" : 5,
            "liquor_store" : 4,
            "local_government_office" : 8,
            "locksmith" : 0,
            "lodging" : 9,
            "meal_delivery" : 2,
            "meal_takeaway" : 2,
            "mosque" : 6,
            "movie_rental" : 10,
            "movie_theater" : 10,
            "moving_company" : 0,
            "museum" : 3,
            "night_club" : 10,
            "painter" : 0,
            "park" : 8,
            "parking" : 11,
            "pet_store" : 4,
            "pharmacy" : 7,
            "physiotherapist" : 7,
            "plumber" : 0,
            "police" : 8,
            "post_office" : 8,
            "primary_school" : 0,
            "real_estate_agency" : 0,
            "restaurant" : 2,
            "roofing_contractor" : 0,
            "rv_park" : 3,
            "school" : 0,
            "secondary_school" : 0,
            "shoe_store" : 4,
            "shopping_mall" : 4,
            "spa" : 12,
            "stadium" : 3,
            "storage" : 0,
            "store" : 4,
            "subway_station" : 5,
            "supermarket" : 4,
            "synagogue" : 6,
            "taxi_stand" : 5,
            "tourist_attraction" : 1,
            "train_station" : 5,
            "transit_station" : 5,
            "travel_agency" : 3,
            "university" : 8,
            "veterinary_care" : 7,
            "zoo" : 3,
            # additional category
            "administrative_area_level_1" : 0,
            "administrative_area_level_2" : 0,
            "administrative_area_level_3" : 0,
            "administrative_area_level_4" : 0,
            "administrative_area_level_5" : 0,
            "archipelago" : 0,
            "colloquial_area" : 0,
            "continent" : 0,
            "country" : 0,
            "establishment" : 2,
            "finance" : 11,
            "floor" : 0,
            "food" : 2,
            "general_contractor" : 0,
            "geocode" : 0,
            "health" : 7,
            "intersection" : 0,
            "locality" : 0,
            "natural_feature" : 1,
            "neighborhood" : 0,
            "place_of_worship" : 6,
            "point_of_interest" : 1,
            "political" : 0,
            "post_box" : 0,
            "postal_code" : 8,
            "postal_code_prefix" : 0,
            "postal_code_suffix" : 0,
            "postal_town" : 8,
            "premise" : 0,
            "room" : 0,
            "route" : 0,
            "street_address" : 0,
            "street_number" : 0,
            "sublocality" : 0,
            "sublocality_level_1" : 0,
            "sublocality_level_2" : 0,
            "sublocality_level_3" : 0,
            "sublocality_level_4" : 0,
            "sublocality_level_5" : 0,
            "subpremise" : 0,
        }
# Convert a Google category to a Routepang category number.
def getCategoryNo(self, request):
    """Return the Routepang category number for a Google Places type.

    :param request: Google Places "type" string, e.g. "restaurant".
    :return: mapped category number; unknown/unmapped types fall back to
        0 (UNKNOWN) instead of raising KeyError, so a new Google type
        cannot crash the crawler.
    """
    return self.category.get(request, 0)
"[email protected]"
] | |
2b48076d8241d82087750d770207f6e0920c5197 | 5cb637edf5dbe61b2619d0cc8cd5a45f02a4eed0 | /ApiZiDongHua/venv/Scripts/easy_install-script.py | 7546adc6b585a22a3a47e88c65618261b81173b8 | [] | no_license | cyl19920314/TestMode | af7bcce9c665f84d069fc61f82295b41d69f88f4 | 505bcb3f2edef3ce97f9e73668ea75ad19a12adc | refs/heads/master | 2020-08-27T23:15:41.497163 | 2019-10-25T11:57:24 | 2019-10-25T11:57:24 | 217,517,054 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | #!D:\pythonProject\demo1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.