-- cosmopolitan/third_party/python/Lib/test/decimaltestdata/ddCompare.decTest
------------------------------------------------------------------------
-- ddCompare.decTest -- decDouble comparison that allows quiet NaNs --
-- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. --
------------------------------------------------------------------------
-- Please see the document "General Decimal Arithmetic Testcases" --
-- at http://www2.hursley.ibm.com/decimal for the description of --
-- these testcases. --
-- --
-- These testcases are experimental ('beta' versions), and they --
-- may contain errors. They are offered on an as-is basis. In --
-- particular, achieving the same results as the tests here is not --
-- a guarantee that an implementation complies with any Standard --
-- or specification. The tests are not exhaustive. --
-- --
-- Please send comments, suggestions, and corrections to the author: --
-- Mike Cowlishaw, IBM Fellow --
-- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK --
-- [email protected] --
------------------------------------------------------------------------
version: 2.59
-- Note that we cannot assume add/subtract tests cover paths adequately,
-- here, because the code might be quite different (comparison cannot
-- overflow or underflow, so actual subtractions are not necessary).
-- All operands and results are decDoubles.
precision: 16
maxExponent: 384
minExponent: -383
extended: 1
clamp: 1
rounding: half_even
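-- The directives above configure a decDouble context: 16-digit precision,
-- exponent range -383..+384, clamping, round-half-even. A minimal sketch of
-- driving the cases below through Python's decimal module, which consumes
-- this file in its test suite (names are illustrative):
--
--   from decimal import Context, Decimal, ROUND_HALF_EVEN
--   ctx = Context(prec=16, Emax=384, Emin=-383,
--                 rounding=ROUND_HALF_EVEN, clamp=1)
--   assert ctx.compare(Decimal('-2'), Decimal('-1')) == Decimal('-1')  # ddcom002
--   assert ctx.compare(Decimal('7.0'), Decimal('7')) == Decimal('0')   # ddcom101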
-- sanity checks
ddcom001 compare -2 -2 -> 0
ddcom002 compare -2 -1 -> -1
ddcom003 compare -2 0 -> -1
ddcom004 compare -2 1 -> -1
ddcom005 compare -2 2 -> -1
ddcom006 compare -1 -2 -> 1
ddcom007 compare -1 -1 -> 0
ddcom008 compare -1 0 -> -1
ddcom009 compare -1 1 -> -1
ddcom010 compare -1 2 -> -1
ddcom011 compare 0 -2 -> 1
ddcom012 compare 0 -1 -> 1
ddcom013 compare 0 0 -> 0
ddcom014 compare 0 1 -> -1
ddcom015 compare 0 2 -> -1
ddcom016 compare 1 -2 -> 1
ddcom017 compare 1 -1 -> 1
ddcom018 compare 1 0 -> 1
ddcom019 compare 1 1 -> 0
ddcom020 compare 1 2 -> -1
ddcom021 compare 2 -2 -> 1
ddcom022 compare 2 -1 -> 1
ddcom023 compare 2 0 -> 1
ddcom025 compare 2 1 -> 1
ddcom026 compare 2 2 -> 0
ddcom031 compare -20 -20 -> 0
ddcom032 compare -20 -10 -> -1
ddcom033 compare -20 00 -> -1
ddcom034 compare -20 10 -> -1
ddcom035 compare -20 20 -> -1
ddcom036 compare -10 -20 -> 1
ddcom037 compare -10 -10 -> 0
ddcom038 compare -10 00 -> -1
ddcom039 compare -10 10 -> -1
ddcom040 compare -10 20 -> -1
ddcom041 compare 00 -20 -> 1
ddcom042 compare 00 -10 -> 1
ddcom043 compare 00 00 -> 0
ddcom044 compare 00 10 -> -1
ddcom045 compare 00 20 -> -1
ddcom046 compare 10 -20 -> 1
ddcom047 compare 10 -10 -> 1
ddcom048 compare 10 00 -> 1
ddcom049 compare 10 10 -> 0
ddcom050 compare 10 20 -> -1
ddcom051 compare 20 -20 -> 1
ddcom052 compare 20 -10 -> 1
ddcom053 compare 20 00 -> 1
ddcom055 compare 20 10 -> 1
ddcom056 compare 20 20 -> 0
ddcom061 compare -2.0 -2.0 -> 0
ddcom062 compare -2.0 -1.0 -> -1
ddcom063 compare -2.0 0.0 -> -1
ddcom064 compare -2.0 1.0 -> -1
ddcom065 compare -2.0 2.0 -> -1
ddcom066 compare -1.0 -2.0 -> 1
ddcom067 compare -1.0 -1.0 -> 0
ddcom068 compare -1.0 0.0 -> -1
ddcom069 compare -1.0 1.0 -> -1
ddcom070 compare -1.0 2.0 -> -1
ddcom071 compare 0.0 -2.0 -> 1
ddcom072 compare 0.0 -1.0 -> 1
ddcom073 compare 0.0 0.0 -> 0
ddcom074 compare 0.0 1.0 -> -1
ddcom075 compare 0.0 2.0 -> -1
ddcom076 compare 1.0 -2.0 -> 1
ddcom077 compare 1.0 -1.0 -> 1
ddcom078 compare 1.0 0.0 -> 1
ddcom079 compare 1.0 1.0 -> 0
ddcom080 compare 1.0 2.0 -> -1
ddcom081 compare 2.0 -2.0 -> 1
ddcom082 compare 2.0 -1.0 -> 1
ddcom083 compare 2.0 0.0 -> 1
ddcom085 compare 2.0 1.0 -> 1
ddcom086 compare 2.0 2.0 -> 0
ddcom087 compare 1.0 0.1 -> 1
ddcom088 compare 0.1 1.0 -> -1
-- now some cases which might overflow if subtract were used
ddcom095 compare 9.999999999999999E+384 9.999999999999999E+384 -> 0
ddcom096 compare -9.999999999999999E+384 9.999999999999999E+384 -> -1
ddcom097 compare 9.999999999999999E+384 -9.999999999999999E+384 -> 1
ddcom098 compare -9.999999999999999E+384 -9.999999999999999E+384 -> 0
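-- As the note above says, comparison cannot overflow, and indeed no flags
-- accompany the results here even for Nmax-magnitude operands of opposite
-- sign. A self-contained sketch in Python's decimal module:
--
--   from decimal import Context, Decimal
--   ctx = Context(prec=16, Emax=384, Emin=-383, clamp=1)
--   a = Decimal('9.999999999999999E+384')
--   b = Decimal('-9.999999999999999E+384')
--   assert ctx.compare(a, b) == Decimal('1')    # ddcom097
--   assert not any(ctx.flags.values())          # no Overflow is signalled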
-- some differing length/exponent cases
ddcom100 compare 7.0 7.0 -> 0
ddcom101 compare 7.0 7 -> 0
ddcom102 compare 7 7.0 -> 0
ddcom103 compare 7E+0 7.0 -> 0
ddcom104 compare 70E-1 7.0 -> 0
ddcom105 compare 0.7E+1 7 -> 0
ddcom106 compare 70E-1 7 -> 0
ddcom107 compare 7.0 7E+0 -> 0
ddcom108 compare 7.0 70E-1 -> 0
ddcom109 compare 7 0.7E+1 -> 0
ddcom110 compare 7 70E-1 -> 0
ddcom120 compare 8.0 7.0 -> 1
ddcom121 compare 8.0 7 -> 1
ddcom122 compare 8 7.0 -> 1
ddcom123 compare 8E+0 7.0 -> 1
ddcom124 compare 80E-1 7.0 -> 1
ddcom125 compare 0.8E+1 7 -> 1
ddcom126 compare 80E-1 7 -> 1
ddcom127 compare 8.0 7E+0 -> 1
ddcom128 compare 8.0 70E-1 -> 1
ddcom129 compare 8 0.7E+1 -> 1
ddcom130 compare 8 70E-1 -> 1
ddcom140 compare 8.0 9.0 -> -1
ddcom141 compare 8.0 9 -> -1
ddcom142 compare 8 9.0 -> -1
ddcom143 compare 8E+0 9.0 -> -1
ddcom144 compare 80E-1 9.0 -> -1
ddcom145 compare 0.8E+1 9 -> -1
ddcom146 compare 80E-1 9 -> -1
ddcom147 compare 8.0 9E+0 -> -1
ddcom148 compare 8.0 90E-1 -> -1
ddcom149 compare 8 0.9E+1 -> -1
ddcom150 compare 8 90E-1 -> -1
-- and again, with sign changes -+ ..
ddcom200 compare -7.0 7.0 -> -1
ddcom201 compare -7.0 7 -> -1
ddcom202 compare -7 7.0 -> -1
ddcom203 compare -7E+0 7.0 -> -1
ddcom204 compare -70E-1 7.0 -> -1
ddcom205 compare -0.7E+1 7 -> -1
ddcom206 compare -70E-1 7 -> -1
ddcom207 compare -7.0 7E+0 -> -1
ddcom208 compare -7.0 70E-1 -> -1
ddcom209 compare -7 0.7E+1 -> -1
ddcom210 compare -7 70E-1 -> -1
ddcom220 compare -8.0 7.0 -> -1
ddcom221 compare -8.0 7 -> -1
ddcom222 compare -8 7.0 -> -1
ddcom223 compare -8E+0 7.0 -> -1
ddcom224 compare -80E-1 7.0 -> -1
ddcom225 compare -0.8E+1 7 -> -1
ddcom226 compare -80E-1 7 -> -1
ddcom227 compare -8.0 7E+0 -> -1
ddcom228 compare -8.0 70E-1 -> -1
ddcom229 compare -8 0.7E+1 -> -1
ddcom230 compare -8 70E-1 -> -1
ddcom240 compare -8.0 9.0 -> -1
ddcom241 compare -8.0 9 -> -1
ddcom242 compare -8 9.0 -> -1
ddcom243 compare -8E+0 9.0 -> -1
ddcom244 compare -80E-1 9.0 -> -1
ddcom245 compare -0.8E+1 9 -> -1
ddcom246 compare -80E-1 9 -> -1
ddcom247 compare -8.0 9E+0 -> -1
ddcom248 compare -8.0 90E-1 -> -1
ddcom249 compare -8 0.9E+1 -> -1
ddcom250 compare -8 90E-1 -> -1
-- and again, with sign changes +- ..
ddcom300 compare 7.0 -7.0 -> 1
ddcom301 compare 7.0 -7 -> 1
ddcom302 compare 7 -7.0 -> 1
ddcom303 compare 7E+0 -7.0 -> 1
ddcom304 compare 70E-1 -7.0 -> 1
ddcom305 compare .7E+1 -7 -> 1
ddcom306 compare 70E-1 -7 -> 1
ddcom307 compare 7.0 -7E+0 -> 1
ddcom308 compare 7.0 -70E-1 -> 1
ddcom309 compare 7 -.7E+1 -> 1
ddcom310 compare 7 -70E-1 -> 1
ddcom320 compare 8.0 -7.0 -> 1
ddcom321 compare 8.0 -7 -> 1
ddcom322 compare 8 -7.0 -> 1
ddcom323 compare 8E+0 -7.0 -> 1
ddcom324 compare 80E-1 -7.0 -> 1
ddcom325 compare .8E+1 -7 -> 1
ddcom326 compare 80E-1 -7 -> 1
ddcom327 compare 8.0 -7E+0 -> 1
ddcom328 compare 8.0 -70E-1 -> 1
ddcom329 compare 8 -.7E+1 -> 1
ddcom330 compare 8 -70E-1 -> 1
ddcom340 compare 8.0 -9.0 -> 1
ddcom341 compare 8.0 -9 -> 1
ddcom342 compare 8 -9.0 -> 1
ddcom343 compare 8E+0 -9.0 -> 1
ddcom344 compare 80E-1 -9.0 -> 1
ddcom345 compare .8E+1 -9 -> 1
ddcom346 compare 80E-1 -9 -> 1
ddcom347 compare 8.0 -9E+0 -> 1
ddcom348 compare 8.0 -90E-1 -> 1
ddcom349 compare 8 -.9E+1 -> 1
ddcom350 compare 8 -90E-1 -> 1
-- and again, with sign changes -- ..
ddcom400 compare -7.0 -7.0 -> 0
ddcom401 compare -7.0 -7 -> 0
ddcom402 compare -7 -7.0 -> 0
ddcom403 compare -7E+0 -7.0 -> 0
ddcom404 compare -70E-1 -7.0 -> 0
ddcom405 compare -.7E+1 -7 -> 0
ddcom406 compare -70E-1 -7 -> 0
ddcom407 compare -7.0 -7E+0 -> 0
ddcom408 compare -7.0 -70E-1 -> 0
ddcom409 compare -7 -.7E+1 -> 0
ddcom410 compare -7 -70E-1 -> 0
ddcom420 compare -8.0 -7.0 -> -1
ddcom421 compare -8.0 -7 -> -1
ddcom422 compare -8 -7.0 -> -1
ddcom423 compare -8E+0 -7.0 -> -1
ddcom424 compare -80E-1 -7.0 -> -1
ddcom425 compare -.8E+1 -7 -> -1
ddcom426 compare -80E-1 -7 -> -1
ddcom427 compare -8.0 -7E+0 -> -1
ddcom428 compare -8.0 -70E-1 -> -1
ddcom429 compare -8 -.7E+1 -> -1
ddcom430 compare -8 -70E-1 -> -1
ddcom440 compare -8.0 -9.0 -> 1
ddcom441 compare -8.0 -9 -> 1
ddcom442 compare -8 -9.0 -> 1
ddcom443 compare -8E+0 -9.0 -> 1
ddcom444 compare -80E-1 -9.0 -> 1
ddcom445 compare -.8E+1 -9 -> 1
ddcom446 compare -80E-1 -9 -> 1
ddcom447 compare -8.0 -9E+0 -> 1
ddcom448 compare -8.0 -90E-1 -> 1
ddcom449 compare -8 -.9E+1 -> 1
ddcom450 compare -8 -90E-1 -> 1
-- misalignment traps for little-endian
ddcom451 compare 1.0 0.1 -> 1
ddcom452 compare 0.1 1.0 -> -1
ddcom453 compare 10.0 0.1 -> 1
ddcom454 compare 0.1 10.0 -> -1
ddcom455 compare 100 1.0 -> 1
ddcom456 compare 1.0 100 -> -1
ddcom457 compare 1000 10.0 -> 1
ddcom458 compare 10.0 1000 -> -1
ddcom459 compare 10000 100.0 -> 1
ddcom460 compare 100.0 10000 -> -1
ddcom461 compare 100000 1000.0 -> 1
ddcom462 compare 1000.0 100000 -> -1
ddcom463 compare 1000000 10000.0 -> 1
ddcom464 compare 10000.0 1000000 -> -1
-- testcases that subtract to lots of zeros at boundaries [pgr]
ddcom473 compare 123.4560000000000E-89 123.456E-89 -> 0
ddcom474 compare 123.456000000000E+89 123.456E+89 -> 0
ddcom475 compare 123.45600000000E-89 123.456E-89 -> 0
ddcom476 compare 123.4560000000E+89 123.456E+89 -> 0
ddcom477 compare 123.456000000E-89 123.456E-89 -> 0
ddcom478 compare 123.45600000E+89 123.456E+89 -> 0
ddcom479 compare 123.4560000E-89 123.456E-89 -> 0
ddcom480 compare 123.456000E+89 123.456E+89 -> 0
ddcom481 compare 123.45600E-89 123.456E-89 -> 0
ddcom482 compare 123.4560E+89 123.456E+89 -> 0
ddcom483 compare 123.456E-89 123.456E-89 -> 0
ddcom487 compare 123.456E+89 123.4560000000000E+89 -> 0
ddcom488 compare 123.456E-89 123.456000000000E-89 -> 0
ddcom489 compare 123.456E+89 123.45600000000E+89 -> 0
ddcom490 compare 123.456E-89 123.4560000000E-89 -> 0
ddcom491 compare 123.456E+89 123.456000000E+89 -> 0
ddcom492 compare 123.456E-89 123.45600000E-89 -> 0
ddcom493 compare 123.456E+89 123.4560000E+89 -> 0
ddcom494 compare 123.456E-89 123.456000E-89 -> 0
ddcom495 compare 123.456E+89 123.45600E+89 -> 0
ddcom496 compare 123.456E-89 123.4560E-89 -> 0
ddcom497 compare 123.456E+89 123.456E+89 -> 0
-- wide-ranging, around precision; signs equal
ddcom500 compare 1 1E-15 -> 1
ddcom501 compare 1 1E-14 -> 1
ddcom502 compare 1 1E-13 -> 1
ddcom503 compare 1 1E-12 -> 1
ddcom504 compare 1 1E-11 -> 1
ddcom505 compare 1 1E-10 -> 1
ddcom506 compare 1 1E-9 -> 1
ddcom507 compare 1 1E-8 -> 1
ddcom508 compare 1 1E-7 -> 1
ddcom509 compare 1 1E-6 -> 1
ddcom510 compare 1 1E-5 -> 1
ddcom511 compare 1 1E-4 -> 1
ddcom512 compare 1 1E-3 -> 1
ddcom513 compare 1 1E-2 -> 1
ddcom514 compare 1 1E-1 -> 1
ddcom515 compare 1 1E-0 -> 0
ddcom516 compare 1 1E+1 -> -1
ddcom517 compare 1 1E+2 -> -1
ddcom518 compare 1 1E+3 -> -1
ddcom519 compare 1 1E+4 -> -1
ddcom521 compare 1 1E+5 -> -1
ddcom522 compare 1 1E+6 -> -1
ddcom523 compare 1 1E+7 -> -1
ddcom524 compare 1 1E+8 -> -1
ddcom525 compare 1 1E+9 -> -1
ddcom526 compare 1 1E+10 -> -1
ddcom527 compare 1 1E+11 -> -1
ddcom528 compare 1 1E+12 -> -1
ddcom529 compare 1 1E+13 -> -1
ddcom530 compare 1 1E+14 -> -1
ddcom531 compare 1 1E+15 -> -1
-- LR swap
ddcom540 compare 1E-15 1 -> -1
ddcom541 compare 1E-14 1 -> -1
ddcom542 compare 1E-13 1 -> -1
ddcom543 compare 1E-12 1 -> -1
ddcom544 compare 1E-11 1 -> -1
ddcom545 compare 1E-10 1 -> -1
ddcom546 compare 1E-9 1 -> -1
ddcom547 compare 1E-8 1 -> -1
ddcom548 compare 1E-7 1 -> -1
ddcom549 compare 1E-6 1 -> -1
ddcom550 compare 1E-5 1 -> -1
ddcom551 compare 1E-4 1 -> -1
ddcom552 compare 1E-3 1 -> -1
ddcom553 compare 1E-2 1 -> -1
ddcom554 compare 1E-1 1 -> -1
ddcom555 compare 1E-0 1 -> 0
ddcom556 compare 1E+1 1 -> 1
ddcom557 compare 1E+2 1 -> 1
ddcom558 compare 1E+3 1 -> 1
ddcom559 compare 1E+4 1 -> 1
ddcom561 compare 1E+5 1 -> 1
ddcom562 compare 1E+6 1 -> 1
ddcom563 compare 1E+7 1 -> 1
ddcom564 compare 1E+8 1 -> 1
ddcom565 compare 1E+9 1 -> 1
ddcom566 compare 1E+10 1 -> 1
ddcom567 compare 1E+11 1 -> 1
ddcom568 compare 1E+12 1 -> 1
ddcom569 compare 1E+13 1 -> 1
ddcom570 compare 1E+14 1 -> 1
ddcom571 compare 1E+15 1 -> 1
-- similar with a useful coefficient, one side only
ddcom580 compare 0.000000987654321 1E-15 -> 1
ddcom581 compare 0.000000987654321 1E-14 -> 1
ddcom582 compare 0.000000987654321 1E-13 -> 1
ddcom583 compare 0.000000987654321 1E-12 -> 1
ddcom584 compare 0.000000987654321 1E-11 -> 1
ddcom585 compare 0.000000987654321 1E-10 -> 1
ddcom586 compare 0.000000987654321 1E-9 -> 1
ddcom587 compare 0.000000987654321 1E-8 -> 1
ddcom588 compare 0.000000987654321 1E-7 -> 1
ddcom589 compare 0.000000987654321 1E-6 -> -1
ddcom590 compare 0.000000987654321 1E-5 -> -1
ddcom591 compare 0.000000987654321 1E-4 -> -1
ddcom592 compare 0.000000987654321 1E-3 -> -1
ddcom593 compare 0.000000987654321 1E-2 -> -1
ddcom594 compare 0.000000987654321 1E-1 -> -1
ddcom595 compare 0.000000987654321 1E-0 -> -1
ddcom596 compare 0.000000987654321 1E+1 -> -1
ddcom597 compare 0.000000987654321 1E+2 -> -1
ddcom598 compare 0.000000987654321 1E+3 -> -1
ddcom599 compare 0.000000987654321 1E+4 -> -1
-- check some unit-y traps
ddcom600 compare 12 12.2345 -> -1
ddcom601 compare 12.0 12.2345 -> -1
ddcom602 compare 12.00 12.2345 -> -1
ddcom603 compare 12.000 12.2345 -> -1
ddcom604 compare 12.0000 12.2345 -> -1
ddcom605 compare 12.00000 12.2345 -> -1
ddcom606 compare 12.000000 12.2345 -> -1
ddcom607 compare 12.0000000 12.2345 -> -1
ddcom608 compare 12.00000000 12.2345 -> -1
ddcom609 compare 12.000000000 12.2345 -> -1
ddcom610 compare 12.1234 12 -> 1
ddcom611 compare 12.1234 12.0 -> 1
ddcom612 compare 12.1234 12.00 -> 1
ddcom613 compare 12.1234 12.000 -> 1
ddcom614 compare 12.1234 12.0000 -> 1
ddcom615 compare 12.1234 12.00000 -> 1
ddcom616 compare 12.1234 12.000000 -> 1
ddcom617 compare 12.1234 12.0000000 -> 1
ddcom618 compare 12.1234 12.00000000 -> 1
ddcom619 compare 12.1234 12.000000000 -> 1
ddcom620 compare -12 -12.2345 -> 1
ddcom621 compare -12.0 -12.2345 -> 1
ddcom622 compare -12.00 -12.2345 -> 1
ddcom623 compare -12.000 -12.2345 -> 1
ddcom624 compare -12.0000 -12.2345 -> 1
ddcom625 compare -12.00000 -12.2345 -> 1
ddcom626 compare -12.000000 -12.2345 -> 1
ddcom627 compare -12.0000000 -12.2345 -> 1
ddcom628 compare -12.00000000 -12.2345 -> 1
ddcom629 compare -12.000000000 -12.2345 -> 1
ddcom630 compare -12.1234 -12 -> -1
ddcom631 compare -12.1234 -12.0 -> -1
ddcom632 compare -12.1234 -12.00 -> -1
ddcom633 compare -12.1234 -12.000 -> -1
ddcom634 compare -12.1234 -12.0000 -> -1
ddcom635 compare -12.1234 -12.00000 -> -1
ddcom636 compare -12.1234 -12.000000 -> -1
ddcom637 compare -12.1234 -12.0000000 -> -1
ddcom638 compare -12.1234 -12.00000000 -> -1
ddcom639 compare -12.1234 -12.000000000 -> -1
-- extended zeros
ddcom640 compare 0 0 -> 0
ddcom641 compare 0 -0 -> 0
ddcom642 compare 0 -0.0 -> 0
ddcom643 compare 0 0.0 -> 0
ddcom644 compare -0 0 -> 0
ddcom645 compare -0 -0 -> 0
ddcom646 compare -0 -0.0 -> 0
ddcom647 compare -0 0.0 -> 0
ddcom648 compare 0.0 0 -> 0
ddcom649 compare 0.0 -0 -> 0
ddcom650 compare 0.0 -0.0 -> 0
ddcom651 compare 0.0 0.0 -> 0
ddcom652 compare -0.0 0 -> 0
ddcom653 compare -0.0 -0 -> 0
ddcom654 compare -0.0 -0.0 -> 0
ddcom655 compare -0.0 0.0 -> 0
ddcom656 compare -0E1 0.0 -> 0
ddcom657 compare -0E2 0.0 -> 0
ddcom658 compare 0E1 0.0 -> 0
ddcom659 compare 0E2 0.0 -> 0
ddcom660 compare -0E1 0 -> 0
ddcom661 compare -0E2 0 -> 0
ddcom662 compare 0E1 0 -> 0
ddcom663 compare 0E2 0 -> 0
ddcom664 compare -0E1 -0E1 -> 0
ddcom665 compare -0E2 -0E1 -> 0
ddcom666 compare 0E1 -0E1 -> 0
ddcom667 compare 0E2 -0E1 -> 0
ddcom668 compare -0E1 -0E2 -> 0
ddcom669 compare -0E2 -0E2 -> 0
ddcom670 compare 0E1 -0E2 -> 0
ddcom671 compare 0E2 -0E2 -> 0
ddcom672 compare -0E1 0E1 -> 0
ddcom673 compare -0E2 0E1 -> 0
ddcom674 compare 0E1 0E1 -> 0
ddcom675 compare 0E2 0E1 -> 0
ddcom676 compare -0E1 0E2 -> 0
ddcom677 compare -0E2 0E2 -> 0
ddcom678 compare 0E1 0E2 -> 0
ddcom679 compare 0E2 0E2 -> 0
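-- The cases above show that compare treats every zero as equal, regardless
-- of sign or exponent; a short self-contained sketch in Python's decimal:
--
--   from decimal import Context, Decimal
--   ctx = Context(prec=16, Emax=384, Emin=-383, clamp=1)
--   assert ctx.compare(Decimal('-0E2'), Decimal('0.0')) == Decimal('0')  # ddcom657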
-- trailing zeros; unit-y
ddcom680 compare 12 12 -> 0
ddcom681 compare 12 12.0 -> 0
ddcom682 compare 12 12.00 -> 0
ddcom683 compare 12 12.000 -> 0
ddcom684 compare 12 12.0000 -> 0
ddcom685 compare 12 12.00000 -> 0
ddcom686 compare 12 12.000000 -> 0
ddcom687 compare 12 12.0000000 -> 0
ddcom688 compare 12 12.00000000 -> 0
ddcom689 compare 12 12.000000000 -> 0
ddcom690 compare 12 12 -> 0
ddcom691 compare 12.0 12 -> 0
ddcom692 compare 12.00 12 -> 0
ddcom693 compare 12.000 12 -> 0
ddcom694 compare 12.0000 12 -> 0
ddcom695 compare 12.00000 12 -> 0
ddcom696 compare 12.000000 12 -> 0
ddcom697 compare 12.0000000 12 -> 0
ddcom698 compare 12.00000000 12 -> 0
ddcom699 compare 12.000000000 12 -> 0
-- first, second, & last digit
ddcom700 compare 1234567890123456 1234567890123455 -> 1
ddcom701 compare 1234567890123456 1234567890123456 -> 0
ddcom702 compare 1234567890123456 1234567890123457 -> -1
ddcom703 compare 1234567890123456 0234567890123456 -> 1
ddcom704 compare 1234567890123456 1234567890123456 -> 0
ddcom705 compare 1234567890123456 2234567890123456 -> -1
ddcom706 compare 1134567890123456 1034567890123456 -> 1
ddcom707 compare 1134567890123456 1134567890123456 -> 0
ddcom708 compare 1134567890123456 1234567890123456 -> -1
-- miscellaneous
ddcom721 compare 12345678000 1 -> 1
ddcom722 compare 1 12345678000 -> -1
ddcom723 compare 1234567800 1 -> 1
ddcom724 compare 1 1234567800 -> -1
ddcom725 compare 1234567890 1 -> 1
ddcom726 compare 1 1234567890 -> -1
ddcom727 compare 1234567891 1 -> 1
ddcom728 compare 1 1234567891 -> -1
ddcom729 compare 12345678901 1 -> 1
ddcom730 compare 1 12345678901 -> -1
ddcom731 compare 1234567896 1 -> 1
ddcom732 compare 1 1234567896 -> -1
-- residue cases at lower precision
ddcom740 compare 1 0.9999999 -> 1
ddcom741 compare 1 0.999999 -> 1
ddcom742 compare 1 0.99999 -> 1
ddcom743 compare 1 1.0000 -> 0
ddcom744 compare 1 1.00001 -> -1
ddcom745 compare 1 1.000001 -> -1
ddcom746 compare 1 1.0000001 -> -1
ddcom750 compare 0.9999999 1 -> -1
ddcom751 compare 0.999999 1 -> -1
ddcom752 compare 0.99999 1 -> -1
ddcom753 compare 1.0000 1 -> 0
ddcom754 compare 1.00001 1 -> 1
ddcom755 compare 1.000001 1 -> 1
ddcom756 compare 1.0000001 1 -> 1
-- Specials
ddcom780 compare Inf -Inf -> 1
ddcom781 compare Inf -1000 -> 1
ddcom782 compare Inf -1 -> 1
ddcom783 compare Inf -0 -> 1
ddcom784 compare Inf 0 -> 1
ddcom785 compare Inf 1 -> 1
ddcom786 compare Inf 1000 -> 1
ddcom787 compare Inf Inf -> 0
ddcom788 compare -1000 Inf -> -1
ddcom789 compare -Inf Inf -> -1
ddcom790 compare -1 Inf -> -1
ddcom791 compare -0 Inf -> -1
ddcom792 compare 0 Inf -> -1
ddcom793 compare 1 Inf -> -1
ddcom794 compare 1000 Inf -> -1
ddcom795 compare Inf Inf -> 0
ddcom800 compare -Inf -Inf -> 0
ddcom801 compare -Inf -1000 -> -1
ddcom802 compare -Inf -1 -> -1
ddcom803 compare -Inf -0 -> -1
ddcom804 compare -Inf 0 -> -1
ddcom805 compare -Inf 1 -> -1
ddcom806 compare -Inf 1000 -> -1
ddcom807 compare -Inf Inf -> -1
ddcom808 compare -Inf -Inf -> 0
ddcom809 compare -1000 -Inf -> 1
ddcom810 compare -1 -Inf -> 1
ddcom811 compare -0 -Inf -> 1
ddcom812 compare 0 -Inf -> 1
ddcom813 compare 1 -Inf -> 1
ddcom814 compare 1000 -Inf -> 1
ddcom815 compare Inf -Inf -> 1
ddcom821 compare NaN -Inf -> NaN
ddcom822 compare NaN -1000 -> NaN
ddcom823 compare NaN -1 -> NaN
ddcom824 compare NaN -0 -> NaN
ddcom825 compare NaN 0 -> NaN
ddcom826 compare NaN 1 -> NaN
ddcom827 compare NaN 1000 -> NaN
ddcom828 compare NaN Inf -> NaN
ddcom829 compare NaN NaN -> NaN
ddcom830 compare -Inf NaN -> NaN
ddcom831 compare -1000 NaN -> NaN
ddcom832 compare -1 NaN -> NaN
ddcom833 compare -0 NaN -> NaN
ddcom834 compare 0 NaN -> NaN
ddcom835 compare 1 NaN -> NaN
ddcom836 compare 1000 NaN -> NaN
ddcom837 compare Inf NaN -> NaN
ddcom838 compare -NaN -NaN -> -NaN
ddcom839 compare +NaN -NaN -> NaN
ddcom840 compare -NaN +NaN -> -NaN
ddcom841 compare sNaN -Inf -> NaN Invalid_operation
ddcom842 compare sNaN -1000 -> NaN Invalid_operation
ddcom843 compare sNaN -1 -> NaN Invalid_operation
ddcom844 compare sNaN -0 -> NaN Invalid_operation
ddcom845 compare sNaN 0 -> NaN Invalid_operation
ddcom846 compare sNaN 1 -> NaN Invalid_operation
ddcom847 compare sNaN 1000 -> NaN Invalid_operation
ddcom848 compare sNaN NaN -> NaN Invalid_operation
ddcom849 compare sNaN sNaN -> NaN Invalid_operation
ddcom850 compare NaN sNaN -> NaN Invalid_operation
ddcom851 compare -Inf sNaN -> NaN Invalid_operation
ddcom852 compare -1000 sNaN -> NaN Invalid_operation
ddcom853 compare -1 sNaN -> NaN Invalid_operation
ddcom854 compare -0 sNaN -> NaN Invalid_operation
ddcom855 compare 0 sNaN -> NaN Invalid_operation
ddcom856 compare 1 sNaN -> NaN Invalid_operation
ddcom857 compare 1000 sNaN -> NaN Invalid_operation
ddcom858 compare Inf sNaN -> NaN Invalid_operation
ddcom859 compare NaN sNaN -> NaN Invalid_operation
-- propagating NaNs
ddcom860 compare NaN9 -Inf -> NaN9
ddcom861 compare NaN8 999 -> NaN8
ddcom862 compare NaN77 Inf -> NaN77
ddcom863 compare -NaN67 NaN5 -> -NaN67
ddcom864 compare -Inf -NaN4 -> -NaN4
ddcom865 compare -999 -NaN33 -> -NaN33
ddcom866 compare Inf NaN2 -> NaN2
ddcom867 compare -NaN41 -NaN42 -> -NaN41
ddcom868 compare +NaN41 -NaN42 -> NaN41
ddcom869 compare -NaN41 +NaN42 -> -NaN41
ddcom870 compare +NaN41 +NaN42 -> NaN41
ddcom871 compare -sNaN99 -Inf -> -NaN99 Invalid_operation
ddcom872 compare sNaN98 -11 -> NaN98 Invalid_operation
ddcom873 compare sNaN97 NaN -> NaN97 Invalid_operation
ddcom874 compare sNaN16 sNaN94 -> NaN16 Invalid_operation
ddcom875 compare NaN85 sNaN83 -> NaN83 Invalid_operation
ddcom876 compare -Inf sNaN92 -> NaN92 Invalid_operation
ddcom877 compare 088 sNaN81 -> NaN81 Invalid_operation
ddcom878 compare Inf sNaN90 -> NaN90 Invalid_operation
ddcom879 compare NaN -sNaN89 -> -NaN89 Invalid_operation
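-- A sketch of the NaN behaviour above in Python's decimal module: a quiet
-- NaN propagates with its sign and payload, while an sNaN is quieted and
-- signals Invalid_operation (untrapped here so the result can be checked):
--
--   from decimal import Context, Decimal, InvalidOperation
--   ctx = Context(prec=16, Emax=384, Emin=-383, clamp=1)
--   ctx.traps[InvalidOperation] = False
--   assert str(ctx.compare(Decimal('NaN9'), Decimal('-Infinity'))) == 'NaN9'  # ddcom860
--   assert str(ctx.compare(Decimal('sNaN98'), Decimal('-11'))) == 'NaN98'     # ddcom872
--   assert ctx.flags[InvalidOperation]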
-- wide range
ddcom880 compare +1.23456789012345E-0 9E+384 -> -1
ddcom881 compare 9E+384 +1.23456789012345E-0 -> 1
ddcom882 compare +0.100 9E-383 -> 1
ddcom883 compare 9E-383 +0.100 -> -1
ddcom885 compare -1.23456789012345E-0 9E+384 -> -1
ddcom886 compare 9E+384 -1.23456789012345E-0 -> 1
ddcom887 compare -0.100 9E-383 -> -1
ddcom888 compare 9E-383 -0.100 -> 1
-- spread zeros
ddcom900 compare 0E-383 0 -> 0
ddcom901 compare 0E-383 -0 -> 0
ddcom902 compare -0E-383 0 -> 0
ddcom903 compare -0E-383 -0 -> 0
ddcom904 compare 0E-383 0E+384 -> 0
ddcom905 compare 0E-383 -0E+384 -> 0
ddcom906 compare -0E-383 0E+384 -> 0
ddcom907 compare -0E-383 -0E+384 -> 0
ddcom908 compare 0 0E+384 -> 0
ddcom909 compare 0 -0E+384 -> 0
ddcom910 compare -0 0E+384 -> 0
ddcom911 compare -0 -0E+384 -> 0
ddcom930 compare 0E+384 0 -> 0
ddcom931 compare 0E+384 -0 -> 0
ddcom932 compare -0E+384 0 -> 0
ddcom933 compare -0E+384 -0 -> 0
ddcom934 compare 0E+384 0E-383 -> 0
ddcom935 compare 0E+384 -0E-383 -> 0
ddcom936 compare -0E+384 0E-383 -> 0
ddcom937 compare -0E+384 -0E-383 -> 0
ddcom938 compare 0 0E-383 -> 0
ddcom939 compare 0 -0E-383 -> 0
ddcom940 compare -0 0E-383 -> 0
ddcom941 compare -0 -0E-383 -> 0
-- signs
ddcom961 compare 1e+77 1e+11 -> 1
ddcom962 compare 1e+77 -1e+11 -> 1
ddcom963 compare -1e+77 1e+11 -> -1
ddcom964 compare -1e+77 -1e+11 -> -1
ddcom965 compare 1e-77 1e-11 -> -1
ddcom966 compare 1e-77 -1e-11 -> 1
ddcom967 compare -1e-77 1e-11 -> -1
ddcom968 compare -1e-77 -1e-11 -> 1
-- full alignment range, both ways
ddcomp1001 compare 1 1.000000000000000 -> 0
ddcomp1002 compare 1 1.00000000000000 -> 0
ddcomp1003 compare 1 1.0000000000000 -> 0
ddcomp1004 compare 1 1.000000000000 -> 0
ddcomp1005 compare 1 1.00000000000 -> 0
ddcomp1006 compare 1 1.0000000000 -> 0
ddcomp1007 compare 1 1.000000000 -> 0
ddcomp1008 compare 1 1.00000000 -> 0
ddcomp1009 compare 1 1.0000000 -> 0
ddcomp1010 compare 1 1.000000 -> 0
ddcomp1011 compare 1 1.00000 -> 0
ddcomp1012 compare 1 1.0000 -> 0
ddcomp1013 compare 1 1.000 -> 0
ddcomp1014 compare 1 1.00 -> 0
ddcomp1015 compare 1 1.0 -> 0
ddcomp1021 compare 1.000000000000000 1 -> 0
ddcomp1022 compare 1.00000000000000 1 -> 0
ddcomp1023 compare 1.0000000000000 1 -> 0
ddcomp1024 compare 1.000000000000 1 -> 0
ddcomp1025 compare 1.00000000000 1 -> 0
ddcomp1026 compare 1.0000000000 1 -> 0
ddcomp1027 compare 1.000000000 1 -> 0
ddcomp1028 compare 1.00000000 1 -> 0
ddcomp1029 compare 1.0000000 1 -> 0
ddcomp1030 compare 1.000000 1 -> 0
ddcomp1031 compare 1.00000 1 -> 0
ddcomp1032 compare 1.0000 1 -> 0
ddcomp1033 compare 1.000 1 -> 0
ddcomp1034 compare 1.00 1 -> 0
ddcomp1035 compare 1.0 1 -> 0
-- check MSD always detected non-zero
ddcomp1040 compare 0 0.000000000000000 -> 0
ddcomp1041 compare 0 1.000000000000000 -> -1
ddcomp1042 compare 0 2.000000000000000 -> -1
ddcomp1043 compare 0 3.000000000000000 -> -1
ddcomp1044 compare 0 4.000000000000000 -> -1
ddcomp1045 compare 0 5.000000000000000 -> -1
ddcomp1046 compare 0 6.000000000000000 -> -1
ddcomp1047 compare 0 7.000000000000000 -> -1
ddcomp1048 compare 0 8.000000000000000 -> -1
ddcomp1049 compare 0 9.000000000000000 -> -1
ddcomp1050 compare 0.000000000000000 0 -> 0
ddcomp1051 compare 1.000000000000000 0 -> 1
ddcomp1052 compare 2.000000000000000 0 -> 1
ddcomp1053 compare 3.000000000000000 0 -> 1
ddcomp1054 compare 4.000000000000000 0 -> 1
ddcomp1055 compare 5.000000000000000 0 -> 1
ddcomp1056 compare 6.000000000000000 0 -> 1
ddcomp1057 compare 7.000000000000000 0 -> 1
ddcomp1058 compare 8.000000000000000 0 -> 1
ddcomp1059 compare 9.000000000000000 0 -> 1
-- Null tests
ddcom9990 compare 10 # -> NaN Invalid_operation
ddcom9991 compare # 10 -> NaN Invalid_operation

-- cosmopolitan/third_party/python/Lib/test/decimaltestdata/ddAnd.decTest
------------------------------------------------------------------------
-- ddAnd.decTest -- digitwise logical AND for decDoubles --
-- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. --
------------------------------------------------------------------------
-- Please see the document "General Decimal Arithmetic Testcases" --
-- at http://www2.hursley.ibm.com/decimal for the description of --
-- these testcases. --
-- --
-- These testcases are experimental ('beta' versions), and they --
-- may contain errors. They are offered on an as-is basis. In --
-- particular, achieving the same results as the tests here is not --
-- a guarantee that an implementation complies with any Standard --
-- or specification. The tests are not exhaustive. --
-- --
-- Please send comments, suggestions, and corrections to the author: --
-- Mike Cowlishaw, IBM Fellow --
-- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK --
-- [email protected] --
------------------------------------------------------------------------
version: 2.59
precision: 16
maxExponent: 384
minExponent: -383
extended: 1
clamp: 1
rounding: half_even
-- Sanity check (truth table)
ddand001 and 0 0 -> 0
ddand002 and 0 1 -> 0
ddand003 and 1 0 -> 0
ddand004 and 1 1 -> 1
ddand005 and 1100 1010 -> 1000
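-- 'and' operates digitwise on finite non-negative integers whose digits
-- are all 0 or 1; anything else signals Invalid_operation (ddand220
-- onwards). A minimal sketch with Python's decimal module:
--
--   from decimal import Context, Decimal, InvalidOperation
--   ctx = Context(prec=16, Emax=384, Emin=-383, clamp=1)
--   assert ctx.logical_and(Decimal('1100'), Decimal('1010')) == Decimal('1000')  # ddand005
--   ctx.traps[InvalidOperation] = False
--   ctx.logical_and(Decimal('2'), Decimal('1'))   # -> Decimal('NaN'), flag set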
-- and at msd and msd-1
-- 1234567890123456 1234567890123456 1234567890123456
ddand006 and 0000000000000000 0000000000000000 -> 0
ddand007 and 0000000000000000 1000000000000000 -> 0
ddand008 and 1000000000000000 0000000000000000 -> 0
ddand009 and 1000000000000000 1000000000000000 -> 1000000000000000
ddand010 and 0000000000000000 0000000000000000 -> 0
ddand011 and 0000000000000000 0100000000000000 -> 0
ddand012 and 0100000000000000 0000000000000000 -> 0
ddand013 and 0100000000000000 0100000000000000 -> 100000000000000
-- Various lengths
-- 1234567890123456 1234567890123456 1234567890123456
ddand021 and 1111111111111111 1111111111111111 -> 1111111111111111
ddand024 and 1111111111111111 111111111111111 -> 111111111111111
ddand025 and 1111111111111111 11111111111111 -> 11111111111111
ddand026 and 1111111111111111 1111111111111 -> 1111111111111
ddand027 and 1111111111111111 111111111111 -> 111111111111
ddand028 and 1111111111111111 11111111111 -> 11111111111
ddand029 and 1111111111111111 1111111111 -> 1111111111
ddand030 and 1111111111111111 111111111 -> 111111111
ddand031 and 1111111111111111 11111111 -> 11111111
ddand032 and 1111111111111111 1111111 -> 1111111
ddand033 and 1111111111111111 111111 -> 111111
ddand034 and 1111111111111111 11111 -> 11111
ddand035 and 1111111111111111 1111 -> 1111
ddand036 and 1111111111111111 111 -> 111
ddand037 and 1111111111111111 11 -> 11
ddand038 and 1111111111111111 1 -> 1
ddand039 and 1111111111111111 0 -> 0
ddand040 and 1111111111111111 1111111111111111 -> 1111111111111111
ddand041 and 111111111111111 1111111111111111 -> 111111111111111
ddand042 and 111111111111111 1111111111111111 -> 111111111111111
ddand043 and 11111111111111 1111111111111111 -> 11111111111111
ddand044 and 1111111111111 1111111111111111 -> 1111111111111
ddand045 and 111111111111 1111111111111111 -> 111111111111
ddand046 and 11111111111 1111111111111111 -> 11111111111
ddand047 and 1111111111 1111111111111111 -> 1111111111
ddand048 and 111111111 1111111111111111 -> 111111111
ddand049 and 11111111 1111111111111111 -> 11111111
ddand050 and 1111111 1111111111111111 -> 1111111
ddand051 and 111111 1111111111111111 -> 111111
ddand052 and 11111 1111111111111111 -> 11111
ddand053 and 1111 1111111111111111 -> 1111
ddand054 and 111 1111111111111111 -> 111
ddand055 and 11 1111111111111111 -> 11
ddand056 and 1 1111111111111111 -> 1
ddand057 and 0 1111111111111111 -> 0
ddand150 and 1111111111 1 -> 1
ddand151 and 111111111 1 -> 1
ddand152 and 11111111 1 -> 1
ddand153 and 1111111 1 -> 1
ddand154 and 111111 1 -> 1
ddand155 and 11111 1 -> 1
ddand156 and 1111 1 -> 1
ddand157 and 111 1 -> 1
ddand158 and 11 1 -> 1
ddand159 and 1 1 -> 1
ddand160 and 1111111111 0 -> 0
ddand161 and 111111111 0 -> 0
ddand162 and 11111111 0 -> 0
ddand163 and 1111111 0 -> 0
ddand164 and 111111 0 -> 0
ddand165 and 11111 0 -> 0
ddand166 and 1111 0 -> 0
ddand167 and 111 0 -> 0
ddand168 and 11 0 -> 0
ddand169 and 1 0 -> 0
ddand170 and 1 1111111111 -> 1
ddand171 and 1 111111111 -> 1
ddand172 and 1 11111111 -> 1
ddand173 and 1 1111111 -> 1
ddand174 and 1 111111 -> 1
ddand175 and 1 11111 -> 1
ddand176 and 1 1111 -> 1
ddand177 and 1 111 -> 1
ddand178 and 1 11 -> 1
ddand179 and 1 1 -> 1
ddand180 and 0 1111111111 -> 0
ddand181 and 0 111111111 -> 0
ddand182 and 0 11111111 -> 0
ddand183 and 0 1111111 -> 0
ddand184 and 0 111111 -> 0
ddand185 and 0 11111 -> 0
ddand186 and 0 1111 -> 0
ddand187 and 0 111 -> 0
ddand188 and 0 11 -> 0
ddand189 and 0 1 -> 0
ddand090 and 011111111 111111111 -> 11111111
ddand091 and 101111111 111111111 -> 101111111
ddand092 and 110111111 111111111 -> 110111111
ddand093 and 111011111 111111111 -> 111011111
ddand094 and 111101111 111111111 -> 111101111
ddand095 and 111110111 111111111 -> 111110111
ddand096 and 111111011 111111111 -> 111111011
ddand097 and 111111101 111111111 -> 111111101
ddand098 and 111111110 111111111 -> 111111110
ddand100 and 111111111 011111111 -> 11111111
ddand101 and 111111111 101111111 -> 101111111
ddand102 and 111111111 110111111 -> 110111111
ddand103 and 111111111 111011111 -> 111011111
ddand104 and 111111111 111101111 -> 111101111
ddand105 and 111111111 111110111 -> 111110111
ddand106 and 111111111 111111011 -> 111111011
ddand107 and 111111111 111111101 -> 111111101
ddand108 and 111111111 111111110 -> 111111110
-- non-0/1 should not be accepted, nor should signs
ddand220 and 111111112 111111111 -> NaN Invalid_operation
ddand221 and 333333333 333333333 -> NaN Invalid_operation
ddand222 and 555555555 555555555 -> NaN Invalid_operation
ddand223 and 777777777 777777777 -> NaN Invalid_operation
ddand224 and 999999999 999999999 -> NaN Invalid_operation
ddand225 and 222222222 999999999 -> NaN Invalid_operation
ddand226 and 444444444 999999999 -> NaN Invalid_operation
ddand227 and 666666666 999999999 -> NaN Invalid_operation
ddand228 and 888888888 999999999 -> NaN Invalid_operation
ddand229 and 999999999 222222222 -> NaN Invalid_operation
ddand230 and 999999999 444444444 -> NaN Invalid_operation
ddand231 and 999999999 666666666 -> NaN Invalid_operation
ddand232 and 999999999 888888888 -> NaN Invalid_operation
-- a few randoms
ddand240 and 567468689 -934981942 -> NaN Invalid_operation
ddand241 and 567367689 934981942 -> NaN Invalid_operation
ddand242 and -631917772 -706014634 -> NaN Invalid_operation
ddand243 and -756253257 138579234 -> NaN Invalid_operation
ddand244 and 835590149 567435400 -> NaN Invalid_operation
-- test MSD
ddand250 and 2000000000000000 1000000000000000 -> NaN Invalid_operation
ddand251 and 7000000000000000 1000000000000000 -> NaN Invalid_operation
ddand252 and 8000000000000000 1000000000000000 -> NaN Invalid_operation
ddand253 and 9000000000000000 1000000000000000 -> NaN Invalid_operation
ddand254 and 2000000000000000 0000000000000000 -> NaN Invalid_operation
ddand255 and 7000000000000000 0000000000000000 -> NaN Invalid_operation
ddand256 and 8000000000000000 0000000000000000 -> NaN Invalid_operation
ddand257 and 9000000000000000 0000000000000000 -> NaN Invalid_operation
ddand258 and 1000000000000000 2000000000000000 -> NaN Invalid_operation
ddand259 and 1000000000000000 7000000000000000 -> NaN Invalid_operation
ddand260 and 1000000000000000 8000000000000000 -> NaN Invalid_operation
ddand261 and 1000000000000000 9000000000000000 -> NaN Invalid_operation
ddand262 and 0000000000000000 2000000000000000 -> NaN Invalid_operation
ddand263 and 0000000000000000 7000000000000000 -> NaN Invalid_operation
ddand264 and 0000000000000000 8000000000000000 -> NaN Invalid_operation
ddand265 and 0000000000000000 9000000000000000 -> NaN Invalid_operation
-- test MSD-1
ddand270 and 0200001000000000 1000100000000010 -> NaN Invalid_operation
ddand271 and 0700000100000000 1000010000000100 -> NaN Invalid_operation
ddand272 and 0800000010000000 1000001000001000 -> NaN Invalid_operation
ddand273 and 0900000001000000 1000000100010000 -> NaN Invalid_operation
ddand274 and 1000000000100000 0200000010100000 -> NaN Invalid_operation
ddand275 and 1000000000010000 0700000001000000 -> NaN Invalid_operation
ddand276 and 1000000000001000 0800000010100000 -> NaN Invalid_operation
ddand277 and 1000000000000100 0900000000010000 -> NaN Invalid_operation
-- test LSD
ddand280 and 0010000000000002 1000000100000001 -> NaN Invalid_operation
ddand281 and 0001000000000007 1000001000000011 -> NaN Invalid_operation
ddand282 and 0000100000000008 1000010000000001 -> NaN Invalid_operation
ddand283 and 0000010000000009 1000100000000001 -> NaN Invalid_operation
ddand284 and 1000001000000000 0001000000000002 -> NaN Invalid_operation
ddand285 and 1000000100000000 0010000000000007 -> NaN Invalid_operation
ddand286 and 1000000010000000 0100000000000008 -> NaN Invalid_operation
ddand287 and 1000000001000000 1000000000000009 -> NaN Invalid_operation
-- test Middie
ddand288 and 0010000020000000 1000001000000000 -> NaN Invalid_operation
ddand289 and 0001000070000001 1000000100000000 -> NaN Invalid_operation
ddand290 and 0000100080000010 1000000010000000 -> NaN Invalid_operation
ddand291 and 0000010090000100 1000000001000000 -> NaN Invalid_operation
ddand292 and 1000001000001000 0000000020100000 -> NaN Invalid_operation
ddand293 and 1000000100010000 0000000070010000 -> NaN Invalid_operation
ddand294 and 1000000010100000 0000000080001000 -> NaN Invalid_operation
ddand295 and 1000000001000000 0000000090000100 -> NaN Invalid_operation
-- signs
ddand296 and -1000000001000000 -0000010000000100 -> NaN Invalid_operation
ddand297 and -1000000001000000 0000000010000100 -> NaN Invalid_operation
ddand298 and 1000000001000000 -0000001000000100 -> NaN Invalid_operation
ddand299 and 1000000001000000 0000000011000100 -> 1000000
-- Nmax, Nmin, Ntiny-like
ddand331 and 2 9.99999999E+199 -> NaN Invalid_operation
ddand332 and 3 1E-199 -> NaN Invalid_operation
ddand333 and 4 1.00000000E-199 -> NaN Invalid_operation
ddand334 and 5 1E-100 -> NaN Invalid_operation
ddand335 and 6 -1E-100 -> NaN Invalid_operation
ddand336 and 7 -1.00000000E-199 -> NaN Invalid_operation
ddand337 and 8 -1E-199 -> NaN Invalid_operation
ddand338 and 9 -9.99999999E+199 -> NaN Invalid_operation
ddand341 and 9.99999999E+199 -18 -> NaN Invalid_operation
ddand342 and 1E-199 01 -> NaN Invalid_operation
ddand343 and 1.00000000E-199 -18 -> NaN Invalid_operation
ddand344 and 1E-100 18 -> NaN Invalid_operation
ddand345 and -1E-100 -10 -> NaN Invalid_operation
ddand346 and -1.00000000E-199 18 -> NaN Invalid_operation
ddand347 and -1E-199 10 -> NaN Invalid_operation
ddand348 and -9.99999999E+199 -18 -> NaN Invalid_operation
-- A few other non-integers
ddand361 and 1.0 1 -> NaN Invalid_operation
ddand362 and 1E+1 1 -> NaN Invalid_operation
ddand363 and 0.0 1 -> NaN Invalid_operation
ddand364 and 0E+1 1 -> NaN Invalid_operation
ddand365 and 9.9 1 -> NaN Invalid_operation
ddand366 and 9E+1 1 -> NaN Invalid_operation
ddand371 and 0 1.0 -> NaN Invalid_operation
ddand372 and 0 1E+1 -> NaN Invalid_operation
ddand373 and 0 0.0 -> NaN Invalid_operation
ddand374 and 0 0E+1 -> NaN Invalid_operation
ddand375 and 0 9.9 -> NaN Invalid_operation
ddand376 and 0 9E+1 -> NaN Invalid_operation
-- All Specials are in error
ddand780 and -Inf -Inf -> NaN Invalid_operation
ddand781 and -Inf -1000 -> NaN Invalid_operation
ddand782 and -Inf -1 -> NaN Invalid_operation
ddand783 and -Inf -0 -> NaN Invalid_operation
ddand784 and -Inf 0 -> NaN Invalid_operation
ddand785 and -Inf 1 -> NaN Invalid_operation
ddand786 and -Inf 1000 -> NaN Invalid_operation
ddand787 and -1000 -Inf -> NaN Invalid_operation
ddand788 and -Inf -Inf -> NaN Invalid_operation
ddand789 and -1 -Inf -> NaN Invalid_operation
ddand790 and -0 -Inf -> NaN Invalid_operation
ddand791 and 0 -Inf -> NaN Invalid_operation
ddand792 and 1 -Inf -> NaN Invalid_operation
ddand793 and 1000 -Inf -> NaN Invalid_operation
ddand794 and Inf -Inf -> NaN Invalid_operation
ddand800 and Inf -Inf -> NaN Invalid_operation
ddand801 and Inf -1000 -> NaN Invalid_operation
ddand802 and Inf -1 -> NaN Invalid_operation
ddand803 and Inf -0 -> NaN Invalid_operation
ddand804 and Inf 0 -> NaN Invalid_operation
ddand805 and Inf 1 -> NaN Invalid_operation
ddand806 and Inf 1000 -> NaN Invalid_operation
ddand807 and Inf Inf -> NaN Invalid_operation
ddand808 and -1000 Inf -> NaN Invalid_operation
ddand809 and -Inf Inf -> NaN Invalid_operation
ddand810 and -1 Inf -> NaN Invalid_operation
ddand811 and -0 Inf -> NaN Invalid_operation
ddand812 and 0 Inf -> NaN Invalid_operation
ddand813 and 1 Inf -> NaN Invalid_operation
ddand814 and 1000 Inf -> NaN Invalid_operation
ddand815 and Inf Inf -> NaN Invalid_operation
ddand821 and NaN -Inf -> NaN Invalid_operation
ddand822 and NaN -1000 -> NaN Invalid_operation
ddand823 and NaN -1 -> NaN Invalid_operation
ddand824 and NaN -0 -> NaN Invalid_operation
ddand825 and NaN 0 -> NaN Invalid_operation
ddand826 and NaN 1 -> NaN Invalid_operation
ddand827 and NaN 1000 -> NaN Invalid_operation
ddand828 and NaN Inf -> NaN Invalid_operation
ddand829 and NaN NaN -> NaN Invalid_operation
ddand830 and -Inf NaN -> NaN Invalid_operation
ddand831 and -1000 NaN -> NaN Invalid_operation
ddand832 and -1 NaN -> NaN Invalid_operation
ddand833 and -0 NaN -> NaN Invalid_operation
ddand834 and 0 NaN -> NaN Invalid_operation
ddand835 and 1 NaN -> NaN Invalid_operation
ddand836 and 1000 NaN -> NaN Invalid_operation
ddand837 and Inf NaN -> NaN Invalid_operation
ddand841 and sNaN -Inf -> NaN Invalid_operation
ddand842 and sNaN -1000 -> NaN Invalid_operation
ddand843 and sNaN -1 -> NaN Invalid_operation
ddand844 and sNaN -0 -> NaN Invalid_operation
ddand845 and sNaN 0 -> NaN Invalid_operation
ddand846 and sNaN 1 -> NaN Invalid_operation
ddand847 and sNaN 1000 -> NaN Invalid_operation
ddand848 and sNaN NaN -> NaN Invalid_operation
ddand849 and sNaN sNaN -> NaN Invalid_operation
ddand850 and NaN sNaN -> NaN Invalid_operation
ddand851 and -Inf sNaN -> NaN Invalid_operation
ddand852 and -1000 sNaN -> NaN Invalid_operation
ddand853 and -1 sNaN -> NaN Invalid_operation
ddand854 and -0 sNaN -> NaN Invalid_operation
ddand855 and 0 sNaN -> NaN Invalid_operation
ddand856 and 1 sNaN -> NaN Invalid_operation
ddand857 and 1000 sNaN -> NaN Invalid_operation
ddand858 and Inf sNaN -> NaN Invalid_operation
ddand859 and NaN sNaN -> NaN Invalid_operation
-- propagating NaNs
ddand861 and NaN1 -Inf -> NaN Invalid_operation
ddand862 and +NaN2 -1000 -> NaN Invalid_operation
ddand863 and NaN3 1000 -> NaN Invalid_operation
ddand864 and NaN4 Inf -> NaN Invalid_operation
ddand865 and NaN5 +NaN6 -> NaN Invalid_operation
ddand866 and -Inf NaN7 -> NaN Invalid_operation
ddand867 and -1000 NaN8 -> NaN Invalid_operation
ddand868 and 1000 NaN9 -> NaN Invalid_operation
ddand869 and Inf +NaN10 -> NaN Invalid_operation
ddand871 and sNaN11 -Inf -> NaN Invalid_operation
ddand872 and sNaN12 -1000 -> NaN Invalid_operation
ddand873 and sNaN13 1000 -> NaN Invalid_operation
ddand874 and sNaN14 NaN17 -> NaN Invalid_operation
ddand875 and sNaN15 sNaN18 -> NaN Invalid_operation
ddand876 and NaN16 sNaN19 -> NaN Invalid_operation
ddand877 and -Inf +sNaN20 -> NaN Invalid_operation
ddand878 and -1000 sNaN21 -> NaN Invalid_operation
ddand879 and 1000 sNaN22 -> NaN Invalid_operation
ddand880 and Inf sNaN23 -> NaN Invalid_operation
ddand881 and +NaN25 +sNaN24 -> NaN Invalid_operation
ddand882 and -NaN26 NaN28 -> NaN Invalid_operation
ddand883 and -sNaN27 sNaN29 -> NaN Invalid_operation
ddand884 and 1000 -NaN30 -> NaN Invalid_operation
ddand885 and 1000 -sNaN31 -> NaN Invalid_operation

-- cosmopolitan/third_party/python/Lib/test/decimaltestdata/dqCopy.decTest
------------------------------------------------------------------------
-- dqCopy.decTest -- quiet decQuad copy --
-- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. --
------------------------------------------------------------------------
-- Please see the document "General Decimal Arithmetic Testcases" --
-- at http://www2.hursley.ibm.com/decimal for the description of --
-- these testcases. --
-- --
-- These testcases are experimental ('beta' versions), and they --
-- may contain errors. They are offered on an as-is basis. In --
-- particular, achieving the same results as the tests here is not --
-- a guarantee that an implementation complies with any Standard --
-- or specification. The tests are not exhaustive. --
-- --
-- Please send comments, suggestions, and corrections to the author: --
-- Mike Cowlishaw, IBM Fellow --
-- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK --
-- [email protected] --
------------------------------------------------------------------------
version: 2.59
-- All operands and results are decQuads.
extended: 1
clamp: 1
precision: 34
maxExponent: 6144
minExponent: -6143
rounding: half_even
-- Sanity check
dqcpy001 copy +7.50 -> 7.50
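-- copy returns its operand unchanged; it is a quiet operation, so even an
-- sNaN passes through without signalling (dqcpy023/024 below). A sketch
-- with Python's decimal module:
--
--   from decimal import Context, Decimal
--   ctx = Context(prec=34, Emax=6144, Emin=-6143, clamp=1)
--   assert str(ctx.copy_decimal(Decimal('+7.50'))) == '7.50'         # dqcpy001
--   assert str(ctx.copy_decimal(Decimal('-sNaN101'))) == '-sNaN101'  # dqcpy038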
-- Infinities
dqcpy011 copy Infinity -> Infinity
dqcpy012 copy -Infinity -> -Infinity
-- NaNs, 0 payload
dqcpy021 copy NaN -> NaN
dqcpy022 copy -NaN -> -NaN
dqcpy023 copy sNaN -> sNaN
dqcpy024 copy -sNaN -> -sNaN
-- NaNs, non-0 payload
dqcpy031 copy NaN10 -> NaN10
dqcpy032 copy -NaN10 -> -NaN10
dqcpy033 copy sNaN10 -> sNaN10
dqcpy034 copy -sNaN10 -> -sNaN10
dqcpy035 copy NaN7 -> NaN7
dqcpy036 copy -NaN7 -> -NaN7
dqcpy037 copy sNaN101 -> sNaN101
dqcpy038 copy -sNaN101 -> -sNaN101
-- finites
dqcpy101 copy 7 -> 7
dqcpy102 copy -7 -> -7
dqcpy103 copy 75 -> 75
dqcpy104 copy -75 -> -75
dqcpy105 copy 7.50 -> 7.50
dqcpy106 copy -7.50 -> -7.50
dqcpy107 copy 7.500 -> 7.500
dqcpy108 copy -7.500 -> -7.500
-- zeros
dqcpy111 copy 0 -> 0
dqcpy112 copy -0 -> -0
dqcpy113 copy 0E+4 -> 0E+4
dqcpy114 copy -0E+4 -> -0E+4
dqcpy115 copy 0.0000 -> 0.0000
dqcpy116 copy -0.0000 -> -0.0000
dqcpy117 copy 0E-141 -> 0E-141
dqcpy118 copy -0E-141 -> -0E-141
-- full coefficients, alternating bits
dqcpy121 copy 2682682682682682682682682682682682 -> 2682682682682682682682682682682682
dqcpy122 copy -2682682682682682682682682682682682 -> -2682682682682682682682682682682682
dqcpy123 copy 1341341341341341341341341341341341 -> 1341341341341341341341341341341341
dqcpy124 copy -1341341341341341341341341341341341 -> -1341341341341341341341341341341341
-- Nmax, Nmin, Ntiny
dqcpy131 copy 9.999999999999999999999999999999999E+6144 -> 9.999999999999999999999999999999999E+6144
dqcpy132 copy 1E-6143 -> 1E-6143
dqcpy133 copy 1.000000000000000000000000000000000E-6143 -> 1.000000000000000000000000000000000E-6143
dqcpy134 copy 1E-6176 -> 1E-6176
dqcpy135 copy -1E-6176 -> -1E-6176
dqcpy136 copy -1.000000000000000000000000000000000E-6143 -> -1.000000000000000000000000000000000E-6143
dqcpy137 copy -1E-6143 -> -1E-6143
dqcpy138 copy -9.999999999999999999999999999999999E+6144 -> -9.999999999999999999999999999999999E+6144

-- cosmopolitan/third_party/python/Lib/test/decimaltestdata/ddAdd.decTest
------------------------------------------------------------------------
-- ddAdd.decTest -- decDouble addition --
-- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. --
------------------------------------------------------------------------
-- Please see the document "General Decimal Arithmetic Testcases" --
-- at http://www2.hursley.ibm.com/decimal for the description of --
-- these testcases. --
-- --
-- These testcases are experimental ('beta' versions), and they --
-- may contain errors. They are offered on an as-is basis. In --
-- particular, achieving the same results as the tests here is not --
-- a guarantee that an implementation complies with any Standard --
-- or specification. The tests are not exhaustive. --
-- --
-- Please send comments, suggestions, and corrections to the author: --
-- Mike Cowlishaw, IBM Fellow --
-- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK --
-- [email protected] --
------------------------------------------------------------------------
version: 2.59
-- This set of tests is for decDoubles only; all arguments are
-- representable in a decDouble
precision: 16
maxExponent: 384
minExponent: -383
extended: 1
clamp: 1
rounding: half_even
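-- A minimal sketch of running the addition cases below through Python's
-- decimal module, including the Inexact/Rounded conditions (names are
-- illustrative):
--
--   from decimal import Context, Decimal, Inexact, Rounded, ROUND_HALF_EVEN
--   ctx = Context(prec=16, Emax=384, Emin=-383,
--                 rounding=ROUND_HALF_EVEN, clamp=1)
--   assert str(ctx.add(Decimal('5.75'), Decimal('3.3'))) == '9.05'  # ddadd003
--   r = ctx.add(Decimal('0.4444444444444446'), Decimal('0.5555555555555555'))
--   assert str(r) == '1.000000000000000'                            # ddadd011
--   assert ctx.flags[Inexact] and ctx.flags[Rounded]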
-- [first group are 'quick confidence check']
ddadd001 add 1 1 -> 2
ddadd002 add 2 3 -> 5
ddadd003 add '5.75' '3.3' -> 9.05
ddadd004 add '5' '-3' -> 2
ddadd005 add '-5' '-3' -> -8
ddadd006 add '-7' '2.5' -> -4.5
ddadd007 add '0.7' '0.3' -> 1.0
ddadd008 add '1.25' '1.25' -> 2.50
ddadd009 add '1.23456789' '1.00000000' -> '2.23456789'
ddadd010 add '1.23456789' '1.00000011' -> '2.23456800'
-- 1234567890123456 1234567890123456
ddadd011 add '0.4444444444444446' '0.5555555555555555' -> '1.000000000000000' Inexact Rounded
ddadd012 add '0.4444444444444445' '0.5555555555555555' -> '1.000000000000000' Rounded
ddadd013 add '0.4444444444444444' '0.5555555555555555' -> '0.9999999999999999'
ddadd014 add '4444444444444444' '0.49' -> '4444444444444444' Inexact Rounded
ddadd015 add '4444444444444444' '0.499' -> '4444444444444444' Inexact Rounded
ddadd016 add '4444444444444444' '0.4999' -> '4444444444444444' Inexact Rounded
ddadd017 add '4444444444444444' '0.5000' -> '4444444444444444' Inexact Rounded
ddadd018 add '4444444444444444' '0.5001' -> '4444444444444445' Inexact Rounded
ddadd019 add '4444444444444444' '0.501' -> '4444444444444445' Inexact Rounded
ddadd020 add '4444444444444444' '0.51' -> '4444444444444445' Inexact Rounded
ddadd021 add 0 1 -> 1
ddadd022 add 1 1 -> 2
ddadd023 add 2 1 -> 3
ddadd024 add 3 1 -> 4
ddadd025 add 4 1 -> 5
ddadd026 add 5 1 -> 6
ddadd027 add 6 1 -> 7
ddadd028 add 7 1 -> 8
ddadd029 add 8 1 -> 9
ddadd030 add 9 1 -> 10
-- some carrying effects
ddadd031 add '0.9998' '0.0000' -> '0.9998'
ddadd032 add '0.9998' '0.0001' -> '0.9999'
ddadd033 add '0.9998' '0.0002' -> '1.0000'
ddadd034 add '0.9998' '0.0003' -> '1.0001'
ddadd035 add '70' '10000e+16' -> '1.000000000000000E+20' Inexact Rounded
ddadd036 add '700' '10000e+16' -> '1.000000000000000E+20' Inexact Rounded
ddadd037 add '7000' '10000e+16' -> '1.000000000000000E+20' Inexact Rounded
ddadd038 add '70000' '10000e+16' -> '1.000000000000001E+20' Inexact Rounded
ddadd039 add '700000' '10000e+16' -> '1.000000000000007E+20' Rounded
-- symmetry:
ddadd040 add '10000e+16' '70' -> '1.000000000000000E+20' Inexact Rounded
ddadd041 add '10000e+16' '700' -> '1.000000000000000E+20' Inexact Rounded
ddadd042 add '10000e+16' '7000' -> '1.000000000000000E+20' Inexact Rounded
ddadd044 add '10000e+16' '70000' -> '1.000000000000001E+20' Inexact Rounded
ddadd045 add '10000e+16' '700000' -> '1.000000000000007E+20' Rounded
-- same, without rounding
ddadd046 add '10000e+9' '7' -> '10000000000007'
ddadd047 add '10000e+9' '70' -> '10000000000070'
ddadd048 add '10000e+9' '700' -> '10000000000700'
ddadd049 add '10000e+9' '7000' -> '10000000007000'
ddadd050 add '10000e+9' '70000' -> '10000000070000'
ddadd051 add '10000e+9' '700000' -> '10000000700000'
ddadd052 add '10000e+9' '7000000' -> '10000007000000'
-- examples from decarith
ddadd053 add '12' '7.00' -> '19.00'
ddadd054 add '1.3' '-1.07' -> '0.23'
ddadd055 add '1.3' '-1.30' -> '0.00'
ddadd056 add '1.3' '-2.07' -> '-0.77'
ddadd057 add '1E+2' '1E+4' -> '1.01E+4'
-- leading zero preservation
ddadd061 add 1 '0.0001' -> '1.0001'
ddadd062 add 1 '0.00001' -> '1.00001'
ddadd063 add 1 '0.000001' -> '1.000001'
ddadd064 add 1 '0.0000001' -> '1.0000001'
ddadd065 add 1 '0.00000001' -> '1.00000001'
-- some funny zeros [in case of bad signum]
ddadd070 add 1 0 -> 1
ddadd071 add 1 0. -> 1
ddadd072 add 1 .0 -> 1.0
ddadd073 add 1 0.0 -> 1.0
ddadd074 add 1 0.00 -> 1.00
ddadd075 add 0 1 -> 1
ddadd076 add 0. 1 -> 1
ddadd077 add .0 1 -> 1.0
ddadd078 add 0.0 1 -> 1.0
ddadd079 add 0.00 1 -> 1.00
-- some carries
ddadd080 add 999999998 1 -> 999999999
ddadd081 add 999999999 1 -> 1000000000
ddadd082 add 99999999 1 -> 100000000
ddadd083 add 9999999 1 -> 10000000
ddadd084 add 999999 1 -> 1000000
ddadd085 add 99999 1 -> 100000
ddadd086 add 9999 1 -> 10000
ddadd087 add 999 1 -> 1000
ddadd088 add 99 1 -> 100
ddadd089 add 9 1 -> 10
-- more LHS swaps
ddadd090 add '-56267E-10' 0 -> '-0.0000056267'
ddadd091 add '-56267E-6' 0 -> '-0.056267'
ddadd092 add '-56267E-5' 0 -> '-0.56267'
ddadd093 add '-56267E-4' 0 -> '-5.6267'
ddadd094 add '-56267E-3' 0 -> '-56.267'
ddadd095 add '-56267E-2' 0 -> '-562.67'
ddadd096 add '-56267E-1' 0 -> '-5626.7'
ddadd097 add '-56267E-0' 0 -> '-56267'
ddadd098 add '-5E-10' 0 -> '-5E-10'
ddadd099 add '-5E-7' 0 -> '-5E-7'
ddadd100 add '-5E-6' 0 -> '-0.000005'
ddadd101 add '-5E-5' 0 -> '-0.00005'
ddadd102 add '-5E-4' 0 -> '-0.0005'
ddadd103 add '-5E-1' 0 -> '-0.5'
ddadd104 add '-5E0' 0 -> '-5'
ddadd105 add '-5E1' 0 -> '-50'
ddadd106 add '-5E5' 0 -> '-500000'
ddadd107 add '-5E15' 0 -> '-5000000000000000'
ddadd108 add '-5E16' 0 -> '-5.000000000000000E+16' Rounded
ddadd109 add '-5E17' 0 -> '-5.000000000000000E+17' Rounded
ddadd110 add '-5E18' 0 -> '-5.000000000000000E+18' Rounded
ddadd111 add '-5E100' 0 -> '-5.000000000000000E+100' Rounded
-- more RHS swaps
ddadd113 add 0 '-56267E-10' -> '-0.0000056267'
ddadd114 add 0 '-56267E-6' -> '-0.056267'
ddadd116 add 0 '-56267E-5' -> '-0.56267'
ddadd117 add 0 '-56267E-4' -> '-5.6267'
ddadd119 add 0 '-56267E-3' -> '-56.267'
ddadd120 add 0 '-56267E-2' -> '-562.67'
ddadd121 add 0 '-56267E-1' -> '-5626.7'
ddadd122 add 0 '-56267E-0' -> '-56267'
ddadd123 add 0 '-5E-10' -> '-5E-10'
ddadd124 add 0 '-5E-7' -> '-5E-7'
ddadd125 add 0 '-5E-6' -> '-0.000005'
ddadd126 add 0 '-5E-5' -> '-0.00005'
ddadd127 add 0 '-5E-4' -> '-0.0005'
ddadd128 add 0 '-5E-1' -> '-0.5'
ddadd129 add 0 '-5E0' -> '-5'
ddadd130 add 0 '-5E1' -> '-50'
ddadd131 add 0 '-5E5' -> '-500000'
ddadd132 add 0 '-5E15' -> '-5000000000000000'
ddadd133 add 0 '-5E16' -> '-5.000000000000000E+16' Rounded
ddadd134 add 0 '-5E17' -> '-5.000000000000000E+17' Rounded
ddadd135 add 0 '-5E18' -> '-5.000000000000000E+18' Rounded
ddadd136 add 0 '-5E100' -> '-5.000000000000000E+100' Rounded
-- related
ddadd137 add 1 '0E-19' -> '1.000000000000000' Rounded
ddadd138 add -1 '0E-19' -> '-1.000000000000000' Rounded
ddadd139 add '0E-19' 1 -> '1.000000000000000' Rounded
ddadd140 add '0E-19' -1 -> '-1.000000000000000' Rounded
ddadd141 add 1E+11 0.0000 -> '100000000000.0000'
ddadd142 add 1E+11 0.00000 -> '100000000000.0000' Rounded
ddadd143 add 0.000 1E+12 -> '1000000000000.000'
ddadd144 add 0.0000 1E+12 -> '1000000000000.000' Rounded
-- [some of the next group are really constructor tests]
ddadd146 add '00.0' 0 -> '0.0'
ddadd147 add '0.00' 0 -> '0.00'
ddadd148 add 0 '0.00' -> '0.00'
ddadd149 add 0 '00.0' -> '0.0'
ddadd150 add '00.0' '0.00' -> '0.00'
ddadd151 add '0.00' '00.0' -> '0.00'
ddadd152 add '3' '.3' -> '3.3'
ddadd153 add '3.' '.3' -> '3.3'
ddadd154 add '3.0' '.3' -> '3.3'
ddadd155 add '3.00' '.3' -> '3.30'
ddadd156 add '3' '3' -> '6'
ddadd157 add '3' '+3' -> '6'
ddadd158 add '3' '-3' -> '0'
ddadd159 add '0.3' '-0.3' -> '0.0'
ddadd160 add '0.03' '-0.03' -> '0.00'
-- try borderline precision, with carries, etc.
ddadd161 add '1E+12' '-1' -> '999999999999'
ddadd162 add '1E+12' '1.11' -> '1000000000001.11'
ddadd163 add '1.11' '1E+12' -> '1000000000001.11'
ddadd164 add '-1' '1E+12' -> '999999999999'
ddadd165 add '7E+12' '-1' -> '6999999999999'
ddadd166 add '7E+12' '1.11' -> '7000000000001.11'
ddadd167 add '1.11' '7E+12' -> '7000000000001.11'
ddadd168 add '-1' '7E+12' -> '6999999999999'
rounding: half_up
-- 1.234567890123456 1234567890123456 1 234567890123456
ddadd170 add '4.444444444444444' '0.5555555555555567' -> '5.000000000000001' Inexact Rounded
ddadd171 add '4.444444444444444' '0.5555555555555566' -> '5.000000000000001' Inexact Rounded
ddadd172 add '4.444444444444444' '0.5555555555555565' -> '5.000000000000001' Inexact Rounded
ddadd173 add '4.444444444444444' '0.5555555555555564' -> '5.000000000000000' Inexact Rounded
ddadd174 add '4.444444444444444' '0.5555555555555553' -> '4.999999999999999' Inexact Rounded
ddadd175 add '4.444444444444444' '0.5555555555555552' -> '4.999999999999999' Inexact Rounded
ddadd176 add '4.444444444444444' '0.5555555555555551' -> '4.999999999999999' Inexact Rounded
ddadd177 add '4.444444444444444' '0.5555555555555550' -> '4.999999999999999' Rounded
ddadd178 add '4.444444444444444' '0.5555555555555545' -> '4.999999999999999' Inexact Rounded
ddadd179 add '4.444444444444444' '0.5555555555555544' -> '4.999999999999998' Inexact Rounded
ddadd180 add '4.444444444444444' '0.5555555555555543' -> '4.999999999999998' Inexact Rounded
ddadd181 add '4.444444444444444' '0.5555555555555542' -> '4.999999999999998' Inexact Rounded
ddadd182 add '4.444444444444444' '0.5555555555555541' -> '4.999999999999998' Inexact Rounded
ddadd183 add '4.444444444444444' '0.5555555555555540' -> '4.999999999999998' Rounded
-- and some more, including residue effects and different roundings
rounding: half_up
ddadd200 add '1234560123456789' 0 -> '1234560123456789'
ddadd201 add '1234560123456789' 0.000000001 -> '1234560123456789' Inexact Rounded
ddadd202 add '1234560123456789' 0.000001 -> '1234560123456789' Inexact Rounded
ddadd203 add '1234560123456789' 0.1 -> '1234560123456789' Inexact Rounded
ddadd204 add '1234560123456789' 0.4 -> '1234560123456789' Inexact Rounded
ddadd205 add '1234560123456789' 0.49 -> '1234560123456789' Inexact Rounded
ddadd206 add '1234560123456789' 0.499999 -> '1234560123456789' Inexact Rounded
ddadd207 add '1234560123456789' 0.499999999 -> '1234560123456789' Inexact Rounded
ddadd208 add '1234560123456789' 0.5 -> '1234560123456790' Inexact Rounded
ddadd209 add '1234560123456789' 0.500000001 -> '1234560123456790' Inexact Rounded
ddadd210 add '1234560123456789' 0.500001 -> '1234560123456790' Inexact Rounded
ddadd211 add '1234560123456789' 0.51 -> '1234560123456790' Inexact Rounded
ddadd212 add '1234560123456789' 0.6 -> '1234560123456790' Inexact Rounded
ddadd213 add '1234560123456789' 0.9 -> '1234560123456790' Inexact Rounded
ddadd214 add '1234560123456789' 0.99999 -> '1234560123456790' Inexact Rounded
ddadd215 add '1234560123456789' 0.999999999 -> '1234560123456790' Inexact Rounded
ddadd216 add '1234560123456789' 1 -> '1234560123456790'
ddadd217 add '1234560123456789' 1.000000001 -> '1234560123456790' Inexact Rounded
ddadd218 add '1234560123456789' 1.00001 -> '1234560123456790' Inexact Rounded
ddadd219 add '1234560123456789' 1.1 -> '1234560123456790' Inexact Rounded
rounding: half_even
ddadd220 add '1234560123456789' 0 -> '1234560123456789'
ddadd221 add '1234560123456789' 0.000000001 -> '1234560123456789' Inexact Rounded
ddadd222 add '1234560123456789' 0.000001 -> '1234560123456789' Inexact Rounded
ddadd223 add '1234560123456789' 0.1 -> '1234560123456789' Inexact Rounded
ddadd224 add '1234560123456789' 0.4 -> '1234560123456789' Inexact Rounded
ddadd225 add '1234560123456789' 0.49 -> '1234560123456789' Inexact Rounded
ddadd226 add '1234560123456789' 0.499999 -> '1234560123456789' Inexact Rounded
ddadd227 add '1234560123456789' 0.499999999 -> '1234560123456789' Inexact Rounded
ddadd228 add '1234560123456789' 0.5 -> '1234560123456790' Inexact Rounded
ddadd229 add '1234560123456789' 0.500000001 -> '1234560123456790' Inexact Rounded
ddadd230 add '1234560123456789' 0.500001 -> '1234560123456790' Inexact Rounded
ddadd231 add '1234560123456789' 0.51 -> '1234560123456790' Inexact Rounded
ddadd232 add '1234560123456789' 0.6 -> '1234560123456790' Inexact Rounded
ddadd233 add '1234560123456789' 0.9 -> '1234560123456790' Inexact Rounded
ddadd234 add '1234560123456789' 0.99999 -> '1234560123456790' Inexact Rounded
ddadd235 add '1234560123456789' 0.999999999 -> '1234560123456790' Inexact Rounded
ddadd236 add '1234560123456789' 1 -> '1234560123456790'
ddadd237 add '1234560123456789' 1.00000001 -> '1234560123456790' Inexact Rounded
ddadd238 add '1234560123456789' 1.00001 -> '1234560123456790' Inexact Rounded
ddadd239 add '1234560123456789' 1.1 -> '1234560123456790' Inexact Rounded
-- critical few with even bottom digit...
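-- note: 1234560123456788 + 0.5 is an exact tie; half_even keeps the even
-- final digit 8 (ddadd241), whereas the odd-digit tie ddadd228 rounded up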
ddadd240 add '1234560123456788' 0.499999999 -> '1234560123456788' Inexact Rounded
ddadd241 add '1234560123456788' 0.5 -> '1234560123456788' Inexact Rounded
ddadd242 add '1234560123456788' 0.500000001 -> '1234560123456789' Inexact Rounded
rounding: down
ddadd250 add '1234560123456789' 0 -> '1234560123456789'
ddadd251 add '1234560123456789' 0.000000001 -> '1234560123456789' Inexact Rounded
ddadd252 add '1234560123456789' 0.000001 -> '1234560123456789' Inexact Rounded
ddadd253 add '1234560123456789' 0.1 -> '1234560123456789' Inexact Rounded
ddadd254 add '1234560123456789' 0.4 -> '1234560123456789' Inexact Rounded
ddadd255 add '1234560123456789' 0.49 -> '1234560123456789' Inexact Rounded
ddadd256 add '1234560123456789' 0.499999 -> '1234560123456789' Inexact Rounded
ddadd257 add '1234560123456789' 0.499999999 -> '1234560123456789' Inexact Rounded
ddadd258 add '1234560123456789' 0.5 -> '1234560123456789' Inexact Rounded
ddadd259 add '1234560123456789' 0.500000001 -> '1234560123456789' Inexact Rounded
ddadd260 add '1234560123456789' 0.500001 -> '1234560123456789' Inexact Rounded
ddadd261 add '1234560123456789' 0.51 -> '1234560123456789' Inexact Rounded
ddadd262 add '1234560123456789' 0.6 -> '1234560123456789' Inexact Rounded
ddadd263 add '1234560123456789' 0.9 -> '1234560123456789' Inexact Rounded
ddadd264 add '1234560123456789' 0.99999 -> '1234560123456789' Inexact Rounded
ddadd265 add '1234560123456789' 0.999999999 -> '1234560123456789' Inexact Rounded
ddadd266 add '1234560123456789' 1 -> '1234560123456790'
ddadd267 add '1234560123456789' 1.00000001 -> '1234560123456790' Inexact Rounded
ddadd268 add '1234560123456789' 1.00001 -> '1234560123456790' Inexact Rounded
ddadd269 add '1234560123456789' 1.1 -> '1234560123456790' Inexact Rounded
-- 1 in last place tests
rounding: half_up
ddadd301 add -1 1 -> 0
ddadd302 add 0 1 -> 1
ddadd303 add 1 1 -> 2
ddadd304 add 12 1 -> 13
ddadd305 add 98 1 -> 99
ddadd306 add 99 1 -> 100
ddadd307 add 100 1 -> 101
ddadd308 add 101 1 -> 102
ddadd309 add -1 -1 -> -2
ddadd310 add 0 -1 -> -1
ddadd311 add 1 -1 -> 0
ddadd312 add 12 -1 -> 11
ddadd313 add 98 -1 -> 97
ddadd314 add 99 -1 -> 98
ddadd315 add 100 -1 -> 99
ddadd316 add 101 -1 -> 100
ddadd321 add -0.01 0.01 -> 0.00
ddadd322 add 0.00 0.01 -> 0.01
ddadd323 add 0.01 0.01 -> 0.02
ddadd324 add 0.12 0.01 -> 0.13
ddadd325 add 0.98 0.01 -> 0.99
ddadd326 add 0.99 0.01 -> 1.00
ddadd327 add 1.00 0.01 -> 1.01
ddadd328 add 1.01 0.01 -> 1.02
ddadd329 add -0.01 -0.01 -> -0.02
ddadd330 add 0.00 -0.01 -> -0.01
ddadd331 add 0.01 -0.01 -> 0.00
ddadd332 add 0.12 -0.01 -> 0.11
ddadd333 add 0.98 -0.01 -> 0.97
ddadd334 add 0.99 -0.01 -> 0.98
ddadd335 add 1.00 -0.01 -> 0.99
ddadd336 add 1.01 -0.01 -> 1.00
-- some more cases where adding 0 affects the coefficient
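-- note: the ideal exponent of a sum is the smaller operand exponent, so
-- adding 0 (exponent 0) to 1E+16 asks for a 17-digit coefficient; the
-- reround to 16 digits is exact, hence Rounded without Inexact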
ddadd340 add 1E+3 0 -> 1000
ddadd341 add 1E+15 0 -> 1000000000000000
ddadd342 add 1E+16 0 -> 1.000000000000000E+16 Rounded
ddadd343 add 1E+20 0 -> 1.000000000000000E+20 Rounded
-- which simply follow from these cases ...
ddadd344 add 1E+3 1 -> 1001
ddadd345 add 1E+15 1 -> 1000000000000001
ddadd346 add 1E+16 1 -> 1.000000000000000E+16 Inexact Rounded
ddadd347 add 1E+20 1 -> 1.000000000000000E+20 Inexact Rounded
ddadd348 add 1E+3 7 -> 1007
ddadd349 add 1E+15 7 -> 1000000000000007
ddadd350 add 1E+16 7 -> 1.000000000000001E+16 Inexact Rounded
ddadd351 add 1E+20 7 -> 1.000000000000000E+20 Inexact Rounded
-- tryzeros cases
rounding: half_up
ddadd360 add 0E+50 10000E+1 -> 1.0000E+5
ddadd361 add 0E-50 10000E+1 -> 100000.0000000000 Rounded
ddadd362 add 10000E+1 0E-50 -> 100000.0000000000 Rounded
ddadd363 add 10000E+1 10000E-50 -> 100000.0000000000 Rounded Inexact
ddadd364 add 9.999999999999999E+384 -9.999999999999999E+384 -> 0E+369
-- a curiosity from JSR 13 testing
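-- note: 9999999999999999 + 815 = 10000000000000814, one digit too wide;
-- the dropped digit is 4, so half_down, half_up and half_even all agree
-- on 1.000000000000081E+16 Inexact Rounded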
rounding: half_down
ddadd370 add 999999999999999 815 -> 1000000000000814
ddadd371 add 9999999999999999 815 -> 1.000000000000081E+16 Rounded Inexact
rounding: half_up
ddadd372 add 999999999999999 815 -> 1000000000000814
ddadd373 add 9999999999999999 815 -> 1.000000000000081E+16 Rounded Inexact
rounding: half_even
ddadd374 add 999999999999999 815 -> 1000000000000814
ddadd375 add 9999999999999999 815 -> 1.000000000000081E+16 Rounded Inexact
-- operands folded
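-- note: with clamp=1 a finite exponent may not exceed
-- Emax-(precision-1) = 369, so 2E+384 must be padded out to
-- 2.000000000000000E+384, raising Clamped; 2E+369 and below fit as-is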
ddadd380 add 1E+384 1E+384 -> 2.000000000000000E+384 Clamped
ddadd381 add 1E+380 1E+380 -> 2.00000000000E+380 Clamped
ddadd382 add 1E+376 1E+376 -> 2.0000000E+376 Clamped
ddadd383 add 1E+372 1E+372 -> 2.000E+372 Clamped
ddadd384 add 1E+370 1E+370 -> 2.0E+370 Clamped
ddadd385 add 1E+369 1E+369 -> 2E+369
ddadd386 add 1E+368 1E+368 -> 2E+368
-- ulp replacement tests
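-- note: one ulp of 1.000000000000000 is 1E-15; 77e-15 is the smallest
-- scale at which both digits of 77 fit exactly, so from 77e-16 downwards
-- the trailing 7 must be rounded away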
ddadd400 add 1 77e-14 -> 1.00000000000077
ddadd401 add 1 77e-15 -> 1.000000000000077
ddadd402 add 1 77e-16 -> 1.000000000000008 Inexact Rounded
ddadd403 add 1 77e-17 -> 1.000000000000001 Inexact Rounded
ddadd404 add 1 77e-18 -> 1.000000000000000 Inexact Rounded
ddadd405 add 1 77e-19 -> 1.000000000000000 Inexact Rounded
ddadd406 add 1 77e-299 -> 1.000000000000000 Inexact Rounded
ddadd410 add 10 77e-14 -> 10.00000000000077
ddadd411 add 10 77e-15 -> 10.00000000000008 Inexact Rounded
ddadd412 add 10 77e-16 -> 10.00000000000001 Inexact Rounded
ddadd413 add 10 77e-17 -> 10.00000000000000 Inexact Rounded
ddadd414 add 10 77e-18 -> 10.00000000000000 Inexact Rounded
ddadd415 add 10 77e-19 -> 10.00000000000000 Inexact Rounded
ddadd416 add 10 77e-299 -> 10.00000000000000 Inexact Rounded
ddadd420 add 77e-14 1 -> 1.00000000000077
ddadd421 add 77e-15 1 -> 1.000000000000077
ddadd422 add 77e-16 1 -> 1.000000000000008 Inexact Rounded
ddadd423 add 77e-17 1 -> 1.000000000000001 Inexact Rounded
ddadd424 add 77e-18 1 -> 1.000000000000000 Inexact Rounded
ddadd425 add 77e-19 1 -> 1.000000000000000 Inexact Rounded
ddadd426 add 77e-299 1 -> 1.000000000000000 Inexact Rounded
ddadd430 add 77e-14 10 -> 10.00000000000077
ddadd431 add 77e-15 10 -> 10.00000000000008 Inexact Rounded
ddadd432 add 77e-16 10 -> 10.00000000000001 Inexact Rounded
ddadd433 add 77e-17 10 -> 10.00000000000000 Inexact Rounded
ddadd434 add 77e-18 10 -> 10.00000000000000 Inexact Rounded
ddadd435 add 77e-19 10 -> 10.00000000000000 Inexact Rounded
ddadd436 add 77e-299 10 -> 10.00000000000000 Inexact Rounded
-- fastpath boundary (more in dqadd)
-- 1234567890123456
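-- note: both operands have exponent 0 and full 16-digit coefficients, so
-- a pure coefficient addition either fits (ddadd539-552) or carries into
-- a 17th digit and must be rounded (ddadd553)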
ddadd539 add '4444444444444444' '3333333333333333' -> '7777777777777777'
ddadd540 add '4444444444444444' '4444444444444444' -> '8888888888888888'
ddadd541 add '4444444444444444' '5555555555555555' -> '9999999999999999'
ddadd542 add '3333333333333333' '4444444444444444' -> '7777777777777777'
ddadd543 add '4444444444444444' '4444444444444444' -> '8888888888888888'
ddadd544 add '5555555555555555' '4444444444444444' -> '9999999999999999'
ddadd545 add '3000004000000000' '3000000000000040' -> '6000004000000040'
ddadd546 add '3000000400000000' '4000000000000400' -> '7000000400000400'
ddadd547 add '3000000040000000' '5000000000004000' -> '8000000040004000'
ddadd548 add '4000000004000000' '3000000000040000' -> '7000000004040000'
ddadd549 add '4000000000400000' '4000000000400000' -> '8000000000800000'
ddadd550 add '4000000000040000' '5000000004000000' -> '9000000004040000'
ddadd551 add '5000000000004000' '3000000040000000' -> '8000000040004000'
ddadd552 add '5000000000000400' '4000000400000000' -> '9000000400000400'
ddadd553 add '5000000000000040' '5000004000000000' -> 1.000000400000004E+16 Rounded
-- check propagation
ddadd554 add '8999999999999999' '0000000000000001' -> 9000000000000000
ddadd555 add '0000000000000001' '8999999999999999' -> 9000000000000000
ddadd556 add '0999999999999999' '0000000000000001' -> 1000000000000000
ddadd557 add '0000000000000001' '0999999999999999' -> 1000000000000000
ddadd558 add '4444444444444444' '4555555555555556' -> 9000000000000000
ddadd559 add '4555555555555556' '4444444444444444' -> 9000000000000000
-- negative ulps
ddadd6440 add 1 -77e-14 -> 0.99999999999923
ddadd6441 add 1 -77e-15 -> 0.999999999999923
ddadd6442 add 1 -77e-16 -> 0.9999999999999923
ddadd6443 add 1 -77e-17 -> 0.9999999999999992 Inexact Rounded
ddadd6444 add 1 -77e-18 -> 0.9999999999999999 Inexact Rounded
ddadd6445 add 1 -77e-19 -> 1.000000000000000 Inexact Rounded
ddadd6446 add 1 -77e-99 -> 1.000000000000000 Inexact Rounded
ddadd6450 add 10 -77e-14 -> 9.99999999999923
ddadd6451 add 10 -77e-15 -> 9.999999999999923
ddadd6452 add 10 -77e-16 -> 9.999999999999992 Inexact Rounded
ddadd6453 add 10 -77e-17 -> 9.999999999999999 Inexact Rounded
ddadd6454 add 10 -77e-18 -> 10.00000000000000 Inexact Rounded
ddadd6455 add 10 -77e-19 -> 10.00000000000000 Inexact Rounded
ddadd6456 add 10 -77e-99 -> 10.00000000000000 Inexact Rounded
ddadd6460 add -77e-14 1 -> 0.99999999999923
ddadd6461 add -77e-15 1 -> 0.999999999999923
ddadd6462 add -77e-16 1 -> 0.9999999999999923
ddadd6463 add -77e-17 1 -> 0.9999999999999992 Inexact Rounded
ddadd6464 add -77e-18 1 -> 0.9999999999999999 Inexact Rounded
ddadd6465 add -77e-19 1 -> 1.000000000000000 Inexact Rounded
ddadd6466 add -77e-99 1 -> 1.000000000000000 Inexact Rounded
ddadd6470 add -77e-14 10 -> 9.99999999999923
ddadd6471 add -77e-15 10 -> 9.999999999999923
ddadd6472 add -77e-16 10 -> 9.999999999999992 Inexact Rounded
ddadd6473 add -77e-17 10 -> 9.999999999999999 Inexact Rounded
ddadd6474 add -77e-18 10 -> 10.00000000000000 Inexact Rounded
ddadd6475 add -77e-19 10 -> 10.00000000000000 Inexact Rounded
ddadd6476 add -77e-99 10 -> 10.00000000000000 Inexact Rounded
-- negative ulps
ddadd6480 add -1 77e-14 -> -0.99999999999923
ddadd6481 add -1 77e-15 -> -0.999999999999923
ddadd6482 add -1 77e-16 -> -0.9999999999999923
ddadd6483 add -1 77e-17 -> -0.9999999999999992 Inexact Rounded
ddadd6484 add -1 77e-18 -> -0.9999999999999999 Inexact Rounded
ddadd6485 add -1 77e-19 -> -1.000000000000000 Inexact Rounded
ddadd6486 add -1 77e-99 -> -1.000000000000000 Inexact Rounded
ddadd6490 add -10 77e-14 -> -9.99999999999923
ddadd6491 add -10 77e-15 -> -9.999999999999923
ddadd6492 add -10 77e-16 -> -9.999999999999992 Inexact Rounded
ddadd6493 add -10 77e-17 -> -9.999999999999999 Inexact Rounded
ddadd6494 add -10 77e-18 -> -10.00000000000000 Inexact Rounded
ddadd6495 add -10 77e-19 -> -10.00000000000000 Inexact Rounded
ddadd6496 add -10 77e-99 -> -10.00000000000000 Inexact Rounded
ddadd6500 add 77e-14 -1 -> -0.99999999999923
ddadd6501 add 77e-15 -1 -> -0.999999999999923
ddadd6502 add 77e-16 -1 -> -0.9999999999999923
ddadd6503 add 77e-17 -1 -> -0.9999999999999992 Inexact Rounded
ddadd6504 add 77e-18 -1 -> -0.9999999999999999 Inexact Rounded
ddadd6505 add 77e-19 -1 -> -1.000000000000000 Inexact Rounded
ddadd6506 add 77e-99 -1 -> -1.000000000000000 Inexact Rounded
ddadd6510 add 77e-14 -10 -> -9.99999999999923
ddadd6511 add 77e-15 -10 -> -9.999999999999923
ddadd6512 add 77e-16 -10 -> -9.999999999999992 Inexact Rounded
ddadd6513 add 77e-17 -10 -> -9.999999999999999 Inexact Rounded
ddadd6514 add 77e-18 -10 -> -10.00000000000000 Inexact Rounded
ddadd6515 add 77e-19 -10 -> -10.00000000000000 Inexact Rounded
ddadd6516 add 77e-99 -10 -> -10.00000000000000 Inexact Rounded
-- and some more residue effects and different roundings
rounding: half_up
ddadd6540 add '6543210123456789' 0 -> '6543210123456789'
ddadd6541 add '6543210123456789' 0.000000001 -> '6543210123456789' Inexact Rounded
ddadd6542 add '6543210123456789' 0.000001 -> '6543210123456789' Inexact Rounded
ddadd6543 add '6543210123456789' 0.1 -> '6543210123456789' Inexact Rounded
ddadd6544 add '6543210123456789' 0.4 -> '6543210123456789' Inexact Rounded
ddadd6545 add '6543210123456789' 0.49 -> '6543210123456789' Inexact Rounded
ddadd6546 add '6543210123456789' 0.499999 -> '6543210123456789' Inexact Rounded
ddadd6547 add '6543210123456789' 0.499999999 -> '6543210123456789' Inexact Rounded
ddadd6548 add '6543210123456789' 0.5 -> '6543210123456790' Inexact Rounded
ddadd6549 add '6543210123456789' 0.500000001 -> '6543210123456790' Inexact Rounded
ddadd6550 add '6543210123456789' 0.500001 -> '6543210123456790' Inexact Rounded
ddadd6551 add '6543210123456789' 0.51 -> '6543210123456790' Inexact Rounded
ddadd6552 add '6543210123456789' 0.6 -> '6543210123456790' Inexact Rounded
ddadd6553 add '6543210123456789' 0.9 -> '6543210123456790' Inexact Rounded
ddadd6554 add '6543210123456789' 0.99999 -> '6543210123456790' Inexact Rounded
ddadd6555 add '6543210123456789' 0.999999999 -> '6543210123456790' Inexact Rounded
ddadd6556 add '6543210123456789' 1 -> '6543210123456790'
ddadd6557 add '6543210123456789' 1.000000001 -> '6543210123456790' Inexact Rounded
ddadd6558 add '6543210123456789' 1.00001 -> '6543210123456790' Inexact Rounded
ddadd6559 add '6543210123456789' 1.1 -> '6543210123456790' Inexact Rounded
rounding: half_even
ddadd6560 add '6543210123456789' 0 -> '6543210123456789'
ddadd6561 add '6543210123456789' 0.000000001 -> '6543210123456789' Inexact Rounded
ddadd6562 add '6543210123456789' 0.000001 -> '6543210123456789' Inexact Rounded
ddadd6563 add '6543210123456789' 0.1 -> '6543210123456789' Inexact Rounded
ddadd6564 add '6543210123456789' 0.4 -> '6543210123456789' Inexact Rounded
ddadd6565 add '6543210123456789' 0.49 -> '6543210123456789' Inexact Rounded
ddadd6566 add '6543210123456789' 0.499999 -> '6543210123456789' Inexact Rounded
ddadd6567 add '6543210123456789' 0.499999999 -> '6543210123456789' Inexact Rounded
ddadd6568 add '6543210123456789' 0.5 -> '6543210123456790' Inexact Rounded
ddadd6569 add '6543210123456789' 0.500000001 -> '6543210123456790' Inexact Rounded
ddadd6570 add '6543210123456789' 0.500001 -> '6543210123456790' Inexact Rounded
ddadd6571 add '6543210123456789' 0.51 -> '6543210123456790' Inexact Rounded
ddadd6572 add '6543210123456789' 0.6 -> '6543210123456790' Inexact Rounded
ddadd6573 add '6543210123456789' 0.9 -> '6543210123456790' Inexact Rounded
ddadd6574 add '6543210123456789' 0.99999 -> '6543210123456790' Inexact Rounded
ddadd6575 add '6543210123456789' 0.999999999 -> '6543210123456790' Inexact Rounded
ddadd6576 add '6543210123456789' 1 -> '6543210123456790'
ddadd6577 add '6543210123456789' 1.00000001 -> '6543210123456790' Inexact Rounded
ddadd6578 add '6543210123456789' 1.00001 -> '6543210123456790' Inexact Rounded
ddadd6579 add '6543210123456789' 1.1 -> '6543210123456790' Inexact Rounded
-- critical few with even bottom digit...
ddadd7540 add '6543210123456788' 0.499999999 -> '6543210123456788' Inexact Rounded
ddadd7541 add '6543210123456788' 0.5 -> '6543210123456788' Inexact Rounded
ddadd7542 add '6543210123456788' 0.500000001 -> '6543210123456789' Inexact Rounded
rounding: down
ddadd7550 add '6543210123456789' 0 -> '6543210123456789'
ddadd7551 add '6543210123456789' 0.000000001 -> '6543210123456789' Inexact Rounded
ddadd7552 add '6543210123456789' 0.000001 -> '6543210123456789' Inexact Rounded
ddadd7553 add '6543210123456789' 0.1 -> '6543210123456789' Inexact Rounded
ddadd7554 add '6543210123456789' 0.4 -> '6543210123456789' Inexact Rounded
ddadd7555 add '6543210123456789' 0.49 -> '6543210123456789' Inexact Rounded
ddadd7556 add '6543210123456789' 0.499999 -> '6543210123456789' Inexact Rounded
ddadd7557 add '6543210123456789' 0.499999999 -> '6543210123456789' Inexact Rounded
ddadd7558 add '6543210123456789' 0.5 -> '6543210123456789' Inexact Rounded
ddadd7559 add '6543210123456789' 0.500000001 -> '6543210123456789' Inexact Rounded
ddadd7560 add '6543210123456789' 0.500001 -> '6543210123456789' Inexact Rounded
ddadd7561 add '6543210123456789' 0.51 -> '6543210123456789' Inexact Rounded
ddadd7562 add '6543210123456789' 0.6 -> '6543210123456789' Inexact Rounded
ddadd7563 add '6543210123456789' 0.9 -> '6543210123456789' Inexact Rounded
ddadd7564 add '6543210123456789' 0.99999 -> '6543210123456789' Inexact Rounded
ddadd7565 add '6543210123456789' 0.999999999 -> '6543210123456789' Inexact Rounded
ddadd7566 add '6543210123456789' 1 -> '6543210123456790'
ddadd7567 add '6543210123456789' 1.00000001 -> '6543210123456790' Inexact Rounded
ddadd7568 add '6543210123456789' 1.00001 -> '6543210123456790' Inexact Rounded
ddadd7569 add '6543210123456789' 1.1 -> '6543210123456790' Inexact Rounded
-- verify a query
rounding: down
ddadd7661 add 1e-398 9.000000000000000E+384 -> 9.000000000000000E+384 Inexact Rounded
ddadd7662 add 0 9.000000000000000E+384 -> 9.000000000000000E+384 Rounded
ddadd7663 add 1e-388 9.000000000000000E+374 -> 9.000000000000000E+374 Inexact Rounded
ddadd7664 add 0 9.000000000000000E+374 -> 9.000000000000000E+374 Rounded
-- more zeros, etc.
rounding: half_even
ddadd7701 add 5.00 1.00E-3 -> 5.00100
ddadd7702 add 00.00 0.000 -> 0.000
ddadd7703 add 00.00 0E-3 -> 0.000
ddadd7704 add 0E-3 00.00 -> 0.000
ddadd7710 add 0E+3 00.00 -> 0.00
ddadd7711 add 0E+3 00.0 -> 0.0
ddadd7712 add 0E+3 00. -> 0
ddadd7713 add 0E+3 00.E+1 -> 0E+1
ddadd7714 add 0E+3 00.E+2 -> 0E+2
ddadd7715 add 0E+3 00.E+3 -> 0E+3
ddadd7716 add 0E+3 00.E+4 -> 0E+3
ddadd7717 add 0E+3 00.E+5 -> 0E+3
ddadd7718 add 0E+3 -00.0 -> 0.0
ddadd7719 add 0E+3 -00. -> 0
ddadd7731 add 0E+3 -00.E+1 -> 0E+1
ddadd7720 add 00.00 0E+3 -> 0.00
ddadd7721 add 00.0 0E+3 -> 0.0
ddadd7722 add 00. 0E+3 -> 0
ddadd7723 add 00.E+1 0E+3 -> 0E+1
ddadd7724 add 00.E+2 0E+3 -> 0E+2
ddadd7725 add 00.E+3 0E+3 -> 0E+3
ddadd7726 add 00.E+4 0E+3 -> 0E+3
ddadd7727 add 00.E+5 0E+3 -> 0E+3
ddadd7728 add -00.00 0E+3 -> 0.00
ddadd7729 add -00.0 0E+3 -> 0.0
ddadd7730 add -00. 0E+3 -> 0
ddadd7732 add 0 0 -> 0
ddadd7733 add 0 -0 -> 0
ddadd7734 add -0 0 -> 0
ddadd7735 add -0 -0 -> -0 -- IEEE 854 special case
ddadd7736 add 1 -1 -> 0
ddadd7737 add -1 -1 -> -2
ddadd7738 add 1 1 -> 2
ddadd7739 add -1 1 -> 0
ddadd7741 add 0 -1 -> -1
ddadd7742 add -0 -1 -> -1
ddadd7743 add 0 1 -> 1
ddadd7744 add -0 1 -> 1
ddadd7745 add -1 0 -> -1
ddadd7746 add -1 -0 -> -1
ddadd7747 add 1 0 -> 1
ddadd7748 add 1 -0 -> 1
ddadd7751 add 0.0 -1 -> -1.0
ddadd7752 add -0.0 -1 -> -1.0
ddadd7753 add 0.0 1 -> 1.0
ddadd7754 add -0.0 1 -> 1.0
ddadd7755 add -1.0 0 -> -1.0
ddadd7756 add -1.0 -0 -> -1.0
ddadd7757 add 1.0 0 -> 1.0
ddadd7758 add 1.0 -0 -> 1.0
ddadd7761 add 0 -1.0 -> -1.0
ddadd7762 add -0 -1.0 -> -1.0
ddadd7763 add 0 1.0 -> 1.0
ddadd7764 add -0 1.0 -> 1.0
ddadd7765 add -1 0.0 -> -1.0
ddadd7766 add -1 -0.0 -> -1.0
ddadd7767 add 1 0.0 -> 1.0
ddadd7768 add 1 -0.0 -> 1.0
ddadd7771 add 0.0 -1.0 -> -1.0
ddadd7772 add -0.0 -1.0 -> -1.0
ddadd7773 add 0.0 1.0 -> 1.0
ddadd7774 add -0.0 1.0 -> 1.0
ddadd7775 add -1.0 0.0 -> -1.0
ddadd7776 add -1.0 -0.0 -> -1.0
ddadd7777 add 1.0 0.0 -> 1.0
ddadd7778 add 1.0 -0.0 -> 1.0
-- Specials
ddadd7780 add -Inf -Inf -> -Infinity
ddadd7781 add -Inf -1000 -> -Infinity
ddadd7782 add -Inf -1 -> -Infinity
ddadd7783 add -Inf -0 -> -Infinity
ddadd7784 add -Inf 0 -> -Infinity
ddadd7785 add -Inf 1 -> -Infinity
ddadd7786 add -Inf 1000 -> -Infinity
ddadd7787 add -1000 -Inf -> -Infinity
ddadd7788 add -Inf -Inf -> -Infinity
ddadd7789 add -1 -Inf -> -Infinity
ddadd7790 add -0 -Inf -> -Infinity
ddadd7791 add 0 -Inf -> -Infinity
ddadd7792 add 1 -Inf -> -Infinity
ddadd7793 add 1000 -Inf -> -Infinity
ddadd7794 add Inf -Inf -> NaN Invalid_operation
ddadd7800 add Inf -Inf -> NaN Invalid_operation
ddadd7801 add Inf -1000 -> Infinity
ddadd7802 add Inf -1 -> Infinity
ddadd7803 add Inf -0 -> Infinity
ddadd7804 add Inf 0 -> Infinity
ddadd7805 add Inf 1 -> Infinity
ddadd7806 add Inf 1000 -> Infinity
ddadd7807 add Inf Inf -> Infinity
ddadd7808 add -1000 Inf -> Infinity
ddadd7809 add -Inf Inf -> NaN Invalid_operation
ddadd7810 add -1 Inf -> Infinity
ddadd7811 add -0 Inf -> Infinity
ddadd7812 add 0 Inf -> Infinity
ddadd7813 add 1 Inf -> Infinity
ddadd7814 add 1000 Inf -> Infinity
ddadd7815 add Inf Inf -> Infinity
ddadd7821 add NaN -Inf -> NaN
ddadd7822 add NaN -1000 -> NaN
ddadd7823 add NaN -1 -> NaN
ddadd7824 add NaN -0 -> NaN
ddadd7825 add NaN 0 -> NaN
ddadd7826 add NaN 1 -> NaN
ddadd7827 add NaN 1000 -> NaN
ddadd7828 add NaN Inf -> NaN
ddadd7829 add NaN NaN -> NaN
ddadd7830 add -Inf NaN -> NaN
ddadd7831 add -1000 NaN -> NaN
ddadd7832 add -1 NaN -> NaN
ddadd7833 add -0 NaN -> NaN
ddadd7834 add 0 NaN -> NaN
ddadd7835 add 1 NaN -> NaN
ddadd7836 add 1000 NaN -> NaN
ddadd7837 add Inf NaN -> NaN
ddadd7841 add sNaN -Inf -> NaN Invalid_operation
ddadd7842 add sNaN -1000 -> NaN Invalid_operation
ddadd7843 add sNaN -1 -> NaN Invalid_operation
ddadd7844 add sNaN -0 -> NaN Invalid_operation
ddadd7845 add sNaN 0 -> NaN Invalid_operation
ddadd7846 add sNaN 1 -> NaN Invalid_operation
ddadd7847 add sNaN 1000 -> NaN Invalid_operation
ddadd7848 add sNaN NaN -> NaN Invalid_operation
ddadd7849 add sNaN sNaN -> NaN Invalid_operation
ddadd7850 add NaN sNaN -> NaN Invalid_operation
ddadd7851 add -Inf sNaN -> NaN Invalid_operation
ddadd7852 add -1000 sNaN -> NaN Invalid_operation
ddadd7853 add -1 sNaN -> NaN Invalid_operation
ddadd7854 add -0 sNaN -> NaN Invalid_operation
ddadd7855 add 0 sNaN -> NaN Invalid_operation
ddadd7856 add 1 sNaN -> NaN Invalid_operation
ddadd7857 add 1000 sNaN -> NaN Invalid_operation
ddadd7858 add Inf sNaN -> NaN Invalid_operation
ddadd7859 add NaN sNaN -> NaN Invalid_operation
-- propagating NaNs
ddadd7861 add NaN1 -Inf -> NaN1
ddadd7862 add +NaN2 -1000 -> NaN2
ddadd7863 add NaN3 1000 -> NaN3
ddadd7864 add NaN4 Inf -> NaN4
ddadd7865 add NaN5 +NaN6 -> NaN5
ddadd7866 add -Inf NaN7 -> NaN7
ddadd7867 add -1000 NaN8 -> NaN8
ddadd7868 add 1000 NaN9 -> NaN9
ddadd7869 add Inf +NaN10 -> NaN10
ddadd7871 add sNaN11 -Inf -> NaN11 Invalid_operation
ddadd7872 add sNaN12 -1000 -> NaN12 Invalid_operation
ddadd7873 add sNaN13 1000 -> NaN13 Invalid_operation
ddadd7874 add sNaN14 NaN17 -> NaN14 Invalid_operation
ddadd7875 add sNaN15 sNaN18 -> NaN15 Invalid_operation
ddadd7876 add NaN16 sNaN19 -> NaN19 Invalid_operation
ddadd7877 add -Inf +sNaN20 -> NaN20 Invalid_operation
ddadd7878 add -1000 sNaN21 -> NaN21 Invalid_operation
ddadd7879 add 1000 sNaN22 -> NaN22 Invalid_operation
ddadd7880 add Inf sNaN23 -> NaN23 Invalid_operation
ddadd7881 add +NaN25 +sNaN24 -> NaN24 Invalid_operation
ddadd7882 add -NaN26 NaN28 -> -NaN26
ddadd7883 add -sNaN27 sNaN29 -> -NaN27 Invalid_operation
ddadd7884 add 1000 -NaN30 -> -NaN30
ddadd7885 add 1000 -sNaN31 -> -NaN31 Invalid_operation
-- Here we explore near the boundary of rounding a subnormal to Nmin
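-- note: Nmin is 1E-383; 1E-383 - 1E-398 = 9.99999999999999E-384, just
-- below Nmin with 15 significant digits, exact but Subnormal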
ddadd7575 add 1E-383 -1E-398 -> 9.99999999999999E-384 Subnormal
ddadd7576 add -1E-383 +1E-398 -> -9.99999999999999E-384 Subnormal
-- and another curious case
ddadd7577 add 7.000000000000E-385 -1.00000E-391 -> 6.999999000000E-385 Subnormal
-- check overflow edge case
-- 1234567890123456
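-- note: Nmax is 9.999999999999999E+384; an addend of 5E+368 reaches the
-- exact halfway point, so half_even rounds up and overflows to Infinity,
-- while 4E+368 rounds back down to Nmax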
ddadd7972 apply 9.999999999999999E+384 -> 9.999999999999999E+384
ddadd7973 add 9.999999999999999E+384 1 -> 9.999999999999999E+384 Inexact Rounded
ddadd7974 add 9999999999999999E+369 1 -> 9.999999999999999E+384 Inexact Rounded
ddadd7975 add 9999999999999999E+369 1E+369 -> Infinity Overflow Inexact Rounded
ddadd7976 add 9999999999999999E+369 9E+368 -> Infinity Overflow Inexact Rounded
ddadd7977 add 9999999999999999E+369 8E+368 -> Infinity Overflow Inexact Rounded
ddadd7978 add 9999999999999999E+369 7E+368 -> Infinity Overflow Inexact Rounded
ddadd7979 add 9999999999999999E+369 6E+368 -> Infinity Overflow Inexact Rounded
ddadd7980 add 9999999999999999E+369 5E+368 -> Infinity Overflow Inexact Rounded
ddadd7981 add 9999999999999999E+369 4E+368 -> 9.999999999999999E+384 Inexact Rounded
ddadd7982 add 9999999999999999E+369 3E+368 -> 9.999999999999999E+384 Inexact Rounded
ddadd7983 add 9999999999999999E+369 2E+368 -> 9.999999999999999E+384 Inexact Rounded
ddadd7984 add 9999999999999999E+369 1E+368 -> 9.999999999999999E+384 Inexact Rounded
ddadd7985 apply -9.999999999999999E+384 -> -9.999999999999999E+384
ddadd7986 add -9.999999999999999E+384 -1 -> -9.999999999999999E+384 Inexact Rounded
ddadd7987 add -9999999999999999E+369 -1 -> -9.999999999999999E+384 Inexact Rounded
ddadd7988 add -9999999999999999E+369 -1E+369 -> -Infinity Overflow Inexact Rounded
ddadd7989 add -9999999999999999E+369 -9E+368 -> -Infinity Overflow Inexact Rounded
ddadd7990 add -9999999999999999E+369 -8E+368 -> -Infinity Overflow Inexact Rounded
ddadd7991 add -9999999999999999E+369 -7E+368 -> -Infinity Overflow Inexact Rounded
ddadd7992 add -9999999999999999E+369 -6E+368 -> -Infinity Overflow Inexact Rounded
ddadd7993 add -9999999999999999E+369 -5E+368 -> -Infinity Overflow Inexact Rounded
ddadd7994 add -9999999999999999E+369 -4E+368 -> -9.999999999999999E+384 Inexact Rounded
ddadd7995 add -9999999999999999E+369 -3E+368 -> -9.999999999999999E+384 Inexact Rounded
ddadd7996 add -9999999999999999E+369 -2E+368 -> -9.999999999999999E+384 Inexact Rounded
ddadd7997 add -9999999999999999E+369 -1E+368 -> -9.999999999999999E+384 Inexact Rounded
-- And for round down full and subnormal results
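-- note: subtracting a sliver (1e-383) from a power of ten exposes a run
-- of 9s; round-down truncates toward zero, and ceiling does the same for
-- the negative cases that follow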
rounding: down
ddadd71100 add 1e+2 -1e-383 -> 99.99999999999999 Rounded Inexact
ddadd71101 add 1e+1 -1e-383 -> 9.999999999999999 Rounded Inexact
ddadd71103 add +1 -1e-383 -> 0.9999999999999999 Rounded Inexact
ddadd71104 add 1e-1 -1e-383 -> 0.09999999999999999 Rounded Inexact
ddadd71105 add 1e-2 -1e-383 -> 0.009999999999999999 Rounded Inexact
ddadd71106 add 1e-3 -1e-383 -> 0.0009999999999999999 Rounded Inexact
ddadd71107 add 1e-4 -1e-383 -> 0.00009999999999999999 Rounded Inexact
ddadd71108 add 1e-5 -1e-383 -> 0.000009999999999999999 Rounded Inexact
ddadd71109 add 1e-6 -1e-383 -> 9.999999999999999E-7 Rounded Inexact
rounding: ceiling
ddadd71110 add -1e+2 +1e-383 -> -99.99999999999999 Rounded Inexact
ddadd71111 add -1e+1 +1e-383 -> -9.999999999999999 Rounded Inexact
ddadd71113 add -1 +1e-383 -> -0.9999999999999999 Rounded Inexact
ddadd71114 add -1e-1 +1e-383 -> -0.09999999999999999 Rounded Inexact
ddadd71115 add -1e-2 +1e-383 -> -0.009999999999999999 Rounded Inexact
ddadd71116 add -1e-3 +1e-383 -> -0.0009999999999999999 Rounded Inexact
ddadd71117 add -1e-4 +1e-383 -> -0.00009999999999999999 Rounded Inexact
ddadd71118 add -1e-5 +1e-383 -> -0.000009999999999999999 Rounded Inexact
ddadd71119 add -1e-6 +1e-383 -> -9.999999999999999E-7 Rounded Inexact
-- tests based on Gunnar Degnbol's edge case
rounding: half_even
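-- note: 1E16 - 0.5 is exactly 9999999999999999.5, equidistant from its
-- two 16-digit neighbours; half_even picks the even coefficient, hence
-- 1.000000000000000E+16 (in Python's decimal, which runs these files:
-- Context(prec=16).add(Decimal('1E16'), Decimal('-0.5')))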
ddadd71300 add 1E16 -0.5 -> 1.000000000000000E+16 Inexact Rounded
ddadd71310 add 1E16 -0.51 -> 9999999999999999 Inexact Rounded
ddadd71311 add 1E16 -0.501 -> 9999999999999999 Inexact Rounded
ddadd71312 add 1E16 -0.5001 -> 9999999999999999 Inexact Rounded
ddadd71313 add 1E16 -0.50001 -> 9999999999999999 Inexact Rounded
ddadd71314 add 1E16 -0.500001 -> 9999999999999999 Inexact Rounded
ddadd71315 add 1E16 -0.5000001 -> 9999999999999999 Inexact Rounded
ddadd71316 add 1E16 -0.50000001 -> 9999999999999999 Inexact Rounded
ddadd71317 add 1E16 -0.500000001 -> 9999999999999999 Inexact Rounded
ddadd71318 add 1E16 -0.5000000001 -> 9999999999999999 Inexact Rounded
ddadd71319 add 1E16 -0.50000000001 -> 9999999999999999 Inexact Rounded
ddadd71320 add 1E16 -0.500000000001 -> 9999999999999999 Inexact Rounded
ddadd71321 add 1E16 -0.5000000000001 -> 9999999999999999 Inexact Rounded
ddadd71322 add 1E16 -0.50000000000001 -> 9999999999999999 Inexact Rounded
ddadd71323 add 1E16 -0.500000000000001 -> 9999999999999999 Inexact Rounded
ddadd71324 add 1E16 -0.5000000000000001 -> 9999999999999999 Inexact Rounded
ddadd71325 add 1E16 -0.5000000000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71326 add 1E16 -0.500000000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71327 add 1E16 -0.50000000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71328 add 1E16 -0.5000000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71329 add 1E16 -0.500000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71330 add 1E16 -0.50000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71331 add 1E16 -0.5000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71332 add 1E16 -0.500000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71333 add 1E16 -0.50000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71334 add 1E16 -0.5000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71335 add 1E16 -0.500000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71336 add 1E16 -0.50000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71337 add 1E16 -0.5000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71338 add 1E16 -0.500 -> 1.000000000000000E+16 Inexact Rounded
ddadd71339 add 1E16 -0.50 -> 1.000000000000000E+16 Inexact Rounded
ddadd71340 add 1E16 -5000000.000010001 -> 9999999995000000 Inexact Rounded
ddadd71341 add 1E16 -5000000.000000001 -> 9999999995000000 Inexact Rounded
ddadd71349 add 9999999999999999 0.4 -> 9999999999999999 Inexact Rounded
ddadd71350 add 9999999999999999 0.49 -> 9999999999999999 Inexact Rounded
ddadd71351 add 9999999999999999 0.499 -> 9999999999999999 Inexact Rounded
ddadd71352 add 9999999999999999 0.4999 -> 9999999999999999 Inexact Rounded
ddadd71353 add 9999999999999999 0.49999 -> 9999999999999999 Inexact Rounded
ddadd71354 add 9999999999999999 0.499999 -> 9999999999999999 Inexact Rounded
ddadd71355 add 9999999999999999 0.4999999 -> 9999999999999999 Inexact Rounded
ddadd71356 add 9999999999999999 0.49999999 -> 9999999999999999 Inexact Rounded
ddadd71357 add 9999999999999999 0.499999999 -> 9999999999999999 Inexact Rounded
ddadd71358 add 9999999999999999 0.4999999999 -> 9999999999999999 Inexact Rounded
ddadd71359 add 9999999999999999 0.49999999999 -> 9999999999999999 Inexact Rounded
ddadd71360 add 9999999999999999 0.499999999999 -> 9999999999999999 Inexact Rounded
ddadd71361 add 9999999999999999 0.4999999999999 -> 9999999999999999 Inexact Rounded
ddadd71362 add 9999999999999999 0.49999999999999 -> 9999999999999999 Inexact Rounded
ddadd71363 add 9999999999999999 0.499999999999999 -> 9999999999999999 Inexact Rounded
ddadd71364 add 9999999999999999 0.4999999999999999 -> 9999999999999999 Inexact Rounded
ddadd71365 add 9999999999999999 0.5000000000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71367 add 9999999999999999 0.500000000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71368 add 9999999999999999 0.50000000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71369 add 9999999999999999 0.5000000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71370 add 9999999999999999 0.500000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71371 add 9999999999999999 0.50000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71372 add 9999999999999999 0.5000000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71373 add 9999999999999999 0.500000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71374 add 9999999999999999 0.50000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71375 add 9999999999999999 0.5000000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71376 add 9999999999999999 0.500000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71377 add 9999999999999999 0.50000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71378 add 9999999999999999 0.5000 -> 1.000000000000000E+16 Inexact Rounded
ddadd71379 add 9999999999999999 0.500 -> 1.000000000000000E+16 Inexact Rounded
ddadd71380 add 9999999999999999 0.50 -> 1.000000000000000E+16 Inexact Rounded
ddadd71381 add 9999999999999999 0.5 -> 1.000000000000000E+16 Inexact Rounded
ddadd71382 add 9999999999999999 0.5000000000000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71383 add 9999999999999999 0.500000000000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71384 add 9999999999999999 0.50000000000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71385 add 9999999999999999 0.5000000000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71386 add 9999999999999999 0.500000000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71387 add 9999999999999999 0.50000000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71388 add 9999999999999999 0.5000000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71389 add 9999999999999999 0.500000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71390 add 9999999999999999 0.50000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71391 add 9999999999999999 0.5000001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71392 add 9999999999999999 0.500001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71393 add 9999999999999999 0.50001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71394 add 9999999999999999 0.5001 -> 1.000000000000000E+16 Inexact Rounded
ddadd71395 add 9999999999999999 0.501 -> 1.000000000000000E+16 Inexact Rounded
ddadd71396 add 9999999999999999 0.51 -> 1.000000000000000E+16 Inexact Rounded
-- More GD edge cases, where difference between the unadjusted
-- exponents is larger than the maximum precision and one side is 0
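-- note: the sum is just the non-zero operand; the switch to exponential
-- form at 1E-7 is the to-scientific-string rule (plain notation only
-- while the exponent is <= 0 and the adjusted exponent is >= -6)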
ddadd71420 add 0 1.123456789012345 -> 1.123456789012345
ddadd71421 add 0 1.123456789012345E-1 -> 0.1123456789012345
ddadd71422 add 0 1.123456789012345E-2 -> 0.01123456789012345
ddadd71423 add 0 1.123456789012345E-3 -> 0.001123456789012345
ddadd71424 add 0 1.123456789012345E-4 -> 0.0001123456789012345
ddadd71425 add 0 1.123456789012345E-5 -> 0.00001123456789012345
ddadd71426 add 0 1.123456789012345E-6 -> 0.000001123456789012345
ddadd71427 add 0 1.123456789012345E-7 -> 1.123456789012345E-7
ddadd71428 add 0 1.123456789012345E-8 -> 1.123456789012345E-8
ddadd71429 add 0 1.123456789012345E-9 -> 1.123456789012345E-9
ddadd71430 add 0 1.123456789012345E-10 -> 1.123456789012345E-10
ddadd71431 add 0 1.123456789012345E-11 -> 1.123456789012345E-11
ddadd71432 add 0 1.123456789012345E-12 -> 1.123456789012345E-12
ddadd71433 add 0 1.123456789012345E-13 -> 1.123456789012345E-13
ddadd71434 add 0 1.123456789012345E-14 -> 1.123456789012345E-14
ddadd71435 add 0 1.123456789012345E-15 -> 1.123456789012345E-15
ddadd71436 add 0 1.123456789012345E-16 -> 1.123456789012345E-16
ddadd71437 add 0 1.123456789012345E-17 -> 1.123456789012345E-17
ddadd71438 add 0 1.123456789012345E-18 -> 1.123456789012345E-18
ddadd71439 add 0 1.123456789012345E-19 -> 1.123456789012345E-19
-- same, reversed 0
ddadd71440 add 1.123456789012345 0 -> 1.123456789012345
ddadd71441 add 1.123456789012345E-1 0 -> 0.1123456789012345
ddadd71442 add 1.123456789012345E-2 0 -> 0.01123456789012345
ddadd71443 add 1.123456789012345E-3 0 -> 0.001123456789012345
ddadd71444 add 1.123456789012345E-4 0 -> 0.0001123456789012345
ddadd71445 add 1.123456789012345E-5 0 -> 0.00001123456789012345
ddadd71446 add 1.123456789012345E-6 0 -> 0.000001123456789012345
ddadd71447 add 1.123456789012345E-7 0 -> 1.123456789012345E-7
ddadd71448 add 1.123456789012345E-8 0 -> 1.123456789012345E-8
ddadd71449 add 1.123456789012345E-9 0 -> 1.123456789012345E-9
ddadd71450 add 1.123456789012345E-10 0 -> 1.123456789012345E-10
ddadd71451 add 1.123456789012345E-11 0 -> 1.123456789012345E-11
ddadd71452 add 1.123456789012345E-12 0 -> 1.123456789012345E-12
ddadd71453 add 1.123456789012345E-13 0 -> 1.123456789012345E-13
ddadd71454 add 1.123456789012345E-14 0 -> 1.123456789012345E-14
ddadd71455 add 1.123456789012345E-15 0 -> 1.123456789012345E-15
ddadd71456 add 1.123456789012345E-16 0 -> 1.123456789012345E-16
ddadd71457 add 1.123456789012345E-17 0 -> 1.123456789012345E-17
ddadd71458 add 1.123456789012345E-18 0 -> 1.123456789012345E-18
ddadd71459 add 1.123456789012345E-19 0 -> 1.123456789012345E-19
-- same, Es on the 0
ddadd71460 add 1.123456789012345 0E-0 -> 1.123456789012345
ddadd71461 add 1.123456789012345 0E-1 -> 1.123456789012345
ddadd71462 add 1.123456789012345 0E-2 -> 1.123456789012345
ddadd71463 add 1.123456789012345 0E-3 -> 1.123456789012345
ddadd71464 add 1.123456789012345 0E-4 -> 1.123456789012345
ddadd71465 add 1.123456789012345 0E-5 -> 1.123456789012345
ddadd71466 add 1.123456789012345 0E-6 -> 1.123456789012345
ddadd71467 add 1.123456789012345 0E-7 -> 1.123456789012345
ddadd71468 add 1.123456789012345 0E-8 -> 1.123456789012345
ddadd71469 add 1.123456789012345 0E-9 -> 1.123456789012345
ddadd71470 add 1.123456789012345 0E-10 -> 1.123456789012345
ddadd71471 add 1.123456789012345 0E-11 -> 1.123456789012345
ddadd71472 add 1.123456789012345 0E-12 -> 1.123456789012345
ddadd71473 add 1.123456789012345 0E-13 -> 1.123456789012345
ddadd71474 add 1.123456789012345 0E-14 -> 1.123456789012345
ddadd71475 add 1.123456789012345 0E-15 -> 1.123456789012345
-- next four flag Rounded because the 0 extends the result
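-- note: 0E-16 drags the ideal exponent to -16, which would need a
-- 17-digit coefficient; rounding back to exponent -15 is exact, so only
-- Rounded is flagged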
ddadd71476 add 1.123456789012345 0E-16 -> 1.123456789012345 Rounded
ddadd71477 add 1.123456789012345 0E-17 -> 1.123456789012345 Rounded
ddadd71478 add 1.123456789012345 0E-18 -> 1.123456789012345 Rounded
ddadd71479 add 1.123456789012345 0E-19 -> 1.123456789012345 Rounded
-- sum of two opposite-sign operands is exactly 0 and floor => -0
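-- note: IEEE 854 gives an exact zero sum the sign + under every rounding
-- mode except round toward negative infinity (floor), which gives -0;
-- the floor block at the end marks those cases with -- *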
rounding: half_up
-- exact zeros from zeros
ddadd71500 add 0 0E-19 -> 0E-19
ddadd71501 add -0 0E-19 -> 0E-19
ddadd71502 add 0 -0E-19 -> 0E-19
ddadd71503 add -0 -0E-19 -> -0E-19
-- exact zeros from non-zeros
ddadd71511 add -11 11 -> 0
ddadd71512 add 11 -11 -> 0
rounding: half_down
-- exact zeros from zeros
ddadd71520 add 0 0E-19 -> 0E-19
ddadd71521 add -0 0E-19 -> 0E-19
ddadd71522 add 0 -0E-19 -> 0E-19
ddadd71523 add -0 -0E-19 -> -0E-19
-- exact zeros from non-zeros
ddadd71531 add -11 11 -> 0
ddadd71532 add 11 -11 -> 0
rounding: half_even
-- exact zeros from zeros
ddadd71540 add 0 0E-19 -> 0E-19
ddadd71541 add -0 0E-19 -> 0E-19
ddadd71542 add 0 -0E-19 -> 0E-19
ddadd71543 add -0 -0E-19 -> -0E-19
-- exact zeros from non-zeros
ddadd71551 add -11 11 -> 0
ddadd71552 add 11 -11 -> 0
rounding: up
-- exact zeros from zeros
ddadd71560 add 0 0E-19 -> 0E-19
ddadd71561 add -0 0E-19 -> 0E-19
ddadd71562 add 0 -0E-19 -> 0E-19
ddadd71563 add -0 -0E-19 -> -0E-19
-- exact zeros from non-zeros
ddadd71571 add -11 11 -> 0
ddadd71572 add 11 -11 -> 0
rounding: down
-- exact zeros from zeros
ddadd71580 add 0 0E-19 -> 0E-19
ddadd71581 add -0 0E-19 -> 0E-19
ddadd71582 add 0 -0E-19 -> 0E-19
ddadd71583 add -0 -0E-19 -> -0E-19
-- exact zeros from non-zeros
ddadd71591 add -11 11 -> 0
ddadd71592 add 11 -11 -> 0
rounding: ceiling
-- exact zeros from zeros
ddadd71600 add 0 0E-19 -> 0E-19
ddadd71601 add -0 0E-19 -> 0E-19
ddadd71602 add 0 -0E-19 -> 0E-19
ddadd71603 add -0 -0E-19 -> -0E-19
-- exact zeros from non-zeros
ddadd71611 add -11 11 -> 0
ddadd71612 add 11 -11 -> 0
-- and the extra-special ugly case; unusual minuses marked by -- *
rounding: floor
-- exact zeros from zeros
ddadd71620 add 0 0E-19 -> 0E-19
ddadd71621 add -0 0E-19 -> -0E-19 -- *
ddadd71622 add 0 -0E-19 -> -0E-19 -- *
ddadd71623 add -0 -0E-19 -> -0E-19
-- exact zeros from non-zeros
ddadd71631 add -11 11 -> -0 -- *
ddadd71632 add 11 -11 -> -0 -- *
-- Examples from SQL proposal (Krishna Kulkarni)
ddadd71701 add 130E-2 120E-2 -> 2.50
ddadd71702 add 130E-2 12E-1 -> 2.50
ddadd71703 add 130E-2 1E0 -> 2.30
ddadd71704 add 1E2 1E4 -> 1.01E+4
ddadd71705 add 130E-2 -120E-2 -> 0.10
ddadd71706 add 130E-2 -12E-1 -> 0.10
ddadd71707 add 130E-2 -1E0 -> 0.30
ddadd71708 add 1E2 -1E4 -> -9.9E+3
-- query from Vincent Kulandaisamy
rounding: ceiling
ddadd71801 add 7.8822773805862E+277 -5.1757503820663E-21 -> 7.882277380586200E+277 Inexact Rounded
ddadd71802 add 7.882277380586200E+277 12.341 -> 7.882277380586201E+277 Inexact Rounded
ddadd71803 add 7.882277380586201E+277 2.7270545046613E-31 -> 7.882277380586202E+277 Inexact Rounded
ddadd71811 add 12.341 -5.1757503820663E-21 -> 12.34100000000000 Inexact Rounded
ddadd71812 add 12.34100000000000 2.7270545046613E-31 -> 12.34100000000001 Inexact Rounded
ddadd71813 add 12.34100000000001 7.8822773805862E+277 -> 7.882277380586201E+277 Inexact Rounded
-- Gappy coefficients; check residue handling even with full coefficient gap
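-- note: the addend sits entirely below the 16 digits of the left
-- operand, so only its sign and non-zero-ness (the residue) can affect
-- rounding; every value from 6E-3 down to 6E-20 leaves the result
-- unchanged but Inexact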
rounding: half_even
ddadd75001 add 1234567890123456 1 -> 1234567890123457
ddadd75002 add 1234567890123456 0.6 -> 1234567890123457 Inexact Rounded
ddadd75003 add 1234567890123456 0.06 -> 1234567890123456 Inexact Rounded
ddadd75004 add 1234567890123456 6E-3 -> 1234567890123456 Inexact Rounded
ddadd75005 add 1234567890123456 6E-4 -> 1234567890123456 Inexact Rounded
ddadd75006 add 1234567890123456 6E-5 -> 1234567890123456 Inexact Rounded
ddadd75007 add 1234567890123456 6E-6 -> 1234567890123456 Inexact Rounded
ddadd75008 add 1234567890123456 6E-7 -> 1234567890123456 Inexact Rounded
ddadd75009 add 1234567890123456 6E-8 -> 1234567890123456 Inexact Rounded
ddadd75010 add 1234567890123456 6E-9 -> 1234567890123456 Inexact Rounded
ddadd75011 add 1234567890123456 6E-10 -> 1234567890123456 Inexact Rounded
ddadd75012 add 1234567890123456 6E-11 -> 1234567890123456 Inexact Rounded
ddadd75013 add 1234567890123456 6E-12 -> 1234567890123456 Inexact Rounded
ddadd75014 add 1234567890123456 6E-13 -> 1234567890123456 Inexact Rounded
ddadd75015 add 1234567890123456 6E-14 -> 1234567890123456 Inexact Rounded
ddadd75016 add 1234567890123456 6E-15 -> 1234567890123456 Inexact Rounded
ddadd75017 add 1234567890123456 6E-16 -> 1234567890123456 Inexact Rounded
ddadd75018 add 1234567890123456 6E-17 -> 1234567890123456 Inexact Rounded
ddadd75019 add 1234567890123456 6E-18 -> 1234567890123456 Inexact Rounded
ddadd75020 add 1234567890123456 6E-19 -> 1234567890123456 Inexact Rounded
ddadd75021 add 1234567890123456 6E-20 -> 1234567890123456 Inexact Rounded
-- widening second argument at gap
ddadd75030 add 12345678 1 -> 12345679
ddadd75031 add 12345678 0.1 -> 12345678.1
ddadd75032 add 12345678 0.12 -> 12345678.12
ddadd75033 add 12345678 0.123 -> 12345678.123
ddadd75034 add 12345678 0.1234 -> 12345678.1234
ddadd75035 add 12345678 0.12345 -> 12345678.12345
ddadd75036 add 12345678 0.123456 -> 12345678.123456
ddadd75037 add 12345678 0.1234567 -> 12345678.1234567
ddadd75038 add 12345678 0.12345678 -> 12345678.12345678
ddadd75039 add 12345678 0.123456789 -> 12345678.12345679 Inexact Rounded
ddadd75040 add 12345678 0.123456785 -> 12345678.12345678 Inexact Rounded
ddadd75041 add 12345678 0.1234567850 -> 12345678.12345678 Inexact Rounded
ddadd75042 add 12345678 0.1234567851 -> 12345678.12345679 Inexact Rounded
ddadd75043 add 12345678 0.12345678501 -> 12345678.12345679 Inexact Rounded
ddadd75044 add 12345678 0.123456785001 -> 12345678.12345679 Inexact Rounded
ddadd75045 add 12345678 0.1234567850001 -> 12345678.12345679 Inexact Rounded
ddadd75046 add 12345678 0.12345678500001 -> 12345678.12345679 Inexact Rounded
ddadd75047 add 12345678 0.123456785000001 -> 12345678.12345679 Inexact Rounded
ddadd75048 add 12345678 0.1234567850000001 -> 12345678.12345679 Inexact Rounded
ddadd75049 add 12345678 0.1234567850000000 -> 12345678.12345678 Inexact Rounded
-- 90123456
rounding: half_even
ddadd75050 add 12345678 0.0234567750000000 -> 12345678.02345678 Inexact Rounded
ddadd75051 add 12345678 0.0034567750000000 -> 12345678.00345678 Inexact Rounded
ddadd75052 add 12345678 0.0004567750000000 -> 12345678.00045678 Inexact Rounded
ddadd75053 add 12345678 0.0000567750000000 -> 12345678.00005678 Inexact Rounded
ddadd75054 add 12345678 0.0000067750000000 -> 12345678.00000678 Inexact Rounded
ddadd75055 add 12345678 0.0000007750000000 -> 12345678.00000078 Inexact Rounded
ddadd75056 add 12345678 0.0000000750000000 -> 12345678.00000008 Inexact Rounded
ddadd75057 add 12345678 0.0000000050000000 -> 12345678.00000000 Inexact Rounded
ddadd75060 add 12345678 0.0234567750000001 -> 12345678.02345678 Inexact Rounded
ddadd75061 add 12345678 0.0034567750000001 -> 12345678.00345678 Inexact Rounded
ddadd75062 add 12345678 0.0004567750000001 -> 12345678.00045678 Inexact Rounded
ddadd75063 add 12345678 0.0000567750000001 -> 12345678.00005678 Inexact Rounded
ddadd75064 add 12345678 0.0000067750000001 -> 12345678.00000678 Inexact Rounded
ddadd75065 add 12345678 0.0000007750000001 -> 12345678.00000078 Inexact Rounded
ddadd75066 add 12345678 0.0000000750000001 -> 12345678.00000008 Inexact Rounded
ddadd75067 add 12345678 0.0000000050000001 -> 12345678.00000001 Inexact Rounded
-- far-out residues (full coefficient gap is 16+15 digits)
rounding: up
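-- note: round-up rounds away from zero on any non-zero residue, so even
-- 1E-35, far below the last place, still bumps the result to
-- 12345678.00000001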
ddadd75070 add 12345678 1E-8 -> 12345678.00000001
ddadd75071 add 12345678 1E-9 -> 12345678.00000001 Inexact Rounded
ddadd75072 add 12345678 1E-10 -> 12345678.00000001 Inexact Rounded
ddadd75073 add 12345678 1E-11 -> 12345678.00000001 Inexact Rounded
ddadd75074 add 12345678 1E-12 -> 12345678.00000001 Inexact Rounded
ddadd75075 add 12345678 1E-13 -> 12345678.00000001 Inexact Rounded
ddadd75076 add 12345678 1E-14 -> 12345678.00000001 Inexact Rounded
ddadd75077 add 12345678 1E-15 -> 12345678.00000001 Inexact Rounded
ddadd75078 add 12345678 1E-16 -> 12345678.00000001 Inexact Rounded
ddadd75079 add 12345678 1E-17 -> 12345678.00000001 Inexact Rounded
ddadd75080 add 12345678 1E-18 -> 12345678.00000001 Inexact Rounded
ddadd75081 add 12345678 1E-19 -> 12345678.00000001 Inexact Rounded
ddadd75082 add 12345678 1E-20 -> 12345678.00000001 Inexact Rounded
ddadd75083 add 12345678 1E-25 -> 12345678.00000001 Inexact Rounded
ddadd75084 add 12345678 1E-30 -> 12345678.00000001 Inexact Rounded
ddadd75085 add 12345678 1E-31 -> 12345678.00000001 Inexact Rounded
ddadd75086 add 12345678 1E-32 -> 12345678.00000001 Inexact Rounded
ddadd75087 add 12345678 1E-33 -> 12345678.00000001 Inexact Rounded
ddadd75088 add 12345678 1E-34 -> 12345678.00000001 Inexact Rounded
ddadd75089 add 12345678 1E-35 -> 12345678.00000001 Inexact Rounded
-- Punit's
ddadd75100 add 1.000 -200.000 -> -199.000
-- Rounding swathe
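-- note: the same swathe of fractions is replayed under each rounding
-- mode so that only the handling of the dropped digits differs: .2350
-- goes to .24 under half_up but to .23 under half_down and down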
rounding: half_even
ddadd81100 add .2300 12345678901234.00 -> 12345678901234.23 Rounded
ddadd81101 add .2301 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81102 add .2310 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81103 add .2350 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81104 add .2351 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81105 add .2450 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81106 add .2451 12345678901234.00 -> 12345678901234.25 Inexact Rounded
ddadd81107 add .2360 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81108 add .2370 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81109 add .2399 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81120 add 9999999999999999E+369 9E+369 -> Infinity Overflow Inexact Rounded
ddadd81121 add -9999999999999999E+369 -9E+369 -> -Infinity Overflow Inexact Rounded
rounding: half_up
ddadd81200 add .2300 12345678901234.00 -> 12345678901234.23 Rounded
ddadd81201 add .2301 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81202 add .2310 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81203 add .2350 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81204 add .2351 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81205 add .2450 12345678901234.00 -> 12345678901234.25 Inexact Rounded
ddadd81206 add .2451 12345678901234.00 -> 12345678901234.25 Inexact Rounded
ddadd81207 add .2360 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81208 add .2370 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81209 add .2399 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81220 add 9999999999999999E+369 9E+369 -> Infinity Overflow Inexact Rounded
ddadd81221 add -9999999999999999E+369 -9E+369 -> -Infinity Overflow Inexact Rounded
rounding: half_down
ddadd81300 add .2300 12345678901234.00 -> 12345678901234.23 Rounded
ddadd81301 add .2301 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81302 add .2310 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81303 add .2350 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81304 add .2351 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81305 add .2450 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81306 add .2451 12345678901234.00 -> 12345678901234.25 Inexact Rounded
ddadd81307 add .2360 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81308 add .2370 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81309 add .2399 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81320 add 9999999999999999E+369 9E+369 -> Infinity Overflow Inexact Rounded
ddadd81321 add -9999999999999999E+369 -9E+369 -> -Infinity Overflow Inexact Rounded
rounding: up
ddadd81400 add .2300 12345678901234.00 -> 12345678901234.23 Rounded
ddadd81401 add .2301 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81402 add .2310 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81403 add .2350 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81404 add .2351 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81405 add .2450 12345678901234.00 -> 12345678901234.25 Inexact Rounded
ddadd81406 add .2451 12345678901234.00 -> 12345678901234.25 Inexact Rounded
ddadd81407 add .2360 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81408 add .2370 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81409 add .2399 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81411 add -.2399 -12345678901234.00 -> -12345678901234.24 Inexact Rounded
ddadd81420 add 9999999999999999E+369 9E+369 -> Infinity Overflow Inexact Rounded
ddadd81421 add -9999999999999999E+369 -9E+369 -> -Infinity Overflow Inexact Rounded
rounding: down
ddadd81500 add .2300 12345678901234.00 -> 12345678901234.23 Rounded
ddadd81501 add .2301 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81502 add .2310 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81503 add .2350 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81504 add .2351 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81505 add .2450 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81506 add .2451 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81507 add .2360 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81508 add .2370 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81509 add .2399 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81511 add -.2399 -12345678901234.00 -> -12345678901234.23 Inexact Rounded
ddadd81520 add 9999999999999999E+369 9E+369 -> 9.999999999999999E+384 Overflow Inexact Rounded
ddadd81521 add -9999999999999999E+369 -9E+369 -> -9.999999999999999E+384 Overflow Inexact Rounded
rounding: ceiling
ddadd81600 add .2300 12345678901234.00 -> 12345678901234.23 Rounded
ddadd81601 add .2301 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81602 add .2310 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81603 add .2350 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81604 add .2351 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81605 add .2450 12345678901234.00 -> 12345678901234.25 Inexact Rounded
ddadd81606 add .2451 12345678901234.00 -> 12345678901234.25 Inexact Rounded
ddadd81607 add .2360 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81608 add .2370 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81609 add .2399 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81611 add -.2399 -12345678901234.00 -> -12345678901234.23 Inexact Rounded
ddadd81620 add 9999999999999999E+369 9E+369 -> Infinity Overflow Inexact Rounded
ddadd81621 add -9999999999999999E+369 -9E+369 -> -9.999999999999999E+384 Overflow Inexact Rounded
rounding: floor
ddadd81700 add .2300 12345678901234.00 -> 12345678901234.23 Rounded
ddadd81701 add .2301 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81702 add .2310 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81703 add .2350 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81704 add .2351 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81705 add .2450 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81706 add .2451 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd81707 add .2360 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81708 add .2370 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81709 add .2399 12345678901234.00 -> 12345678901234.23 Inexact Rounded
ddadd81711 add -.2399 -12345678901234.00 -> -12345678901234.24 Inexact Rounded
ddadd81720 add 9999999999999999E+369 9E+369 -> 9.999999999999999E+384 Overflow Inexact Rounded
ddadd81721 add -9999999999999999E+369 -9E+369 -> -Infinity Overflow Inexact Rounded
rounding: 05up
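-- note: 05up truncates unless the last retained digit would be 0 or 5,
-- in which case any non-zero residue rounds away from zero: .2001 gives
-- .21 (digit 0) and .2501 gives .26 (digit 5), but .2101 stays .21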
ddadd81800 add .2000 12345678901234.00 -> 12345678901234.20 Rounded
ddadd81801 add .2001 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81802 add .2010 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81803 add .2050 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81804 add .2051 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81807 add .2060 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81808 add .2070 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81809 add .2099 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81811 add -.2099 -12345678901234.00 -> -12345678901234.21 Inexact Rounded
ddadd81820 add 9999999999999999E+369 9E+369 -> 9.999999999999999E+384 Overflow Inexact Rounded
ddadd81821 add -9999999999999999E+369 -9E+369 -> -9.999999999999999E+384 Overflow Inexact Rounded
ddadd81900 add .2100 12345678901234.00 -> 12345678901234.21 Rounded
ddadd81901 add .2101 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81902 add .2110 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81903 add .2150 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81904 add .2151 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81907 add .2160 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81908 add .2170 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81909 add .2199 12345678901234.00 -> 12345678901234.21 Inexact Rounded
ddadd81911 add -.2199 -12345678901234.00 -> -12345678901234.21 Inexact Rounded
ddadd82000 add .2400 12345678901234.00 -> 12345678901234.24 Rounded
ddadd82001 add .2401 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd82002 add .2410 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd82003 add .2450 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd82004 add .2451 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd82007 add .2460 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd82008 add .2470 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd82009 add .2499 12345678901234.00 -> 12345678901234.24 Inexact Rounded
ddadd82011 add -.2499 -12345678901234.00 -> -12345678901234.24 Inexact Rounded
ddadd82100 add .2500 12345678901234.00 -> 12345678901234.25 Rounded
ddadd82101 add .2501 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82102 add .2510 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82103 add .2550 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82104 add .2551 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82107 add .2560 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82108 add .2570 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82109 add .2599 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82111 add -.2599 -12345678901234.00 -> -12345678901234.26 Inexact Rounded
ddadd82200 add .2600 12345678901234.00 -> 12345678901234.26 Rounded
ddadd82201 add .2601 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82202 add .2610 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82203 add .2650 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82204 add .2651 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82207 add .2660 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82208 add .2670 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82209 add .2699 12345678901234.00 -> 12345678901234.26 Inexact Rounded
ddadd82211 add -.2699 -12345678901234.00 -> -12345678901234.26 Inexact Rounded
ddadd82300 add .2900 12345678901234.00 -> 12345678901234.29 Rounded
ddadd82301 add .2901 12345678901234.00 -> 12345678901234.29 Inexact Rounded
ddadd82302 add .2910 12345678901234.00 -> 12345678901234.29 Inexact Rounded
ddadd82303 add .2950 12345678901234.00 -> 12345678901234.29 Inexact Rounded
ddadd82304 add .2951 12345678901234.00 -> 12345678901234.29 Inexact Rounded
ddadd82307 add .2960 12345678901234.00 -> 12345678901234.29 Inexact Rounded
ddadd82308 add .2970 12345678901234.00 -> 12345678901234.29 Inexact Rounded
ddadd82309 add .2999 12345678901234.00 -> 12345678901234.29 Inexact Rounded
ddadd82311 add -.2999 -12345678901234.00 -> -12345678901234.29 Inexact Rounded
-- Null tests
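-- note: '#' denotes a null operand reference in the decTest format; any
-- null operand must give NaN with Invalid_operation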
ddadd9990 add 10 # -> NaN Invalid_operation
ddadd9991 add # 10 -> NaN Invalid_operation
------------------------------------------------------------------------
-- dqShift.decTest -- shift decQuad coefficient left or right --
-- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. --
------------------------------------------------------------------------
-- Please see the document "General Decimal Arithmetic Testcases" --
-- at http://www2.hursley.ibm.com/decimal for the description of --
-- these testcases. --
-- --
-- These testcases are experimental ('beta' versions), and they --
-- may contain errors. They are offered on an as-is basis. In --
-- particular, achieving the same results as the tests here is not --
-- a guarantee that an implementation complies with any Standard --
-- or specification. The tests are not exhaustive. --
-- --
-- Please send comments, suggestions, and corrections to the author: --
-- Mike Cowlishaw, IBM Fellow --
-- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK --
-- [email protected] --
------------------------------------------------------------------------
version: 2.59
extended: 1
clamp: 1
precision: 34
maxExponent: 6144
minExponent: -6143
rounding: half_even
-- Sanity check
dqshi001 shift 0 0 -> 0
dqshi002 shift 0 2 -> 0
dqshi003 shift 1 2 -> 100
dqshi004 shift 1 33 -> 1000000000000000000000000000000000
dqshi005 shift 1 34 -> 0
dqshi006 shift 1 -1 -> 0
dqshi007 shift 0 -2 -> 0
dqshi008 shift 1234567890123456789012345678901234 -1 -> 123456789012345678901234567890123
dqshi009 shift 1234567890123456789012345678901234 -33 -> 1
dqshi010 shift 1234567890123456789012345678901234 -34 -> 0
dqshi011 shift 9934567890123456789012345678901234 -33 -> 9
dqshi012 shift 9934567890123456789012345678901234 -34 -> 0
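-- (a positive rhs shifts the coefficient left, filling with zeros on the
-- right; a negative rhs shifts right, discarding digits; digits shifted
-- past either end are lost, so a shift of 34 or -34 always yields zero)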
-- rhs must be an integer
dqshi015 shift 1 1.5 -> NaN Invalid_operation
dqshi016 shift 1 1.0 -> NaN Invalid_operation
dqshi017 shift 1 0.1 -> NaN Invalid_operation
dqshi018 shift 1 0.0 -> NaN Invalid_operation
dqshi019 shift 1 1E+1 -> NaN Invalid_operation
dqshi020 shift 1 1E+99 -> NaN Invalid_operation
dqshi021 shift 1 Inf -> NaN Invalid_operation
dqshi022 shift 1 -Inf -> NaN Invalid_operation
-- and |rhs| <= precision
dqshi025 shift 1 -1000 -> NaN Invalid_operation
dqshi026 shift 1 -35 -> NaN Invalid_operation
dqshi027 shift 1 35 -> NaN Invalid_operation
dqshi028 shift 1 1000 -> NaN Invalid_operation
-- full shifting pattern
dqshi030 shift 1234567890123456789012345678901234 -34 -> 0
dqshi031 shift 1234567890123456789012345678901234 -33 -> 1
dqshi032 shift 1234567890123456789012345678901234 -32 -> 12
dqshi033 shift 1234567890123456789012345678901234 -31 -> 123
dqshi034 shift 1234567890123456789012345678901234 -30 -> 1234
dqshi035 shift 1234567890123456789012345678901234 -29 -> 12345
dqshi036 shift 1234567890123456789012345678901234 -28 -> 123456
dqshi037 shift 1234567890123456789012345678901234 -27 -> 1234567
dqshi038 shift 1234567890123456789012345678901234 -26 -> 12345678
dqshi039 shift 1234567890123456789012345678901234 -25 -> 123456789
dqshi040 shift 1234567890123456789012345678901234 -24 -> 1234567890
dqshi041 shift 1234567890123456789012345678901234 -23 -> 12345678901
dqshi042 shift 1234567890123456789012345678901234 -22 -> 123456789012
dqshi043 shift 1234567890123456789012345678901234 -21 -> 1234567890123
dqshi044 shift 1234567890123456789012345678901234 -20 -> 12345678901234
dqshi045 shift 1234567890123456789012345678901234 -19 -> 123456789012345
dqshi047 shift 1234567890123456789012345678901234 -18 -> 1234567890123456
dqshi048 shift 1234567890123456789012345678901234 -17 -> 12345678901234567
dqshi049 shift 1234567890123456789012345678901234 -16 -> 123456789012345678
dqshi050 shift 1234567890123456789012345678901234 -15 -> 1234567890123456789
dqshi051 shift 1234567890123456789012345678901234 -14 -> 12345678901234567890
dqshi052 shift 1234567890123456789012345678901234 -13 -> 123456789012345678901
dqshi053 shift 1234567890123456789012345678901234 -12 -> 1234567890123456789012
dqshi054 shift 1234567890123456789012345678901234 -11 -> 12345678901234567890123
dqshi055 shift 1234567890123456789012345678901234 -10 -> 123456789012345678901234
dqshi056 shift 1234567890123456789012345678901234 -9 -> 1234567890123456789012345
dqshi057 shift 1234567890123456789012345678901234 -8 -> 12345678901234567890123456
dqshi058 shift 1234567890123456789012345678901234 -7 -> 123456789012345678901234567
dqshi059 shift 1234567890123456789012345678901234 -6 -> 1234567890123456789012345678
dqshi060 shift 1234567890123456789012345678901234 -5 -> 12345678901234567890123456789
dqshi061 shift 1234567890123456789012345678901234 -4 -> 123456789012345678901234567890
dqshi062 shift 1234567890123456789012345678901234 -3 -> 1234567890123456789012345678901
dqshi063 shift 1234567890123456789012345678901234 -2 -> 12345678901234567890123456789012
dqshi064 shift 1234567890123456789012345678901234 -1 -> 123456789012345678901234567890123
dqshi065 shift 1234567890123456789012345678901234 -0 -> 1234567890123456789012345678901234
dqshi066 shift 1234567890123456789012345678901234 +0 -> 1234567890123456789012345678901234
dqshi067 shift 1234567890123456789012345678901234 +1 -> 2345678901234567890123456789012340
dqshi068 shift 1234567890123456789012345678901234 +2 -> 3456789012345678901234567890123400
dqshi069 shift 1234567890123456789012345678901234 +3 -> 4567890123456789012345678901234000
dqshi070 shift 1234567890123456789012345678901234 +4 -> 5678901234567890123456789012340000
dqshi071 shift 1234567890123456789012345678901234 +5 -> 6789012345678901234567890123400000
dqshi072 shift 1234567890123456789012345678901234 +6 -> 7890123456789012345678901234000000
dqshi073 shift 1234567890123456789012345678901234 +7 -> 8901234567890123456789012340000000
dqshi074 shift 1234567890123456789012345678901234 +8 -> 9012345678901234567890123400000000
dqshi075 shift 1234567890123456789012345678901234 +9 -> 123456789012345678901234000000000
dqshi076 shift 1234567890123456789012345678901234 +10 -> 1234567890123456789012340000000000
dqshi077 shift 1234567890123456789012345678901234 +11 -> 2345678901234567890123400000000000
dqshi078 shift 1234567890123456789012345678901234 +12 -> 3456789012345678901234000000000000
dqshi079 shift 1234567890123456789012345678901234 +13 -> 4567890123456789012340000000000000
dqshi080 shift 1234567890123456789012345678901234 +14 -> 5678901234567890123400000000000000
dqshi081 shift 1234567890123456789012345678901234 +15 -> 6789012345678901234000000000000000
dqshi082 shift 1234567890123456789012345678901234 +16 -> 7890123456789012340000000000000000
dqshi083 shift 1234567890123456789012345678901234 +17 -> 8901234567890123400000000000000000
dqshi084 shift 1234567890123456789012345678901234 +18 -> 9012345678901234000000000000000000
dqshi085 shift 1234567890123456789012345678901234 +19 -> 123456789012340000000000000000000
dqshi086 shift 1234567890123456789012345678901234 +20 -> 1234567890123400000000000000000000
dqshi087 shift 1234567890123456789012345678901234 +21 -> 2345678901234000000000000000000000
dqshi088 shift 1234567890123456789012345678901234 +22 -> 3456789012340000000000000000000000
dqshi089 shift 1234567890123456789012345678901234 +23 -> 4567890123400000000000000000000000
dqshi090 shift 1234567890123456789012345678901234 +24 -> 5678901234000000000000000000000000
dqshi091 shift 1234567890123456789012345678901234 +25 -> 6789012340000000000000000000000000
dqshi092 shift 1234567890123456789012345678901234 +26 -> 7890123400000000000000000000000000
dqshi093 shift 1234567890123456789012345678901234 +27 -> 8901234000000000000000000000000000
dqshi094 shift 1234567890123456789012345678901234 +28 -> 9012340000000000000000000000000000
dqshi095 shift 1234567890123456789012345678901234 +29 -> 123400000000000000000000000000000
dqshi096 shift 1234567890123456789012345678901234 +30 -> 1234000000000000000000000000000000
dqshi097 shift 1234567890123456789012345678901234 +31 -> 2340000000000000000000000000000000
dqshi098 shift 1234567890123456789012345678901234 +32 -> 3400000000000000000000000000000000
dqshi099 shift 1234567890123456789012345678901234 +33 -> 4000000000000000000000000000000000
dqshi100 shift 1234567890123456789012345678901234 +34 -> 0
-- zeros
dqshi270 shift 0E-10 +29 -> 0E-10
dqshi271 shift 0E-10 -29 -> 0E-10
dqshi272 shift 0.000 +29 -> 0.000
dqshi273 shift 0.000 -29 -> 0.000
dqshi274 shift 0E+10 +29 -> 0E+10
dqshi275 shift 0E+10 -29 -> 0E+10
dqshi276 shift -0E-10 +29 -> -0E-10
dqshi277 shift -0E-10 -29 -> -0E-10
dqshi278 shift -0.000 +29 -> -0.000
dqshi279 shift -0.000 -29 -> -0.000
dqshi280 shift -0E+10 +29 -> -0E+10
dqshi281 shift -0E+10 -29 -> -0E+10
-- Nmax, Nmin, Ntiny
dqshi141 shift 9.999999999999999999999999999999999E+6144 -1 -> 9.99999999999999999999999999999999E+6143
dqshi142 shift 9.999999999999999999999999999999999E+6144 -33 -> 9E+6111
dqshi143 shift 9.999999999999999999999999999999999E+6144 1 -> 9.999999999999999999999999999999990E+6144
dqshi144 shift 9.999999999999999999999999999999999E+6144 33 -> 9.000000000000000000000000000000000E+6144
dqshi145 shift 1E-6143 -1 -> 0E-6143
dqshi146 shift 1E-6143 -33 -> 0E-6143
dqshi147 shift 1E-6143 1 -> 1.0E-6142
dqshi148 shift 1E-6143 33 -> 1.000000000000000000000000000000000E-6110
dqshi151 shift 1.000000000000000000000000000000000E-6143 -1 -> 1.00000000000000000000000000000000E-6144
dqshi152 shift 1.000000000000000000000000000000000E-6143 -33 -> 1E-6176
dqshi153 shift 1.000000000000000000000000000000000E-6143 1 -> 0E-6176
dqshi154 shift 1.000000000000000000000000000000000E-6143 33 -> 0E-6176
dqshi155 shift 9.000000000000000000000000000000000E-6143 -1 -> 9.00000000000000000000000000000000E-6144
dqshi156 shift 9.000000000000000000000000000000000E-6143 -33 -> 9E-6176
dqshi157 shift 9.000000000000000000000000000000000E-6143 1 -> 0E-6176
dqshi158 shift 9.000000000000000000000000000000000E-6143 33 -> 0E-6176
dqshi160 shift 1E-6176 -1 -> 0E-6176
dqshi161 shift 1E-6176 -33 -> 0E-6176
dqshi162 shift 1E-6176 1 -> 1.0E-6175
dqshi163 shift 1E-6176 33 -> 1.000000000000000000000000000000000E-6143
-- negatives
dqshi171 shift -9.999999999999999999999999999999999E+6144 -1 -> -9.99999999999999999999999999999999E+6143
dqshi172 shift -9.999999999999999999999999999999999E+6144 -33 -> -9E+6111
dqshi173 shift -9.999999999999999999999999999999999E+6144 1 -> -9.999999999999999999999999999999990E+6144
dqshi174 shift -9.999999999999999999999999999999999E+6144 33 -> -9.000000000000000000000000000000000E+6144
dqshi175 shift -1E-6143 -1 -> -0E-6143
dqshi176 shift -1E-6143 -33 -> -0E-6143
dqshi177 shift -1E-6143 1 -> -1.0E-6142
dqshi178 shift -1E-6143 33 -> -1.000000000000000000000000000000000E-6110
dqshi181 shift -1.000000000000000000000000000000000E-6143 -1 -> -1.00000000000000000000000000000000E-6144
dqshi182 shift -1.000000000000000000000000000000000E-6143 -33 -> -1E-6176
dqshi183 shift -1.000000000000000000000000000000000E-6143 1 -> -0E-6176
dqshi184 shift -1.000000000000000000000000000000000E-6143 33 -> -0E-6176
dqshi185 shift -9.000000000000000000000000000000000E-6143 -1 -> -9.00000000000000000000000000000000E-6144
dqshi186 shift -9.000000000000000000000000000000000E-6143 -33 -> -9E-6176
dqshi187 shift -9.000000000000000000000000000000000E-6143 1 -> -0E-6176
dqshi188 shift -9.000000000000000000000000000000000E-6143 33 -> -0E-6176
dqshi190 shift -1E-6176 -1 -> -0E-6176
dqshi191 shift -1E-6176 -33 -> -0E-6176
dqshi192 shift -1E-6176 1 -> -1.0E-6175
dqshi193 shift -1E-6176 33 -> -1.000000000000000000000000000000000E-6143
-- more negatives (of sanities)
dqshi201 shift -0 0 -> -0
dqshi202 shift -0 2 -> -0
dqshi203 shift -1 2 -> -100
dqshi204 shift -1 33 -> -1000000000000000000000000000000000
dqshi205 shift -1 34 -> -0
dqshi206 shift -1 -1 -> -0
dqshi207 shift -0 -2 -> -0
dqshi208 shift -1234567890123456789012345678901234 -1 -> -123456789012345678901234567890123
dqshi209 shift -1234567890123456789012345678901234 -33 -> -1
dqshi210 shift -1234567890123456789012345678901234 -34 -> -0
dqshi211 shift -9934567890123456789012345678901234 -33 -> -9
dqshi212 shift -9934567890123456789012345678901234 -34 -> -0
-- Specials; NaNs are handled as usual
dqshi781 shift -Inf -8 -> -Infinity
dqshi782 shift -Inf -1 -> -Infinity
dqshi783 shift -Inf -0 -> -Infinity
dqshi784 shift -Inf 0 -> -Infinity
dqshi785 shift -Inf 1 -> -Infinity
dqshi786 shift -Inf 8 -> -Infinity
dqshi787 shift -1000 -Inf -> NaN Invalid_operation
dqshi788 shift -Inf -Inf -> NaN Invalid_operation
dqshi789 shift -1 -Inf -> NaN Invalid_operation
dqshi790 shift -0 -Inf -> NaN Invalid_operation
dqshi791 shift 0 -Inf -> NaN Invalid_operation
dqshi792 shift 1 -Inf -> NaN Invalid_operation
dqshi793 shift 1000 -Inf -> NaN Invalid_operation
dqshi794 shift Inf -Inf -> NaN Invalid_operation
dqshi800 shift Inf -Inf -> NaN Invalid_operation
dqshi801 shift Inf -8 -> Infinity
dqshi802 shift Inf -1 -> Infinity
dqshi803 shift Inf -0 -> Infinity
dqshi804 shift Inf 0 -> Infinity
dqshi805 shift Inf 1 -> Infinity
dqshi806 shift Inf 8 -> Infinity
dqshi807 shift Inf Inf -> NaN Invalid_operation
dqshi808 shift -1000 Inf -> NaN Invalid_operation
dqshi809 shift -Inf Inf -> NaN Invalid_operation
dqshi810 shift -1 Inf -> NaN Invalid_operation
dqshi811 shift -0 Inf -> NaN Invalid_operation
dqshi812 shift 0 Inf -> NaN Invalid_operation
dqshi813 shift 1 Inf -> NaN Invalid_operation
dqshi814 shift 1000 Inf -> NaN Invalid_operation
dqshi815 shift Inf Inf -> NaN Invalid_operation
dqshi821 shift NaN -Inf -> NaN
dqshi822 shift NaN -1000 -> NaN
dqshi823 shift NaN -1 -> NaN
dqshi824 shift NaN -0 -> NaN
dqshi825 shift NaN 0 -> NaN
dqshi826 shift NaN 1 -> NaN
dqshi827 shift NaN 1000 -> NaN
dqshi828 shift NaN Inf -> NaN
dqshi829 shift NaN NaN -> NaN
dqshi830 shift -Inf NaN -> NaN
dqshi831 shift -1000 NaN -> NaN
dqshi832 shift -1 NaN -> NaN
dqshi833 shift -0 NaN -> NaN
dqshi834 shift 0 NaN -> NaN
dqshi835 shift 1 NaN -> NaN
dqshi836 shift 1000 NaN -> NaN
dqshi837 shift Inf NaN -> NaN
dqshi841 shift sNaN -Inf -> NaN Invalid_operation
dqshi842 shift sNaN -1000 -> NaN Invalid_operation
dqshi843 shift sNaN -1 -> NaN Invalid_operation
dqshi844 shift sNaN -0 -> NaN Invalid_operation
dqshi845 shift sNaN 0 -> NaN Invalid_operation
dqshi846 shift sNaN 1 -> NaN Invalid_operation
dqshi847 shift sNaN 1000 -> NaN Invalid_operation
dqshi848 shift sNaN NaN -> NaN Invalid_operation
dqshi849 shift sNaN sNaN -> NaN Invalid_operation
dqshi850 shift NaN sNaN -> NaN Invalid_operation
dqshi851 shift -Inf sNaN -> NaN Invalid_operation
dqshi852 shift -1000 sNaN -> NaN Invalid_operation
dqshi853 shift -1 sNaN -> NaN Invalid_operation
dqshi854 shift -0 sNaN -> NaN Invalid_operation
dqshi855 shift 0 sNaN -> NaN Invalid_operation
dqshi856 shift 1 sNaN -> NaN Invalid_operation
dqshi857 shift 1000 sNaN -> NaN Invalid_operation
dqshi858 shift Inf sNaN -> NaN Invalid_operation
dqshi859 shift NaN sNaN -> NaN Invalid_operation
-- propagating NaNs
dqshi861 shift NaN1 -Inf -> NaN1
dqshi862 shift +NaN2 -1000 -> NaN2
dqshi863 shift NaN3 1000 -> NaN3
dqshi864 shift NaN4 Inf -> NaN4
dqshi865 shift NaN5 +NaN6 -> NaN5
dqshi866 shift -Inf NaN7 -> NaN7
dqshi867 shift -1000 NaN8 -> NaN8
dqshi868 shift 1000 NaN9 -> NaN9
dqshi869 shift Inf +NaN10 -> NaN10
dqshi871 shift sNaN11 -Inf -> NaN11 Invalid_operation
dqshi872 shift sNaN12 -1000 -> NaN12 Invalid_operation
dqshi873 shift sNaN13 1000 -> NaN13 Invalid_operation
dqshi874 shift sNaN14 NaN17 -> NaN14 Invalid_operation
dqshi875 shift sNaN15 sNaN18 -> NaN15 Invalid_operation
dqshi876 shift NaN16 sNaN19 -> NaN19 Invalid_operation
dqshi877 shift -Inf +sNaN20 -> NaN20 Invalid_operation
dqshi878 shift -1000 sNaN21 -> NaN21 Invalid_operation
dqshi879 shift 1000 sNaN22 -> NaN22 Invalid_operation
dqshi880 shift Inf sNaN23 -> NaN23 Invalid_operation
dqshi881 shift +NaN25 +sNaN24 -> NaN24 Invalid_operation
dqshi882 shift -NaN26 NaN28 -> -NaN26
dqshi883 shift -sNaN27 sNaN29 -> -NaN27 Invalid_operation
dqshi884 shift 1000 -NaN30 -> -NaN30
dqshi885 shift 1000 -sNaN31 -> -NaN31 Invalid_operation
| 19,138 | 299 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/decimaltestdata/ddRemainder.decTest | ------------------------------------------------------------------------
-- ddRemainder.decTest -- decDouble remainder --
-- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. --
------------------------------------------------------------------------
-- Please see the document "General Decimal Arithmetic Testcases" --
-- at http://www2.hursley.ibm.com/decimal for the description of --
-- these testcases. --
-- --
-- These testcases are experimental ('beta' versions), and they --
-- may contain errors. They are offered on an as-is basis. In --
-- particular, achieving the same results as the tests here is not --
-- a guarantee that an implementation complies with any Standard --
-- or specification. The tests are not exhaustive. --
-- --
-- Please send comments, suggestions, and corrections to the author: --
-- Mike Cowlishaw, IBM Fellow --
-- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK --
-- [email protected] --
------------------------------------------------------------------------
version: 2.59
precision: 16
maxExponent: 384
minExponent: -383
extended: 1
clamp: 1
rounding: half_even
-- sanity checks (as base, above)
ddrem001 remainder 1 1 -> 0
ddrem002 remainder 2 1 -> 0
ddrem003 remainder 1 2 -> 1
ddrem004 remainder 2 2 -> 0
ddrem005 remainder 0 1 -> 0
ddrem006 remainder 0 2 -> 0
ddrem007 remainder 1 3 -> 1
ddrem008 remainder 2 3 -> 2
ddrem009 remainder 3 3 -> 0
ddrem010 remainder 2.4 1 -> 0.4
ddrem011 remainder 2.4 -1 -> 0.4
ddrem012 remainder -2.4 1 -> -0.4
ddrem013 remainder -2.4 -1 -> -0.4
ddrem014 remainder 2.40 1 -> 0.40
ddrem015 remainder 2.400 1 -> 0.400
ddrem016 remainder 2.4 2 -> 0.4
ddrem017 remainder 2.400 2 -> 0.400
ddrem018 remainder 2. 2 -> 0
ddrem019 remainder 20 20 -> 0
ddrem020 remainder 187 187 -> 0
ddrem021 remainder 5 2 -> 1
ddrem022 remainder 5 2.0 -> 1.0
ddrem023 remainder 5 2.000 -> 1.000
ddrem024 remainder 5 0.200 -> 0.000
ddrem025 remainder 5 0.200 -> 0.000
ddrem030 remainder 1 2 -> 1
ddrem031 remainder 1 4 -> 1
ddrem032 remainder 1 8 -> 1
ddrem033 remainder 1 16 -> 1
ddrem034 remainder 1 32 -> 1
ddrem035 remainder 1 64 -> 1
ddrem040 remainder 1 -2 -> 1
ddrem041 remainder 1 -4 -> 1
ddrem042 remainder 1 -8 -> 1
ddrem043 remainder 1 -16 -> 1
ddrem044 remainder 1 -32 -> 1
ddrem045 remainder 1 -64 -> 1
ddrem050 remainder -1 2 -> -1
ddrem051 remainder -1 4 -> -1
ddrem052 remainder -1 8 -> -1
ddrem053 remainder -1 16 -> -1
ddrem054 remainder -1 32 -> -1
ddrem055 remainder -1 64 -> -1
ddrem060 remainder -1 -2 -> -1
ddrem061 remainder -1 -4 -> -1
ddrem062 remainder -1 -8 -> -1
ddrem063 remainder -1 -16 -> -1
ddrem064 remainder -1 -32 -> -1
ddrem065 remainder -1 -64 -> -1
ddrem066 remainder 999999999 1 -> 0
ddrem067 remainder 999999999.4 1 -> 0.4
ddrem068 remainder 999999999.5 1 -> 0.5
ddrem069 remainder 999999999.9 1 -> 0.9
ddrem070 remainder 999999999.999 1 -> 0.999
ddrem071 remainder 999999.999999 1 -> 0.999999
ddrem072 remainder 9 1 -> 0
ddrem073 remainder 9999999999999999 1 -> 0
ddrem074 remainder 9999999999999999 2 -> 1
ddrem075 remainder 9999999999999999 3 -> 0
ddrem076 remainder 9999999999999999 4 -> 3
ddrem080 remainder 0. 1 -> 0
ddrem081 remainder .0 1 -> 0.0
ddrem082 remainder 0.00 1 -> 0.00
ddrem083 remainder 0.00E+9 1 -> 0
ddrem084 remainder 0.00E+3 1 -> 0
ddrem085 remainder 0.00E+2 1 -> 0
ddrem086 remainder 0.00E+1 1 -> 0.0
ddrem087 remainder 0.00E+0 1 -> 0.00
ddrem088 remainder 0.00E-0 1 -> 0.00
ddrem089 remainder 0.00E-1 1 -> 0.000
ddrem090 remainder 0.00E-2 1 -> 0.0000
ddrem091 remainder 0.00E-3 1 -> 0.00000
ddrem092 remainder 0.00E-4 1 -> 0.000000
ddrem093 remainder 0.00E-5 1 -> 0E-7
ddrem094 remainder 0.00E-6 1 -> 0E-8
ddrem095 remainder 0.0000E-50 1 -> 0E-54
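-- (the exponent of a zero result is the smaller of the two operands'
-- exponents, as usual for remainder)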
-- Various flavours of remainder by 0
ddrem101 remainder 0 0 -> NaN Division_undefined
ddrem102 remainder 0 -0 -> NaN Division_undefined
ddrem103 remainder -0 0 -> NaN Division_undefined
ddrem104 remainder -0 -0 -> NaN Division_undefined
ddrem105 remainder 0.0E5 0 -> NaN Division_undefined
ddrem106 remainder 0.000 0 -> NaN Division_undefined
-- [Some think this next group should be a Division_by_zero exception,
-- but IEEE 854 is explicit that it is Invalid operation (at least for
-- remainder-near)]
ddrem107 remainder 0.0001 0 -> NaN Invalid_operation
ddrem108 remainder 0.01 0 -> NaN Invalid_operation
ddrem109 remainder 0.1 0 -> NaN Invalid_operation
ddrem110 remainder 1 0 -> NaN Invalid_operation
ddrem111 remainder 1 0.0 -> NaN Invalid_operation
ddrem112 remainder 10 0.0 -> NaN Invalid_operation
ddrem113 remainder 1E+100 0.0 -> NaN Invalid_operation
ddrem114 remainder 1E+380 0 -> NaN Invalid_operation
ddrem115 remainder 0.0001 -0 -> NaN Invalid_operation
ddrem116 remainder 0.01 -0 -> NaN Invalid_operation
ddrem119 remainder 0.1 -0 -> NaN Invalid_operation
ddrem120 remainder 1 -0 -> NaN Invalid_operation
ddrem121 remainder 1 -0.0 -> NaN Invalid_operation
ddrem122 remainder 10 -0.0 -> NaN Invalid_operation
ddrem123 remainder 1E+100 -0.0 -> NaN Invalid_operation
ddrem124 remainder 1E+384 -0 -> NaN Invalid_operation
-- and zeros on left
ddrem130 remainder 0 1 -> 0
ddrem131 remainder 0 -1 -> 0
ddrem132 remainder 0.0 1 -> 0.0
ddrem133 remainder 0.0 -1 -> 0.0
ddrem134 remainder -0 1 -> -0
ddrem135 remainder -0 -1 -> -0
ddrem136 remainder -0.0 1 -> -0.0
ddrem137 remainder -0.0 -1 -> -0.0
-- 0.5ers
ddrem143 remainder 0.5 2 -> 0.5
ddrem144 remainder 0.5 2.1 -> 0.5
ddrem145 remainder 0.5 2.01 -> 0.50
ddrem146 remainder 0.5 2.001 -> 0.500
ddrem147 remainder 0.50 2 -> 0.50
ddrem148 remainder 0.50 2.01 -> 0.50
ddrem149 remainder 0.50 2.001 -> 0.500
-- steadies
ddrem150 remainder 1 1 -> 0
ddrem151 remainder 1 2 -> 1
ddrem152 remainder 1 3 -> 1
ddrem153 remainder 1 4 -> 1
ddrem154 remainder 1 5 -> 1
ddrem155 remainder 1 6 -> 1
ddrem156 remainder 1 7 -> 1
ddrem157 remainder 1 8 -> 1
ddrem158 remainder 1 9 -> 1
ddrem159 remainder 1 10 -> 1
ddrem160 remainder 1 1 -> 0
ddrem161 remainder 2 1 -> 0
ddrem162 remainder 3 1 -> 0
ddrem163 remainder 4 1 -> 0
ddrem164 remainder 5 1 -> 0
ddrem165 remainder 6 1 -> 0
ddrem166 remainder 7 1 -> 0
ddrem167 remainder 8 1 -> 0
ddrem168 remainder 9 1 -> 0
ddrem169 remainder 10 1 -> 0
-- some differences from remainderNear
ddrem171 remainder 0.4 1.020 -> 0.400
ddrem172 remainder 0.50 1.020 -> 0.500
ddrem173 remainder 0.51 1.020 -> 0.510
ddrem174 remainder 0.52 1.020 -> 0.520
ddrem175 remainder 0.6 1.020 -> 0.600
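-- (remainder uses the integer quotient truncated toward zero, whereas
-- remainder-near rounds the quotient to the nearest integer, which can
-- flip the sign of the result)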
-- More flavours of remainder by 0
ddrem201 remainder 0 0 -> NaN Division_undefined
ddrem202 remainder 0.0E5 0 -> NaN Division_undefined
ddrem203 remainder 0.000 0 -> NaN Division_undefined
ddrem204 remainder 0.0001 0 -> NaN Invalid_operation
ddrem205 remainder 0.01 0 -> NaN Invalid_operation
ddrem206 remainder 0.1 0 -> NaN Invalid_operation
ddrem207 remainder 1 0 -> NaN Invalid_operation
ddrem208 remainder 1 0.0 -> NaN Invalid_operation
ddrem209 remainder 10 0.0 -> NaN Invalid_operation
ddrem210 remainder 1E+100 0.0 -> NaN Invalid_operation
ddrem211 remainder 1E+380 0 -> NaN Invalid_operation
-- some differences from remainderNear
ddrem231 remainder -0.4 1.020 -> -0.400
ddrem232 remainder -0.50 1.020 -> -0.500
ddrem233 remainder -0.51 1.020 -> -0.510
ddrem234 remainder -0.52 1.020 -> -0.520
ddrem235 remainder -0.6 1.020 -> -0.600
-- high Xs
ddrem240 remainder 1E+2 1.00 -> 0.00
-- ddrem3xx are from DiagBigDecimal
ddrem301 remainder 1 3 -> 1
ddrem302 remainder 5 5 -> 0
ddrem303 remainder 13 10 -> 3
ddrem304 remainder 13 50 -> 13
ddrem305 remainder 13 100 -> 13
ddrem306 remainder 13 1000 -> 13
ddrem307 remainder .13 1 -> 0.13
ddrem308 remainder 0.133 1 -> 0.133
ddrem309 remainder 0.1033 1 -> 0.1033
ddrem310 remainder 1.033 1 -> 0.033
ddrem311 remainder 10.33 1 -> 0.33
ddrem312 remainder 10.33 10 -> 0.33
ddrem313 remainder 103.3 1 -> 0.3
ddrem314 remainder 133 10 -> 3
ddrem315 remainder 1033 10 -> 3
ddrem316 remainder 1033 50 -> 33
ddrem317 remainder 101.0 3 -> 2.0
ddrem318 remainder 102.0 3 -> 0.0
ddrem319 remainder 103.0 3 -> 1.0
ddrem320 remainder 2.40 1 -> 0.40
ddrem321 remainder 2.400 1 -> 0.400
ddrem322 remainder 2.4 1 -> 0.4
ddrem323 remainder 2.4 2 -> 0.4
ddrem324 remainder 2.400 2 -> 0.400
ddrem325 remainder 1 0.3 -> 0.1
ddrem326 remainder 1 0.30 -> 0.10
ddrem327 remainder 1 0.300 -> 0.100
ddrem328 remainder 1 0.3000 -> 0.1000
ddrem329 remainder 1.0 0.3 -> 0.1
ddrem330 remainder 1.00 0.3 -> 0.10
ddrem331 remainder 1.000 0.3 -> 0.100
ddrem332 remainder 1.0000 0.3 -> 0.1000
ddrem333 remainder 0.5 2 -> 0.5
ddrem334 remainder 0.5 2.1 -> 0.5
ddrem335 remainder 0.5 2.01 -> 0.50
ddrem336 remainder 0.5 2.001 -> 0.500
ddrem337 remainder 0.50 2 -> 0.50
ddrem338 remainder 0.50 2.01 -> 0.50
ddrem339 remainder 0.50 2.001 -> 0.500
ddrem340 remainder 0.5 0.5000001 -> 0.5000000
ddrem341 remainder 0.5 0.50000001 -> 0.50000000
ddrem342 remainder 0.5 0.500000001 -> 0.500000000
ddrem343 remainder 0.5 0.5000000001 -> 0.5000000000
ddrem344 remainder 0.5 0.50000000001 -> 0.50000000000
ddrem345 remainder 0.5 0.4999999 -> 1E-7
ddrem346 remainder 0.5 0.49999999 -> 1E-8
ddrem347 remainder 0.5 0.499999999 -> 1E-9
ddrem348 remainder 0.5 0.4999999999 -> 1E-10
ddrem349 remainder 0.5 0.49999999999 -> 1E-11
ddrem350 remainder 0.5 0.499999999999 -> 1E-12
ddrem351 remainder 0.03 7 -> 0.03
ddrem352 remainder 5 2 -> 1
ddrem353 remainder 4.1 2 -> 0.1
ddrem354 remainder 4.01 2 -> 0.01
ddrem355 remainder 4.001 2 -> 0.001
ddrem356 remainder 4.0001 2 -> 0.0001
ddrem357 remainder 4.00001 2 -> 0.00001
ddrem358 remainder 4.000001 2 -> 0.000001
ddrem359 remainder 4.0000001 2 -> 1E-7
ddrem360 remainder 1.2 0.7345 -> 0.4655
ddrem361 remainder 0.8 12 -> 0.8
ddrem362 remainder 0.8 0.2 -> 0.0
ddrem363 remainder 0.8 0.3 -> 0.2
ddrem364 remainder 0.800 12 -> 0.800
ddrem365 remainder 0.800 1.7 -> 0.800
ddrem366 remainder 2.400 2 -> 0.400
ddrem371 remainder 2.400 2 -> 0.400
ddrem381 remainder 12345 1 -> 0
ddrem382 remainder 12345 1.0001 -> 0.7657
ddrem383 remainder 12345 1.001 -> 0.668
ddrem384 remainder 12345 1.01 -> 0.78
ddrem385 remainder 12345 1.1 -> 0.8
ddrem386 remainder 12355 4 -> 3
ddrem387 remainder 12345 4 -> 1
ddrem388 remainder 12355 4.0001 -> 2.6912
ddrem389 remainder 12345 4.0001 -> 0.6914
ddrem390 remainder 12345 4.9 -> 1.9
ddrem391 remainder 12345 4.99 -> 4.73
ddrem392 remainder 12345 4.999 -> 2.469
ddrem393 remainder 12345 4.9999 -> 0.2469
ddrem394 remainder 12345 5 -> 0
ddrem395 remainder 12345 5.0001 -> 4.7532
ddrem396 remainder 12345 5.001 -> 2.532
ddrem397 remainder 12345 5.01 -> 0.36
ddrem398 remainder 12345 5.1 -> 3.0
-- the nasty division-by-1 cases
ddrem401 remainder 0.5 1 -> 0.5
ddrem402 remainder 0.55 1 -> 0.55
ddrem403 remainder 0.555 1 -> 0.555
ddrem404 remainder 0.5555 1 -> 0.5555
ddrem405 remainder 0.55555 1 -> 0.55555
ddrem406 remainder 0.555555 1 -> 0.555555
ddrem407 remainder 0.5555555 1 -> 0.5555555
ddrem408 remainder 0.55555555 1 -> 0.55555555
ddrem409 remainder 0.555555555 1 -> 0.555555555
-- folddowns
ddrem421 remainder 1E+384 1 -> NaN Division_impossible
ddrem422 remainder 1E+384 1E+383 -> 0E+369 Clamped
ddrem423 remainder 1E+384 2E+383 -> 0E+369 Clamped
ddrem424 remainder 1E+384 3E+383 -> 1.00000000000000E+383 Clamped
ddrem425 remainder 1E+384 4E+383 -> 2.00000000000000E+383 Clamped
ddrem426 remainder 1E+384 5E+383 -> 0E+369 Clamped
ddrem427 remainder 1E+384 6E+383 -> 4.00000000000000E+383 Clamped
ddrem428 remainder 1E+384 7E+383 -> 3.00000000000000E+383 Clamped
ddrem429 remainder 1E+384 8E+383 -> 2.00000000000000E+383 Clamped
ddrem430 remainder 1E+384 9E+383 -> 1.00000000000000E+383 Clamped
-- tinies
ddrem431 remainder 1E-397 1E-398 -> 0E-398
ddrem432 remainder 1E-397 2E-398 -> 0E-398
ddrem433 remainder 1E-397 3E-398 -> 1E-398 Subnormal
ddrem434 remainder 1E-397 4E-398 -> 2E-398 Subnormal
ddrem435 remainder 1E-397 5E-398 -> 0E-398
ddrem436 remainder 1E-397 6E-398 -> 4E-398 Subnormal
ddrem437 remainder 1E-397 7E-398 -> 3E-398 Subnormal
ddrem438 remainder 1E-397 8E-398 -> 2E-398 Subnormal
ddrem439 remainder 1E-397 9E-398 -> 1E-398 Subnormal
ddrem440 remainder 1E-397 10E-398 -> 0E-398
ddrem441 remainder 1E-397 11E-398 -> 1.0E-397 Subnormal
ddrem442 remainder 100E-397 11E-398 -> 1.0E-397 Subnormal
ddrem443 remainder 100E-397 20E-398 -> 0E-398
ddrem444 remainder 100E-397 21E-398 -> 1.3E-397 Subnormal
ddrem445 remainder 100E-397 30E-398 -> 1.0E-397 Subnormal
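-- (nonzero results smaller in magnitude than 1E-383 are subnormal; the
-- smallest representable exponent for decDouble is -398)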
-- zero signs
ddrem650 remainder 1 1 -> 0
ddrem651 remainder -1 1 -> -0
ddrem652 remainder 1 -1 -> 0
ddrem653 remainder -1 -1 -> -0
ddrem654 remainder 0 1 -> 0
ddrem655 remainder -0 1 -> -0
ddrem656 remainder 0 -1 -> 0
ddrem657 remainder -0 -1 -> -0
ddrem658 remainder 0.00 1 -> 0.00
ddrem659 remainder -0.00 1 -> -0.00
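-- (the result, whether zero or not, always takes the sign of the dividend)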
-- Specials
ddrem680 remainder Inf -Inf -> NaN Invalid_operation
ddrem681 remainder Inf -1000 -> NaN Invalid_operation
ddrem682 remainder Inf -1 -> NaN Invalid_operation
ddrem683 remainder Inf 0 -> NaN Invalid_operation
ddrem684 remainder Inf -0 -> NaN Invalid_operation
ddrem685 remainder Inf 1 -> NaN Invalid_operation
ddrem686 remainder Inf 1000 -> NaN Invalid_operation
ddrem687 remainder Inf Inf -> NaN Invalid_operation
ddrem688 remainder -1000 Inf -> -1000
ddrem689 remainder -Inf Inf -> NaN Invalid_operation
ddrem691 remainder -1 Inf -> -1
ddrem692 remainder 0 Inf -> 0
ddrem693 remainder -0 Inf -> -0
ddrem694 remainder 1 Inf -> 1
ddrem695 remainder 1000 Inf -> 1000
ddrem696 remainder Inf Inf -> NaN Invalid_operation
ddrem700 remainder -Inf -Inf -> NaN Invalid_operation
ddrem701 remainder -Inf -1000 -> NaN Invalid_operation
ddrem702 remainder -Inf -1 -> NaN Invalid_operation
ddrem703 remainder -Inf -0 -> NaN Invalid_operation
ddrem704 remainder -Inf 0 -> NaN Invalid_operation
ddrem705 remainder -Inf 1 -> NaN Invalid_operation
ddrem706 remainder -Inf 1000 -> NaN Invalid_operation
ddrem707 remainder -Inf Inf -> NaN Invalid_operation
ddrem708 remainder -Inf -Inf -> NaN Invalid_operation
ddrem709 remainder -1000 Inf -> -1000
ddrem710 remainder -1 -Inf -> -1
ddrem711 remainder -0 -Inf -> -0
ddrem712 remainder 0 -Inf -> 0
ddrem713 remainder 1 -Inf -> 1
ddrem714 remainder 1000 -Inf -> 1000
ddrem715 remainder Inf -Inf -> NaN Invalid_operation
ddrem721 remainder NaN -Inf -> NaN
ddrem722 remainder NaN -1000 -> NaN
ddrem723 remainder NaN -1 -> NaN
ddrem724 remainder NaN -0 -> NaN
ddrem725 remainder -NaN 0 -> -NaN
ddrem726 remainder NaN 1 -> NaN
ddrem727 remainder NaN 1000 -> NaN
ddrem728 remainder NaN Inf -> NaN
ddrem729 remainder NaN -NaN -> NaN
ddrem730 remainder -Inf NaN -> NaN
ddrem731 remainder -1000 NaN -> NaN
ddrem732 remainder -1 NaN -> NaN
ddrem733 remainder -0 -NaN -> -NaN
ddrem734 remainder 0 NaN -> NaN
ddrem735 remainder 1 -NaN -> -NaN
ddrem736 remainder 1000 NaN -> NaN
ddrem737 remainder Inf NaN -> NaN
ddrem741 remainder sNaN -Inf -> NaN Invalid_operation
ddrem742 remainder sNaN -1000 -> NaN Invalid_operation
ddrem743 remainder -sNaN -1 -> -NaN Invalid_operation
ddrem744 remainder sNaN -0 -> NaN Invalid_operation
ddrem745 remainder sNaN 0 -> NaN Invalid_operation
ddrem746 remainder sNaN 1 -> NaN Invalid_operation
ddrem747 remainder sNaN 1000 -> NaN Invalid_operation
ddrem749 remainder sNaN NaN -> NaN Invalid_operation
ddrem750 remainder sNaN sNaN -> NaN Invalid_operation
ddrem751 remainder NaN sNaN -> NaN Invalid_operation
ddrem752 remainder -Inf sNaN -> NaN Invalid_operation
ddrem753 remainder -1000 sNaN -> NaN Invalid_operation
ddrem754 remainder -1 sNaN -> NaN Invalid_operation
ddrem755 remainder -0 sNaN -> NaN Invalid_operation
ddrem756 remainder 0 sNaN -> NaN Invalid_operation
ddrem757 remainder 1 sNaN -> NaN Invalid_operation
ddrem758 remainder 1000 sNaN -> NaN Invalid_operation
ddrem759 remainder Inf -sNaN -> -NaN Invalid_operation
-- propagating NaNs
ddrem760 remainder NaN1 NaN7 -> NaN1
ddrem761 remainder sNaN2 NaN8 -> NaN2 Invalid_operation
ddrem762 remainder NaN3 sNaN9 -> NaN9 Invalid_operation
ddrem763 remainder sNaN4 sNaN10 -> NaN4 Invalid_operation
ddrem764 remainder 15 NaN11 -> NaN11
ddrem765 remainder NaN6 NaN12 -> NaN6
ddrem766 remainder Inf NaN13 -> NaN13
ddrem767 remainder NaN14 -Inf -> NaN14
ddrem768 remainder 0 NaN15 -> NaN15
ddrem769 remainder NaN16 -0 -> NaN16
-- edge cases of impossible
ddrem770 remainder 1234567890123456 10 -> 6
ddrem771 remainder 1234567890123456 1 -> 0
ddrem772 remainder 1234567890123456 0.1 -> NaN Division_impossible
ddrem773 remainder 1234567890123456 0.01 -> NaN Division_impossible
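-- (Division_impossible arises when the integer part of the quotient would
-- need more than the 16 digits of working precision)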
-- long operand checks
ddrem801 remainder 12345678000 100 -> 0
ddrem802 remainder 1 12345678000 -> 1
ddrem803 remainder 1234567800 10 -> 0
ddrem804 remainder 1 1234567800 -> 1
ddrem805 remainder 1234567890 10 -> 0
ddrem806 remainder 1 1234567890 -> 1
ddrem807 remainder 1234567891 10 -> 1
ddrem808 remainder 1 1234567891 -> 1
ddrem809 remainder 12345678901 100 -> 1
ddrem810 remainder 1 12345678901 -> 1
ddrem811 remainder 1234567896 10 -> 6
ddrem812 remainder 1 1234567896 -> 1
ddrem821 remainder 12345678000 100 -> 0
ddrem822 remainder 1 12345678000 -> 1
ddrem823 remainder 1234567800 10 -> 0
ddrem824 remainder 1 1234567800 -> 1
ddrem825 remainder 1234567890 10 -> 0
ddrem826 remainder 1 1234567890 -> 1
ddrem827 remainder 1234567891 10 -> 1
ddrem828 remainder 1 1234567891 -> 1
ddrem829 remainder 12345678901 100 -> 1
ddrem830 remainder 1 12345678901 -> 1
ddrem831 remainder 1234567896 10 -> 6
ddrem832 remainder 1 1234567896 -> 1
-- from divideint
ddrem840 remainder 100000000.0 1 -> 0.0
ddrem841 remainder 100000000.4 1 -> 0.4
ddrem842 remainder 100000000.5 1 -> 0.5
ddrem843 remainder 100000000.9 1 -> 0.9
ddrem844 remainder 100000000.999 1 -> 0.999
ddrem850 remainder 100000003 5 -> 3
ddrem851 remainder 10000003 5 -> 3
ddrem852 remainder 1000003 5 -> 3
ddrem853 remainder 100003 5 -> 3
ddrem854 remainder 10003 5 -> 3
ddrem855 remainder 1003 5 -> 3
ddrem856 remainder 103 5 -> 3
ddrem857 remainder 13 5 -> 3
ddrem858 remainder 1 5 -> 1
-- Vladimir's cases 1234567890123456
ddrem860 remainder 123.0e1 1000000000000000 -> 1230
ddrem861 remainder 1230 1000000000000000 -> 1230
ddrem862 remainder 12.3e2 1000000000000000 -> 1230
ddrem863 remainder 1.23e3 1000000000000000 -> 1230
ddrem864 remainder 123e1 1000000000000000 -> 1230
ddrem870 remainder 123e1 1000000000000000 -> 1230
ddrem871 remainder 123e1 100000000000000 -> 1230
ddrem872 remainder 123e1 10000000000000 -> 1230
ddrem873 remainder 123e1 1000000000000 -> 1230
ddrem874 remainder 123e1 100000000000 -> 1230
ddrem875 remainder 123e1 10000000000 -> 1230
ddrem876 remainder 123e1 1000000000 -> 1230
ddrem877 remainder 123e1 100000000 -> 1230
ddrem878 remainder 1230 100000000 -> 1230
ddrem879 remainder 123e1 10000000 -> 1230
ddrem880 remainder 123e1 1000000 -> 1230
ddrem881 remainder 123e1 100000 -> 1230
ddrem882 remainder 123e1 10000 -> 1230
ddrem883 remainder 123e1 1000 -> 230
ddrem884 remainder 123e1 100 -> 30
ddrem885 remainder 123e1 10 -> 0
ddrem886 remainder 123e1 1 -> 0
ddrem890 remainder 123e1 2000000000000000 -> 1230
ddrem891 remainder 123e1 200000000000000 -> 1230
ddrem892 remainder 123e1 20000000000000 -> 1230
ddrem893 remainder 123e1 2000000000000 -> 1230
ddrem894 remainder 123e1 200000000000 -> 1230
ddrem895 remainder 123e1 20000000000 -> 1230
ddrem896 remainder 123e1 2000000000 -> 1230
ddrem897 remainder 123e1 200000000 -> 1230
ddrem899 remainder 123e1 20000000 -> 1230
ddrem900 remainder 123e1 2000000 -> 1230
ddrem901 remainder 123e1 200000 -> 1230
ddrem902 remainder 123e1 20000 -> 1230
ddrem903 remainder 123e1 2000 -> 1230
ddrem904 remainder 123e1 200 -> 30
ddrem905 remainder 123e1 20 -> 10
ddrem906 remainder 123e1 2 -> 0
ddrem910 remainder 123e1 5000000000000000 -> 1230
ddrem911 remainder 123e1 500000000000000 -> 1230
ddrem912 remainder 123e1 50000000000000 -> 1230
ddrem913 remainder 123e1 5000000000000 -> 1230
ddrem914 remainder 123e1 500000000000 -> 1230
ddrem915 remainder 123e1 50000000000 -> 1230
ddrem916 remainder 123e1 5000000000 -> 1230
ddrem917 remainder 123e1 500000000 -> 1230
ddrem919 remainder 123e1 50000000 -> 1230
ddrem920 remainder 123e1 5000000 -> 1230
ddrem921 remainder 123e1 500000 -> 1230
ddrem922 remainder 123e1 50000 -> 1230
ddrem923 remainder 123e1 5000 -> 1230
ddrem924 remainder 123e1 500 -> 230
ddrem925 remainder 123e1 50 -> 30
ddrem926 remainder 123e1 5 -> 0
ddrem930 remainder 123e1 9000000000000000 -> 1230
ddrem931 remainder 123e1 900000000000000 -> 1230
ddrem932 remainder 123e1 90000000000000 -> 1230
ddrem933 remainder 123e1 9000000000000 -> 1230
ddrem934 remainder 123e1 900000000000 -> 1230
ddrem935 remainder 123e1 90000000000 -> 1230
ddrem936 remainder 123e1 9000000000 -> 1230
ddrem937 remainder 123e1 900000000 -> 1230
ddrem939 remainder 123e1 90000000 -> 1230
ddrem940 remainder 123e1 9000000 -> 1230
ddrem941 remainder 123e1 900000 -> 1230
ddrem942 remainder 123e1 90000 -> 1230
ddrem943 remainder 123e1 9000 -> 1230
ddrem944 remainder 123e1 900 -> 330
ddrem945 remainder 123e1 90 -> 60
ddrem946 remainder 123e1 9 -> 6
ddrem950 remainder 123e1 1000000000000000 -> 1230
ddrem961 remainder 123e1 2999999999999999 -> 1230
ddrem962 remainder 123e1 3999999999999999 -> 1230
ddrem963 remainder 123e1 4999999999999999 -> 1230
ddrem964 remainder 123e1 5999999999999999 -> 1230
ddrem965 remainder 123e1 6999999999999999 -> 1230
ddrem966 remainder 123e1 7999999999999999 -> 1230
ddrem967 remainder 123e1 8999999999999999 -> 1230
ddrem968 remainder 123e1 9999999999999999 -> 1230
ddrem969 remainder 123e1 9876543210987654 -> 1230
ddrem980 remainder 123e1 1000E299 -> 1.23E+3 -- 123E+1 internally
-- overflow and underflow tests [from divide]
ddrem1051 remainder 1e+277 1e-311 -> NaN Division_impossible
ddrem1052 remainder 1e+277 -1e-311 -> NaN Division_impossible
ddrem1053 remainder -1e+277 1e-311 -> NaN Division_impossible
ddrem1054 remainder -1e+277 -1e-311 -> NaN Division_impossible
ddrem1055 remainder 1e-277 1e+311 -> 1E-277
ddrem1056 remainder 1e-277 -1e+311 -> 1E-277
ddrem1057 remainder -1e-277 1e+311 -> -1E-277
ddrem1058 remainder -1e-277 -1e+311 -> -1E-277
-- destructive subtract
ddrem1101 remainder 1234567890123456 1.000000000000001 -> 0.765432109876546
ddrem1102 remainder 1234567890123456 1.00000000000001 -> 0.65432109876557
ddrem1103 remainder 1234567890123456 1.0000000000001 -> 0.5432109876668
ddrem1104 remainder 1234567890123455 4.000000000000001 -> 2.691358027469137
ddrem1105 remainder 1234567890123456 4.000000000000001 -> 3.691358027469137
ddrem1106 remainder 1234567890123456 4.9999999999999 -> 0.6913578024696
ddrem1107 remainder 1234567890123456 4.99999999999999 -> 3.46913578024691
ddrem1108 remainder 1234567890123456 4.999999999999999 -> 1.246913578024691
ddrem1109 remainder 1234567890123456 5.000000000000001 -> 0.753086421975309
ddrem1110 remainder 1234567890123456 5.00000000000001 -> 3.53086421975310
ddrem1111 remainder 1234567890123456 5.0000000000001 -> 1.3086421975314
-- Null tests
ddrem1000 remainder 10 # -> NaN Invalid_operation
ddrem1001 remainder # 10 -> NaN Invalid_operation
| 26,387 | 601 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/libregrtest/save_env.py | import builtins
import locale
import logging
import os
import shutil
import sys
import sysconfig
import warnings
from test import support
try:
import _thread
import threading
except ImportError:
threading = None
try:
import _multiprocessing, multiprocessing.process
except ImportError:
multiprocessing = None
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False, *, pgo=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
self.pgo = pgo
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
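    # A hypothetical example (not one of the actual resources below): an
    # entry 'sys.dont_write_bytecode' would be handled by a pair of methods
    # named by replacing '.' with '_', roughly:
    #
    #     def get_sys_dont_write_bytecode(self):
    #         return sys.dont_write_bytecode
    #     def restore_sys_dont_write_bytecode(self, saved):
    #         sys.dont_write_bytecode = saved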
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
'warnings.filters', 'asyncore.socket_map',
'logging._handlers', 'logging._handlerList', 'sys.gettrace',
'sys.warnoptions',
# multiprocessing.process._cleanup() may release ref
# to a thread, so check processes first.
'multiprocessing.process._dangling', 'threading._dangling',
'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
'files', 'locale', 'warnings.showwarning',
'shutil_archive_formats', 'shutil_unpack_formats',
)
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_sys_path_hooks(self):
return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
def restore_sys_path_hooks(self, saved_hooks):
sys.path_hooks = saved_hooks[1]
sys.path_hooks[:] = saved_hooks[2]
def get_sys_gettrace(self):
return sys.gettrace()
def restore_sys_gettrace(self, trace_fxn):
sys.settrace(trace_fxn)
def get___import__(self):
return builtins.__import__
def restore___import__(self, import_):
builtins.__import__ = import_
def get_warnings_filters(self):
return id(warnings.filters), warnings.filters, warnings.filters[:]
def restore_warnings_filters(self, saved_filters):
warnings.filters = saved_filters[1]
warnings.filters[:] = saved_filters[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_shutil_archive_formats(self):
        # we could call shutil.get_archive_formats() but that only returns
        # the registry keys; we want to check the values too (the functions
        # that are registered)
return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
def restore_shutil_archive_formats(self, saved):
shutil._ARCHIVE_FORMATS = saved[0]
shutil._ARCHIVE_FORMATS.clear()
shutil._ARCHIVE_FORMATS.update(saved[1])
def get_shutil_unpack_formats(self):
return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
def restore_shutil_unpack_formats(self, saved):
shutil._UNPACK_FORMATS = saved[0]
shutil._UNPACK_FORMATS.clear()
shutil._UNPACK_FORMATS.update(saved[1])
def get_logging__handlers(self):
# _handlers is a WeakValueDictionary
return id(logging._handlers), logging._handlers, logging._handlers.copy()
def restore_logging__handlers(self, saved_handlers):
# Can't easily revert the logging state
pass
def get_logging__handlerList(self):
# _handlerList is a list of weakrefs to handlers
return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
def restore_logging__handlerList(self, saved_handlerList):
# Can't easily revert the logging state
pass
def get_sys_warnoptions(self):
return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
def restore_sys_warnoptions(self, saved_options):
sys.warnoptions = saved_options[1]
sys.warnoptions[:] = saved_options[2]
# Controlling dangling references to Thread objects can make it easier
# to track reference leaks.
def get_threading__dangling(self):
if not threading:
return None
# This copies the weakrefs without making any strong reference
return threading._dangling.copy()
def restore_threading__dangling(self, saved):
if not threading:
return
threading._dangling.clear()
threading._dangling.update(saved)
# Same for Process objects
def get_multiprocessing_process__dangling(self):
if not multiprocessing:
return None
# Unjoined process objects can survive after process exits
multiprocessing.process._cleanup()
# This copies the weakrefs without making any strong reference
return multiprocessing.process._dangling.copy()
def restore_multiprocessing_process__dangling(self, saved):
if not multiprocessing:
return
multiprocessing.process._dangling.clear()
multiprocessing.process._dangling.update(saved)
def get_sysconfig__CONFIG_VARS(self):
# make sure the dict is initialized
sysconfig.get_config_var('prefix')
return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
dict(sysconfig._CONFIG_VARS))
def restore_sysconfig__CONFIG_VARS(self, saved):
sysconfig._CONFIG_VARS = saved[1]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(saved[2])
def get_sysconfig__INSTALL_SCHEMES(self):
return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
sysconfig._INSTALL_SCHEMES.copy())
def restore_sysconfig__INSTALL_SCHEMES(self, saved):
sysconfig._INSTALL_SCHEMES = saved[1]
sysconfig._INSTALL_SCHEMES.clear()
sysconfig._INSTALL_SCHEMES.update(saved[2])
def get_files(self):
return sorted(fn + ('/' if os.path.isdir(fn) else '')
for fn in os.listdir())
def restore_files(self, saved_value):
fn = support.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
if os.path.isfile(fn):
support.unlink(fn)
elif os.path.isdir(fn):
support.rmtree(fn)
_lc = [getattr(locale, lc) for lc in dir(locale)
if lc.startswith('LC_')]
def get_locale(self):
pairings = []
for lc in self._lc:
try:
pairings.append((lc, locale.setlocale(lc, None)))
except (TypeError, ValueError):
continue
return pairings
def restore_locale(self, saved):
for lc, setting in saved:
locale.setlocale(lc, setting)
def get_warnings_showwarning(self):
return warnings.showwarning
def restore_warnings_showwarning(self, fxn):
warnings.showwarning = fxn
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
# Some resources use weak references
support.gc_collect()
# Read support.environment_altered, set by support helper functions
self.changed |= support.environment_altered
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet and not self.pgo:
print(f"Warning -- {name} was modified by {self.testname}",
file=sys.stderr, flush=True)
print(f" Before: {original}\n After: {current} ",
file=sys.stderr, flush=True)
return False
| 11,186 | 292 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/libregrtest/setup.py | import atexit
import faulthandler
import os
import signal
import sys
import unittest
from test import support
try:
import gc
except ImportError:
gc = None
from test.libregrtest.refleak import warm_caches
def setup_tests(ns):
try:
stderr_fd = sys.__stderr__.fileno()
except (ValueError, AttributeError):
# Catch ValueError to catch io.UnsupportedOperation on TextIOBase
# and ValueError on a closed stream.
#
# Catch AttributeError for stderr being None.
stderr_fd = None
else:
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True, file=stderr_fd)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True, file=stderr_fd)
replace_stdout()
support.record_original_stdout(sys.stdout)
if ns.testdir:
# Prepend test directory to sys.path, so runtest() will be able
# to locate tests
sys.path.insert(0, os.path.abspath(ns.testdir))
    # Sometimes __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail. This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir). All the modules imported after the chdir are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutizes them), the __file__ and __path__ will be absolute too.
    # Therefore it is necessary to manually absolutize the __file__ and __path__
    # of the packages to prevent later imports from failing when the CWD is
    # different.
for module in sys.modules.values():
if hasattr(module, '__path__'):
for index, path in enumerate(module.__path__):
module.__path__[index] = os.path.abspath(path)
if getattr(module, '__file__', None):
module.__file__ = os.path.abspath(module.__file__)
if ns.huntrleaks:
unittest.BaseTestSuite._cleanup = False
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
if ns.memlimit is not None:
support.set_memlimit(ns.memlimit)
if ns.threshold is not None:
gc.set_threshold(ns.threshold)
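    # On Windows, stop the OS and the CRT from showing modal error dialogs
    # (which would hang a crashing test run); CRT debug reports are routed
    # to stderr when verbose, and silenced otherwise.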
try:
import msvcrt
except ImportError:
pass
else:
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
if ns.verbose and ns.verbose >= 2:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
else:
msvcrt.CrtSetReportMode(m, 0)
support.use_resources = ns.use_resources
def replace_stdout():
"""Set stdout encoder error handler to backslashreplace (as stderr error
handler) to avoid UnicodeEncodeError when printing a traceback"""
stdout = sys.stdout
try:
fd = stdout.fileno()
except ValueError:
        # On IDLE, sys.stdout has no file descriptor and is not a TextIOWrapper
        # object. Leave sys.stdout unchanged.
#
# Catch ValueError to catch io.UnsupportedOperation on TextIOBase
# and ValueError on a closed stream.
return
sys.stdout = open(fd, 'w',
encoding=stdout.encoding,
errors="backslashreplace",
closefd=False,
newline='\n')
def restore_stdout():
sys.stdout.close()
sys.stdout = stdout
atexit.register(restore_stdout)
| 4,387 | 125 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/libregrtest/main.py | import datetime
import faulthandler
import locale
import os
import platform
import random
import re
import sys
import sysconfig
import tempfile
import time
import unittest
from test.libregrtest.cmdline import _parse_args
from test.libregrtest.runtest import (
findtests, runtest, get_abs_module,
STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN,
PROGRESS_MIN_TIME, format_test_result)
from test.libregrtest.setup import setup_tests
from test.libregrtest.utils import removepy, count, format_duration, printlist
from test import support
try:
import gc
except ImportError:
gc = None
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. This eases the cleanup of leftover
# files using the "make distclean" command.
if sysconfig.is_python_build():
TEMPDIR = sysconfig.get_config_var('abs_builddir')
if TEMPDIR is None:
# bpo-30284: On Windows, only srcdir is available. Using abs_builddir
# mostly matters on UNIX when building Python out of the source tree,
# especially when the source tree is read only.
TEMPDIR = sysconfig.get_config_var('srcdir')
TEMPDIR = os.path.join(TEMPDIR, 'build')
else:
TEMPDIR = tempfile.gettempdir()
TEMPDIR = os.path.abspath(TEMPDIR)
class Regrtest:
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
def __init__(self):
# Namespace of command line options
self.ns = None
# tests
self.tests = []
self.selected = []
# test results
self.good = []
self.bad = []
self.skipped = []
self.resource_denieds = []
self.environment_changed = []
self.rerun = []
self.run_no_tests = []
self.first_result = None
self.interrupted = False
# used by --slow
self.test_times = []
# used by --coverage, trace.Trace instance
self.tracer = None
# used by --findleaks, store for gc.garbage
self.found_garbage = []
# used to display the progress bar "[ 3/100]"
self.start_time = time.monotonic()
self.test_count = ''
self.test_count_width = 1
# used by --single
self.next_single_test = None
self.next_single_filename = None
# used by --junit-xml
self.testsuite_xml = None
def accumulate_result(self, test, result):
ok, test_time, xml_data = result
if ok not in (CHILD_ERROR, INTERRUPTED):
self.test_times.append((test_time, test))
if ok == PASSED:
self.good.append(test)
elif ok in (FAILED, CHILD_ERROR):
self.bad.append(test)
elif ok == ENV_CHANGED:
self.environment_changed.append(test)
elif ok == SKIPPED:
self.skipped.append(test)
elif ok == RESOURCE_DENIED:
self.skipped.append(test)
self.resource_denieds.append(test)
elif ok == TEST_DID_NOT_RUN:
self.run_no_tests.append(test)
elif ok != INTERRUPTED:
raise ValueError("invalid test result: %r" % ok)
if xml_data:
import xml.etree.ElementTree as ET
for e in xml_data:
try:
self.testsuite_xml.append(ET.fromstring(e))
except ET.ParseError:
print(xml_data, file=sys.__stderr__)
raise
def display_progress(self, test_index, test):
if self.ns.quiet:
return
# "[ 51/405/1] test_tcl passed"
line = f"{test_index:{self.test_count_width}}{self.test_count}"
fails = len(self.bad) + len(self.environment_changed)
if fails and not self.ns.pgo:
line = f"{line}/{fails}"
line = f"[{line}] {test}"
# add the system load prefix: "load avg: 1.80 "
if hasattr(os, 'getloadavg'):
load_avg_1min = os.getloadavg()[0]
line = f"load avg: {load_avg_1min:.2f} {line}"
# add the timestamp prefix: "0:01:05 "
test_time = time.monotonic() - self.start_time
test_time = datetime.timedelta(seconds=int(test_time))
line = f"{test_time} {line}"
print(line, flush=True)
def parse_args(self, kwargs):
ns = _parse_args(sys.argv[1:], **kwargs)
if ns.timeout and not hasattr(faulthandler, 'dump_traceback_later'):
print("Warning: The timeout option requires "
"faulthandler.dump_traceback_later", file=sys.stderr)
ns.timeout = None
if ns.threshold is not None and gc is None:
print('No GC available, ignore --threshold.', file=sys.stderr)
ns.threshold = None
if ns.findleaks:
if gc is not None:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
pass
#gc.set_debug(gc.DEBUG_SAVEALL)
else:
print('No GC available, disabling --findleaks',
file=sys.stderr)
ns.findleaks = False
if ns.xmlpath:
support.junit_xml_list = self.testsuite_xml = []
# Strip .py extensions.
removepy(ns.args)
return ns
def find_tests(self, tests):
self.tests = tests
if self.ns.single:
self.next_single_filename = os.path.join(TEMPDIR, 'pynexttest')
try:
with open(self.next_single_filename, 'r') as fp:
next_test = fp.read().strip()
self.tests = [next_test]
except OSError:
pass
if self.ns.fromfile:
self.tests = []
# regex to match 'test_builtin' in line:
# '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
with open(os.path.join(support.SAVEDCWD, self.ns.fromfile)) as fp:
for line in fp:
line = line.split('#', 1)[0]
line = line.strip()
match = regex.search(line)
if match is not None:
self.tests.append(match.group())
removepy(self.tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if self.ns.exclude:
for arg in self.ns.args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
self.ns.args = []
        # If testdir is set, then we are not running the Python test suite, so
        # don't add default tests to be executed or skipped (pass empty values)
if self.ns.testdir:
alltests = findtests(self.ns.testdir, list(), set())
else:
alltests = findtests(self.ns.testdir, stdtests, nottests)
if not self.ns.fromfile:
self.selected = self.tests or self.ns.args or alltests
else:
self.selected = self.tests
if self.ns.single:
self.selected = self.selected[:1]
try:
pos = alltests.index(self.selected[0])
self.next_single_test = alltests[pos + 1]
            except (IndexError, ValueError):
pass
# Remove all the selected tests that precede start if it's set.
if self.ns.start:
try:
del self.selected[:self.selected.index(self.ns.start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests"
% self.ns.start, file=sys.stderr)
if self.ns.randomize:
if self.ns.random_seed is None:
self.ns.random_seed = random.randrange(10000000)
random.seed(self.ns.random_seed)
random.shuffle(self.selected)
def list_tests(self):
for name in self.selected:
print(name)
def _list_cases(self, suite):
for test in suite:
if isinstance(test, unittest.loader._FailedTest):
continue
if isinstance(test, unittest.TestSuite):
self._list_cases(test)
elif isinstance(test, unittest.TestCase):
if support.match_test(test):
print(test.id())
def list_cases(self):
support.verbose = False
support.set_match_tests(self.ns.match_tests)
for test in self.selected:
abstest = get_abs_module(self.ns, test)
try:
suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
self._list_cases(suite)
except unittest.SkipTest:
self.skipped.append(test)
if self.skipped:
print(file=sys.stderr)
print(count(len(self.skipped), "test"), "skipped:", file=sys.stderr)
printlist(self.skipped, file=sys.stderr)
def rerun_failed_tests(self):
self.ns.verbose = True
self.ns.failfast = False
self.ns.verbose3 = False
self.first_result = self.get_tests_result()
print()
print("Re-running failed tests in verbose mode")
self.rerun = self.bad[:]
for test in self.rerun:
print("Re-running test %r in verbose mode" % test, flush=True)
try:
self.ns.verbose = True
ok = runtest(self.ns, test)
except KeyboardInterrupt:
self.interrupted = True
# print a newline separate from the ^C
print()
break
else:
if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
self.bad.remove(test)
else:
if self.bad:
print(count(len(self.bad), 'test'), "failed again:")
printlist(self.bad)
self.display_result()
def display_result(self):
# If running the test suite for PGO then no one cares about results.
if self.ns.pgo:
return
print()
print("== Tests result: %s ==" % self.get_tests_result())
if self.interrupted:
print()
# print a newline after ^C
print("Test suite interrupted by signal SIGINT.")
executed = set(self.good) | set(self.bad) | set(self.skipped)
omitted = set(self.selected) - executed
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if self.good and not self.ns.quiet:
print()
if (not self.bad
and not self.skipped
and not self.interrupted
and len(self.good) > 1):
print("All", end=' ')
print(count(len(self.good), "test"), "OK.")
printlist(self.good)
if self.ns.print_slow:
self.test_times.sort(reverse=True)
print()
print("10 slowest tests:")
for time, test in self.test_times[:10]:
print("- %s: %s" % (test, format_duration(time)))
if self.bad:
print()
print(count(len(self.bad), "test"), "failed:")
printlist(self.bad)
if self.environment_changed:
print()
print("{} altered the execution environment:".format(
count(len(self.environment_changed), "test")))
printlist(self.environment_changed)
if self.skipped and not self.ns.quiet:
print()
print(count(len(self.skipped), "test"), "skipped:")
printlist(self.skipped)
if self.rerun:
print()
print("%s:" % count(len(self.rerun), "re-run test"))
printlist(self.rerun)
if self.run_no_tests:
print()
print(count(len(self.run_no_tests), "test"), "run no tests:")
printlist(self.run_no_tests)
def run_tests_sequential(self):
if self.ns.trace:
import trace
self.tracer = trace.Trace(trace=False, count=True)
        # Take a snapshot: sys.modules.keys() is a live view, so newly
        # imported modules would otherwise always appear to be in it.
        save_modules = set(sys.modules)
print("Run tests sequentially")
previous_test = None
for test_index, test in enumerate(self.tests, 1):
start_time = time.monotonic()
text = test
if previous_test:
text = '%s -- %s' % (text, previous_test)
self.display_progress(test_index, text)
if self.tracer:
                # If we're tracing code coverage, then we don't exit with
                # status on a false return value from main.
cmd = ('result = runtest(self.ns, test); '
'self.accumulate_result(test, result)')
ns = dict(locals())
self.tracer.runctx(cmd, globals=globals(), locals=ns)
result = ns['result']
else:
try:
result = runtest(self.ns, test)
except KeyboardInterrupt:
self.interrupted = True
self.accumulate_result(test, (INTERRUPTED, None, None))
break
else:
self.accumulate_result(test, result)
previous_test = format_test_result(test, result[0])
test_time = time.monotonic() - start_time
if test_time >= PROGRESS_MIN_TIME:
previous_test = "%s in %s" % (previous_test, format_duration(test_time))
elif result[0] == PASSED:
# be quiet: say nothing if the test passed shortly
previous_test = None
if self.ns.findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
self.found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
        # Iterate over a copy, since support.unload() mutates sys.modules.
        for module in list(sys.modules):
if module not in save_modules and module.startswith("test."):
support.unload(module)
if previous_test:
print(previous_test)
def _test_forever(self, tests):
while True:
for test in tests:
yield test
if self.bad:
return
if self.ns.fail_env_changed and self.environment_changed:
return
def display_header(self):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
print("==", platform.platform(aliased=True),
"%s-endian" % sys.byteorder)
print("== cwd:", os.getcwd())
cpu_count = os.cpu_count()
if cpu_count:
print("== CPU count:", cpu_count)
print("== encodings: locale=%s, FS=%s"
% (locale.getpreferredencoding(False),
sys.getfilesystemencoding()))
def get_tests_result(self):
result = []
if self.bad:
result.append("FAILURE")
elif self.ns.fail_env_changed and self.environment_changed:
result.append("ENV CHANGED")
elif not any((self.good, self.bad, self.skipped, self.interrupted,
self.environment_changed)):
result.append("NO TEST RUN")
if self.interrupted:
result.append("INTERRUPTED")
if not result:
result.append("SUCCESS")
result = ', '.join(result)
if self.first_result:
result = '%s then %s' % (self.first_result, result)
return result
def run_tests(self):
# For a partial run, we do not need to clutter the output.
if (self.ns.header
or not(self.ns.pgo or self.ns.quiet or self.ns.single
or self.tests or self.ns.args)):
self.display_header()
if self.ns.huntrleaks:
warmup, repetitions, _ = self.ns.huntrleaks
if warmup < 3:
msg = ("WARNING: Running tests with --huntrleaks/-R and less than "
"3 warmup repetitions can give false positives!")
print(msg, file=sys.stdout, flush=True)
if self.ns.randomize:
print("Using random seed", self.ns.random_seed)
if self.ns.forever:
self.tests = self._test_forever(list(self.selected))
self.test_count = ''
self.test_count_width = 3
else:
self.tests = iter(self.selected)
self.test_count = '/{}'.format(len(self.selected))
self.test_count_width = len(self.test_count) - 1
if self.ns.use_mp:
from test.libregrtest.runtest_mp import run_tests_multiprocess
run_tests_multiprocess(self)
else:
self.run_tests_sequential()
def finalize(self):
if self.next_single_filename:
if self.next_single_test:
with open(self.next_single_filename, 'w') as fp:
fp.write(self.next_single_test + '\n')
else:
os.unlink(self.next_single_filename)
if self.tracer:
r = self.tracer.results()
r.write_results(show_missing=True, summary=True,
coverdir=self.ns.coverdir)
print()
duration = time.monotonic() - self.start_time
print("Total duration: %s" % format_duration(duration))
print("Tests result: %s" % self.get_tests_result())
if self.ns.runleaks:
os.system("leaks %d" % os.getpid())
def save_xml_result(self):
if not self.ns.xmlpath and not self.testsuite_xml:
return
import xml.etree.ElementTree as ET
root = ET.Element("testsuites")
# Manually count the totals for the overall summary
totals = {'tests': 0, 'errors': 0, 'failures': 0}
for suite in self.testsuite_xml:
root.append(suite)
for k in totals:
try:
totals[k] += int(suite.get(k, 0))
except ValueError:
pass
for k, v in totals.items():
root.set(k, str(v))
xmlpath = os.path.join(support.SAVEDCWD, self.ns.xmlpath)
with open(xmlpath, 'wb') as f:
for s in ET.tostringlist(root):
f.write(s)
def main(self, tests=None, **kwargs):
global TEMPDIR
if sysconfig.is_python_build():
try:
os.mkdir(TEMPDIR)
except FileExistsError:
pass
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
test_cwd = 'test_python_{}'.format(os.getpid())
test_cwd = os.path.join(TEMPDIR, test_cwd)
# Run the tests in a context manager that temporarily changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from support.SAVEDCWD.
with support.temp_cwd(test_cwd, quiet=True):
self._main(tests, kwargs)
def _main(self, tests, kwargs):
self.ns = self.parse_args(kwargs)
if self.ns.huntrleaks:
warmup, repetitions, _ = self.ns.huntrleaks
if warmup < 1 or repetitions < 1:
msg = ("Invalid values for the --huntrleaks/-R parameters. The "
"number of warmups and repetitions must be at least 1 "
"each (1:1).")
print(msg, file=sys.stderr, flush=True)
sys.exit(2)
if self.ns.worker_args is not None:
from test.libregrtest.runtest_mp import run_tests_worker
run_tests_worker(self.ns.worker_args)
if self.ns.wait:
input("Press any key to continue...")
support.PGO = self.ns.pgo
setup_tests(self.ns)
self.find_tests(tests)
if self.ns.list_tests:
self.list_tests()
sys.exit(0)
if self.ns.list_cases:
self.list_cases()
sys.exit(0)
self.run_tests()
self.display_result()
if self.ns.verbose2 and self.bad:
self.rerun_failed_tests()
self.finalize()
self.save_xml_result()
if self.bad:
sys.exit(2)
if self.interrupted:
sys.exit(130)
if self.ns.fail_env_changed and self.environment_changed:
sys.exit(3)
sys.exit(0)
def main(tests=None, **kwargs):
"""Run the Python suite."""
Regrtest().main(tests=tests, **kwargs)
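# Usage sketch (editor's note, not part of the original module): regrtest is
# normally driven via "python -m test [options]", but main() accepts the same
# settings as keyword arguments, e.g.:
#
#     from test.libregrtest import main
#     main(verbose=True, tests=['test_os'])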
cosmopolitan/third_party/python/Lib/test/libregrtest/cmdline.py
import argparse
import os
import sys
from test import support
USAGE = """\
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
"""
DESCRIPTION = """\
Run Python regression tests.
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
"""
EPILOG = """\
Additional option details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order of a standard run (i.e., this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2 GiB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
    subprocess - Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
tzdata - Run tests that require timezone data.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
--matchfile filters tests using a text file, one pattern per line.
Pattern examples:
- test method: test_stat_attributes
- test class: FileTests
- test identifier: test_os.FileTests.test_stat_attributes
"""
ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
# Other resources excluded from --use=all:
#
# - extralargefile (ex: test_zipfile64): really too slow to be enabled
# "by default"
# - tzdata: while needed to validate fully test_datetime, it makes
# test_datetime too slow (15-20 min on some buildbots) and so is disabled by
# default (see bpo-30822).
RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata')
class _ArgParser(argparse.ArgumentParser):
def error(self, message):
super().error(message + "\nPass -h or --help for complete help.")
def _create_parser():
# Set prog to prevent the uninformative "__main__.py" from displaying in
# error messages when using "python -m test ...".
parser = _ArgParser(prog='regrtest.py',
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG,
add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
    # Arguments with this clause added to their help are described further in
# the epilog's "Additional option details" section.
more_details = ' See the section at bottom for more details.'
group = parser.add_argument_group('General options')
# We add help explicitly to control what argument group it renders under.
group.add_argument('-h', '--help', action='help',
help='show this help message and exit')
group.add_argument('--timeout', metavar='TIMEOUT', type=float,
help='dump the traceback and exit if a test takes '
'more than TIMEOUT seconds; disabled if TIMEOUT '
                            'is negative or equal to zero')
group.add_argument('--wait', action='store_true',
help='wait for user input, e.g., allow a debugger '
'to be attached')
group.add_argument('--worker-args', metavar='ARGS')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
more_details)
group = parser.add_argument_group('Verbosity')
group.add_argument('-v', '--verbose', action='count',
help='run tests in verbose mode with output to stdout')
group.add_argument('-w', '--verbose2', action='store_true',
help='re-run failed tests in verbose mode')
group.add_argument('-W', '--verbose3', action='store_true',
help='display test output on failure')
group.add_argument('-q', '--quiet', action='store_true',
help='no output unless one or more tests fail')
group.add_argument('-o', '--slowest', action='store_true', dest='print_slow',
help='print the slowest 10 tests')
group.add_argument('--header', action='store_true',
help='print header with interpreter info')
group = parser.add_argument_group('Selecting tests')
group.add_argument('-r', '--randomize', action='store_true',
help='randomize test execution order.' + more_details)
group.add_argument('--randseed', metavar='SEED',
dest='random_seed', type=int,
help='pass a random seed to reproduce a previous '
'random run')
group.add_argument('-f', '--fromfile', metavar='FILE',
help='read names of tests to run from a file.' +
more_details)
group.add_argument('-x', '--exclude', action='store_true',
help='arguments are tests to *exclude*')
group.add_argument('-s', '--single', action='store_true',
help='single step through a set of tests.' +
more_details)
group.add_argument('-m', '--match', metavar='PAT',
dest='match_tests', action='append',
help='match test cases and methods with glob pattern PAT')
group.add_argument('--matchfile', metavar='FILENAME',
dest='match_filename',
help='similar to --match but get patterns from a '
'text file, one pattern per line')
group.add_argument('-G', '--failfast', action='store_true',
help='fail as soon as a test fails (only with -v or -W)')
group.add_argument('-u', '--use', metavar='RES1,RES2,...',
action='append', type=resources_list,
help='specify which special resource intensive tests '
'to run.' + more_details)
group.add_argument('-M', '--memlimit', metavar='LIMIT',
help='run very large memory-consuming tests.' +
more_details)
group.add_argument('--testdir', metavar='DIR',
type=relative_filename,
help='execute test files in the specified directory '
'(instead of the Python stdlib test suite)')
group = parser.add_argument_group('Special runs')
group.add_argument('-l', '--findleaks', action='store_true',
help='if GC is available detect tests that leak memory')
group.add_argument('-L', '--runleaks', action='store_true',
help='run the leaks(1) command just before exit.' +
more_details)
group.add_argument('-R', '--huntrleaks', metavar='RUNCOUNTS',
type=huntrleaks,
help='search for reference leaks (needs debug build, '
'very slow).' + more_details)
group.add_argument('-j', '--multiprocess', metavar='PROCESSES',
dest='use_mp', type=int,
help='run PROCESSES processes at once')
group.add_argument('-T', '--coverage', action='store_true',
dest='trace',
help='turn on code coverage tracing using the trace '
'module')
group.add_argument('-D', '--coverdir', metavar='DIR',
type=relative_filename,
help='directory where coverage files are put')
group.add_argument('-N', '--nocoverdir',
action='store_const', const=None, dest='coverdir',
help='put coverage files alongside modules')
group.add_argument('-t', '--threshold', metavar='THRESHOLD',
type=int,
help='call gc.set_threshold(THRESHOLD)')
group.add_argument('-n', '--nowindows', action='store_true',
help='suppress error message boxes on Windows')
group.add_argument('-F', '--forever', action='store_true',
help='run the specified tests in a loop, until an '
'error happens')
group.add_argument('--list-tests', action='store_true',
help="only write the name of tests that will be run, "
"don't execute them")
group.add_argument('--list-cases', action='store_true',
                       help='only write the name of test cases that will be '
                            'run, don\'t execute them')
group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
help='enable Profile Guided Optimization training')
group.add_argument('--fail-env-changed', action='store_true',
help='if a test file alters the environment, mark '
'the test as failed')
group.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME',
help='writes JUnit-style XML results to the specified '
'file')
return parser
def relative_filename(string):
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
return os.path.join(support.SAVEDCWD, string)
def huntrleaks(string):
args = string.split(':')
if len(args) not in (2, 3):
raise argparse.ArgumentTypeError(
'needs 2 or 3 colon-separated arguments')
nwarmup = int(args[0]) if args[0] else 5
ntracked = int(args[1]) if args[1] else 4
fname = args[2] if len(args) > 2 and args[2] else 'reflog.txt'
return nwarmup, ntracked, fname
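# Editor's sketch (not in the original file): a quick self-check of the
# huntrleaks() argument parser, using the defaults documented in the epilog
# (5 warmups, 4 tracked runs, "reflog.txt").
if __name__ == "__main__":
    assert huntrleaks(':') == (5, 4, 'reflog.txt')
    assert huntrleaks('6:3:leaks.log') == (6, 3, 'leaks.log')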
def resources_list(string):
u = [x.lower() for x in string.split(',')]
for r in u:
if r == 'all' or r == 'none':
continue
if r[0] == '-':
r = r[1:]
if r not in RESOURCE_NAMES:
raise argparse.ArgumentTypeError('invalid resource: ' + r)
return u
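# Editor's sketch (not in the original file): resources_list() only validates
# names; 'all', 'none', and '-'-prefixed entries are kept as-is, and the
# actual expansion happens later in _parse_args().
if __name__ == "__main__":
    assert resources_list('all,-gui') == ['all', '-gui']
    assert resources_list('network,decimal') == ['network', 'decimal']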
def _parse_args(args, **kwargs):
# Defaults
ns = argparse.Namespace(testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None, pgo=False)
for k, v in kwargs.items():
if not hasattr(ns, k):
raise TypeError('%r is an invalid keyword argument '
'for this function' % k)
setattr(ns, k, v)
if ns.use_resources is None:
ns.use_resources = []
parser = _create_parser()
# Issue #14191: argparse doesn't support "intermixed" positional and
# optional arguments. Use parse_known_args() as workaround.
ns.args = parser.parse_known_args(args=args, namespace=ns)[1]
for arg in ns.args:
if arg.startswith('-'):
parser.error("unrecognized arguments: %s" % arg)
sys.exit(1)
if ns.single and ns.fromfile:
parser.error("-s and -f don't go together!")
if ns.use_mp is not None and ns.trace:
parser.error("-T and -j don't go together!")
if ns.use_mp is not None and ns.findleaks:
parser.error("-l and -j don't go together!")
if ns.failfast and not (ns.verbose or ns.verbose3):
parser.error("-G/--failfast needs either -v or -W")
if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3):
parser.error("--pgo/-v don't go together!")
if ns.nowindows:
print("Warning: the --nowindows (-n) option is deprecated. "
"Use -vv to display assertions in stderr.", file=sys.stderr)
if ns.quiet:
ns.verbose = 0
if ns.timeout is not None:
if ns.timeout <= 0:
ns.timeout = None
if ns.use_mp is not None:
if ns.use_mp <= 0:
# Use all cores + extras for tests that like to sleep
ns.use_mp = 2 + (os.cpu_count() or 1)
if ns.use:
for a in ns.use:
for r in a:
if r == 'all':
ns.use_resources[:] = ALL_RESOURCES
continue
if r == 'none':
del ns.use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if remove:
if r in ns.use_resources:
ns.use_resources.remove(r)
elif r not in ns.use_resources:
ns.use_resources.append(r)
if ns.random_seed is not None:
ns.randomize = True
if ns.verbose:
ns.header = True
if ns.huntrleaks and ns.verbose3:
ns.verbose3 = False
print("WARNING: Disable --verbose3 because it's incompatible with "
"--huntrleaks: see http://bugs.python.org/issue27103",
file=sys.stderr)
if ns.match_filename:
if ns.match_tests is None:
ns.match_tests = []
filename = os.path.join(support.SAVEDCWD, ns.match_filename)
with open(filename) as fp:
for line in fp:
ns.match_tests.append(line.strip())
return ns
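# Minimal usage sketch (editor's addition): parse flags the same way
# regrtest's main() does; "-v" bumps the verbosity count and implies
# --header, and leftover positional arguments land in ns.args.
if __name__ == "__main__":
    ns = _parse_args(['-v', 'test_os'])
    assert ns.verbose == 1 and ns.header and ns.args == ['test_os']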
cosmopolitan/third_party/python/Lib/test/libregrtest/refleak.py
import errno
import os
import re
import sys
import warnings
from inspect import isabstract
from test import support
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copyreg
import collections.abc
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
# bpo-31217: Integer pool to get a single integer object for the same
    # value. The pool is used to prevent false alarms when checking for memory
# block leaks. Fill the pool with values in -1000..1000 which are the most
# common (reference, memory block, file descriptor) differences.
int_pool = {value: value for value in range(-1000, 1000)}
def get_pooled_int(value):
return int_pool.setdefault(value, value)
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
rc_deltas = [0] * repcount
alloc_deltas = [0] * repcount
fd_deltas = [0] * repcount
print("beginning", repcount, "repetitions", file=sys.stderr)
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr,
flush=True)
# initialize variables to make pyflakes quiet
rc_before = alloc_before = fd_before = 0
for i in range(repcount):
indirect_test()
alloc_after, rc_after, fd_after = dash_R_cleanup(fs, ps, pic, zdc,
abcs)
print('.', end='', file=sys.stderr, flush=True)
if i >= nwarmup:
rc_deltas[i] = get_pooled_int(rc_after - rc_before)
alloc_deltas[i] = get_pooled_int(alloc_after - alloc_before)
fd_deltas[i] = get_pooled_int(fd_after - fd_before)
alloc_before = alloc_after
rc_before = rc_after
fd_before = fd_after
print(file=sys.stderr)
# These checkers return False on success, True on failure
def check_rc_deltas(deltas):
        # Checker for reference counters and memory blocks.
#
# bpo-30776: Try to ignore false positives:
#
# [3, 0, 0]
# [0, 1, 0]
# [8, -8, 1]
#
# Expected leaks:
#
# [5, 5, 6]
# [10, 1, 1]
return all(delta >= 1 for delta in deltas)
def check_fd_deltas(deltas):
return any(deltas)
failed = False
for deltas, item_name, checker in [
(rc_deltas, 'references', check_rc_deltas),
(alloc_deltas, 'memory blocks', check_rc_deltas),
(fd_deltas, 'file descriptors', check_fd_deltas)
]:
# ignore warmup runs
deltas = deltas[nwarmup:]
if checker(deltas):
msg = '%s leaked %s %s, sum=%s' % (
test, deltas, item_name, sum(deltas))
print(msg, file=sys.stderr, flush=True)
with open(fname, "a") as refrep:
print(msg, file=refrep)
refrep.flush()
failed = True
return failed
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copyreg
import collections.abc
from weakref import WeakSet
# Restore some original values.
warnings.filters[:] = fs
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
abs_classes = [getattr(collections.abc, a) for a in collections.abc.__all__]
abs_classes = filter(isabstract, abs_classes)
if 'typing' in sys.modules:
t = sys.modules['typing']
# These classes require special treatment because they do not appear
# in direct subclasses of collections.abc classes
abs_classes = list(abs_classes) + [t.ChainMap, t.Counter, t.DefaultDict]
for abc in abs_classes:
for obj in abc.__subclasses__() + [abc]:
obj._abc_registry = abcs.get(obj, WeakSet()).copy()
obj._abc_cache.clear()
obj._abc_negative_cache.clear()
clear_caches()
# Collect cyclic trash and read memory statistics immediately after.
func1 = sys.getallocatedblocks
func2 = sys.gettotalrefcount
gc.collect()
return func1(), func2(), support.fd_count()
def clear_caches():
import gc
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
# Clear assorted module caches.
# Don't worry about resetting the cache if the module is not loaded
try:
distutils_dir_util = sys.modules['distutils.dir_util']
except KeyError:
pass
else:
distutils_dir_util._path_created.clear()
re.purge()
try:
_strptime = sys.modules['_strptime']
except KeyError:
pass
else:
_strptime._regex_cache.clear()
try:
urllib_parse = sys.modules['urllib.parse']
except KeyError:
pass
else:
urllib_parse.clear_cache()
try:
urllib_request = sys.modules['urllib.request']
except KeyError:
pass
else:
urllib_request.urlcleanup()
try:
linecache = sys.modules['linecache']
except KeyError:
pass
else:
linecache.clearcache()
try:
mimetypes = sys.modules['mimetypes']
except KeyError:
pass
else:
mimetypes._default_mime_types()
try:
filecmp = sys.modules['filecmp']
except KeyError:
pass
else:
filecmp._cache.clear()
try:
struct = sys.modules['struct']
except KeyError:
pass
else:
struct._clearcache()
try:
doctest = sys.modules['doctest']
except KeyError:
pass
else:
doctest.master = None
try:
typing = sys.modules['typing']
except KeyError:
pass
else:
for f in typing._cleanups:
f()
gc.collect()
def warm_caches():
# char cache
s = bytes(range(256))
for i in range(256):
s[i:i+1]
# unicode cache
[chr(i) for i in range(256)]
# int cache
list(range(-5, 257))
cosmopolitan/third_party/python/Lib/test/libregrtest/runtest.py
import faulthandler
import importlib
import io
import os
import sys
import time
import traceback
import unittest
from test import support
from test.libregrtest.refleak import dash_R, clear_caches
from test.libregrtest.save_env import saved_test_environment
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
TEST_DID_NOT_RUN = -6  # the test did not run any tests
_FORMAT_TEST_RESULT = {
PASSED: '%s passed',
FAILED: '%s failed',
ENV_CHANGED: '%s failed (env changed)',
SKIPPED: '%s skipped',
RESOURCE_DENIED: '%s skipped (resource denied)',
INTERRUPTED: '%s interrupted',
CHILD_ERROR: '%s crashed',
TEST_DID_NOT_RUN: '%s run no tests',
}
# Minimum duration of a test to display its duration or to mention that
# the test is running in background
PROGRESS_MIN_TIME = 30.0 # seconds
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def format_test_result(test_name, result):
fmt = _FORMAT_TEST_RESULT.get(result, "%s")
return fmt % test_name
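# Editor's sketch (not in the original file): how the result codes above
# render in the progress output.
if __name__ == "__main__":
    assert format_test_result('test_os', PASSED) == 'test_os passed'
    assert format_test_result('test_os', CHILD_ERROR) == 'test_os crashed'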
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
mod, ext = os.path.splitext(name)
if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
tests.append(mod)
return stdtests + sorted(tests)
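# Editor's note: with the defaults, findtests() returns STDTESTS first and
# then every other test_* module found in Lib/test in sorted order, e.g.
# ['test_grammar', ..., 'test_support', 'test_abc', 'test_array', ...].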
def get_abs_module(ns, test):
if test.startswith('test.') or ns.testdir:
return test
else:
# Always import it from the test package
return 'test.' + test
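# Editor's sketch (not in the original file): tests are imported from the
# "test" package unless --testdir points elsewhere. _NS below is a minimal
# hypothetical stand-in for the options namespace.
if __name__ == "__main__":
    class _NS:
        testdir = None
    assert get_abs_module(_NS, 'test_os') == 'test.test_os'
    assert get_abs_module(_NS, 'test.test_os') == 'test.test_os'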
def runtest(ns, test):
"""Run a single test.
ns -- regrtest namespace of options
test -- the name of the test
Returns the tuple (result, test_time, xml_data), where result is one
of the constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
        TEST_DID_NOT_RUN test ran no tests.
If ns.xmlpath is not None, xml_data is a list containing each
generated testsuite element.
"""
output_on_failure = ns.verbose3
use_timeout = (ns.timeout is not None)
if use_timeout:
faulthandler.dump_traceback_later(ns.timeout, exit=True)
try:
support.set_match_tests(ns.match_tests)
# reset the environment_altered flag to detect if a test altered
# the environment
support.environment_altered = False
support.junit_xml_list = xml_list = [] if ns.xmlpath else None
if ns.failfast:
support.failfast = True
if output_on_failure:
support.verbose = True
stream = io.StringIO()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stream
sys.stderr = stream
result = runtest_inner(ns, test, display_failure=False)
if result[0] != PASSED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
support.verbose = ns.verbose # Tell tests to be moderately quiet
result = runtest_inner(ns, test, display_failure=not ns.verbose)
if xml_list:
import xml.etree.ElementTree as ET
xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list]
else:
xml_data = None
return result + (xml_data,)
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
cleanup_test_droppings(test, ns.verbose)
support.junit_xml_list = None
def post_test_cleanup():
support.reap_children()
def runtest_inner(ns, test, display_failure=True):
support.unload(test)
test_time = 0.0
refleak = False # True if the test leaked references.
try:
abstest = get_abs_module(ns, test)
clear_caches()
with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
start_time = time.time()
the_module = importlib.import_module(abstest)
# If the test has a test_main, that will run the appropriate
# tests. If not, use normal unittest test loading.
test_runner = getattr(the_module, "test_main", None)
if test_runner is None:
def test_runner():
loader = unittest.TestLoader()
tests = loader.loadTestsFromModule(the_module)
for error in loader.errors:
print(error, file=sys.stderr)
if loader.errors:
raise Exception("errors while loading tests")
support.run_unittest(tests)
if ns.huntrleaks:
refleak = dash_R(the_module, test, test_runner, ns.huntrleaks)
else:
test_runner()
test_time = time.time() - start_time
post_test_cleanup()
except support.ResourceDenied as msg:
if not ns.quiet and not ns.pgo:
print(test, "skipped --", msg, flush=True)
return RESOURCE_DENIED, test_time
except unittest.SkipTest as msg:
if not ns.quiet and not ns.pgo:
print(test, "skipped --", msg, flush=True)
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
if not ns.pgo:
if display_failure:
print("test", test, "failed --", msg, file=sys.stderr,
flush=True)
else:
print("test", test, "failed", file=sys.stderr, flush=True)
return FAILED, test_time
except support.TestDidNotRun:
return TEST_DID_NOT_RUN, test_time
except:
msg = traceback.format_exc()
if not ns.pgo:
print("test", test, "crashed --", msg, file=sys.stderr,
flush=True)
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
import shutil
import stat
import gc
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def findtestdir(path=None):
return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
cosmopolitan/third_party/python/Lib/test/libregrtest/runtest_mp.py
import faulthandler
import json
import os
import queue
import sys
import time
import traceback
import types
from test import support
try:
import _thread
import threading
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from test.libregrtest.runtest import (
runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
format_test_result)
from test.libregrtest.setup import setup_tests
from test.libregrtest.utils import format_duration
# Display the running tests if nothing happened last N seconds
PROGRESS_UPDATE = 30.0 # seconds
# If interrupted, display the wait progress every N seconds
WAIT_PROGRESS = 2.0 # seconds
def run_test_in_subprocess(testname, ns):
"""Run the given test in a subprocess with --worker-args.
ns is the option Namespace parsed from command-line arguments. regrtest
is invoked in a subprocess with the --worker-args argument; when the
subprocess exits, its return code, stdout and stderr are returned as a
3-tuple.
"""
from subprocess import Popen, PIPE
ns_dict = vars(ns)
worker_args = (ns_dict, testname)
worker_args = json.dumps(worker_args)
cmd = [sys.executable, *support.args_from_interpreter_flags(),
'-u', # Unbuffered stdout and stderr
'-m', 'test.regrtest',
'--worker-args', worker_args]
if ns.pgo:
cmd += ['--pgo']
# Running the child from the same working directory as regrtest's original
# invocation ensures that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(cmd,
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
with popen:
stdout, stderr = popen.communicate()
retcode = popen.wait()
return retcode, stdout, stderr
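# Editor's note: for test "test_os" the command built above is roughly
#   [sys.executable, '-u', '-m', 'test.regrtest',
#    '--worker-args', '[{...ns as a dict...}, "test_os"]']
# plus whatever support.args_from_interpreter_flags() propagates, and
# '--pgo' when PGO training is enabled.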
def run_tests_worker(worker_args):
ns_dict, testname = json.loads(worker_args)
ns = types.SimpleNamespace(**ns_dict)
setup_tests(ns)
try:
result = runtest(ns, testname)
except KeyboardInterrupt:
result = INTERRUPTED, '', None
except BaseException as e:
traceback.print_exc()
        result = CHILD_ERROR, str(e), None
print() # Force a newline (just in case)
print(json.dumps(result), flush=True)
sys.exit(0)
# We do not use a generator because generators cannot be shared safely
# between threads; next() is serialized with a lock instead.
class MultiprocessIterator:
"""A thread-safe iterator over tests for multiprocess mode."""
def __init__(self, tests):
self.interrupted = False
self.lock = threading.Lock()
self.tests = tests
def __iter__(self):
return self
def __next__(self):
with self.lock:
if self.interrupted:
raise StopIteration('tests interrupted')
return next(self.tests)
class MultiprocessThread(threading.Thread):
def __init__(self, pending, output, ns):
super().__init__()
self.pending = pending
self.output = output
self.ns = ns
self.current_test = None
self.start_time = None
def _runtest(self):
try:
test = next(self.pending)
except StopIteration:
self.output.put((None, None, None, None))
return True
try:
self.start_time = time.monotonic()
self.current_test = test
retcode, stdout, stderr = run_test_in_subprocess(test, self.ns)
finally:
self.current_test = None
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode, None)
self.output.put((test, stdout.rstrip(), stderr.rstrip(),
result))
return False
stdout, _, result = stdout.strip().rpartition("\n")
if not result:
self.output.put((None, None, None, None))
return True
result = json.loads(result)
assert len(result) == 3, f"Invalid result tuple: {result!r}"
self.output.put((test, stdout.rstrip(), stderr.rstrip(),
result))
return False
def run(self):
try:
stop = False
while not stop:
stop = self._runtest()
except BaseException:
self.output.put((None, None, None, None))
raise
def run_tests_multiprocess(regrtest):
output = queue.Queue()
pending = MultiprocessIterator(regrtest.tests)
test_timeout = regrtest.ns.timeout
use_timeout = (test_timeout is not None)
workers = [MultiprocessThread(pending, output, regrtest.ns)
for i in range(regrtest.ns.use_mp)]
print("Run tests in parallel using %s child processes"
% len(workers))
for worker in workers:
worker.start()
def get_running(workers):
running = []
for worker in workers:
current_test = worker.current_test
if not current_test:
continue
dt = time.monotonic() - worker.start_time
if dt >= PROGRESS_MIN_TIME:
text = '%s (%s)' % (current_test, format_duration(dt))
running.append(text)
return running
finished = 0
test_index = 1
get_timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME)
try:
while finished < regrtest.ns.use_mp:
if use_timeout:
faulthandler.dump_traceback_later(test_timeout, exit=True)
try:
item = output.get(timeout=get_timeout)
except queue.Empty:
running = get_running(workers)
if running and not regrtest.ns.pgo:
print('running: %s' % ', '.join(running), flush=True)
continue
test, stdout, stderr, result = item
if test is None:
finished += 1
continue
regrtest.accumulate_result(test, result)
# Display progress
ok, test_time, xml_data = result
text = format_test_result(test, ok)
if (ok not in (CHILD_ERROR, INTERRUPTED)
and test_time >= PROGRESS_MIN_TIME
and not regrtest.ns.pgo):
text += ' (%s)' % format_duration(test_time)
elif ok == CHILD_ERROR:
text = '%s (%s)' % (text, test_time)
running = get_running(workers)
if running and not regrtest.ns.pgo:
text += ' -- running: %s' % ', '.join(running)
regrtest.display_progress(test_index, text)
# Copy stdout and stderr from the child process
if stdout:
print(stdout, flush=True)
if stderr and not regrtest.ns.pgo:
print(stderr, file=sys.stderr, flush=True)
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
test_index += 1
except KeyboardInterrupt:
regrtest.interrupted = True
pending.interrupted = True
print()
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
# If tests are interrupted, wait until tests complete
wait_start = time.monotonic()
while True:
running = [worker.current_test for worker in workers]
running = list(filter(bool, running))
if not running:
break
dt = time.monotonic() - wait_start
line = "Waiting for %s (%s tests)" % (', '.join(running), len(running))
if dt >= WAIT_PROGRESS:
line = "%s since %.0f sec" % (line, dt)
print(line, flush=True)
for worker in workers:
worker.join(WAIT_PROGRESS)
cosmopolitan/third_party/python/Lib/test/libregrtest/__init__.py
# We import importlib *ASAP* in order to test #15386
import importlib
from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES
from test.libregrtest.main import main
cosmopolitan/third_party/python/Lib/test/libregrtest/utils.py
import os.path
import math
import textwrap
def format_duration(seconds):
ms = math.ceil(seconds * 1e3)
seconds, ms = divmod(ms, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
parts = []
if hours:
parts.append('%s hour' % hours)
if minutes:
parts.append('%s min' % minutes)
if seconds:
parts.append('%s sec' % seconds)
if ms:
parts.append('%s ms' % ms)
if not parts:
return '0 ms'
parts = parts[:2]
return ' '.join(parts)
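# Editor's sketch (not in the original file): format_duration() rounds up to
# whole milliseconds and keeps at most the two largest units.
if __name__ == "__main__":
    assert format_duration(0.0005) == '1 ms'
    assert format_duration(3.5) == '3 sec 500 ms'
    assert format_duration(7265) == '2 hour 1 min'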
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
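# Editor's sketch (not in the original file): removepy() strips only a ".py"
# suffix, modifying the list in place.
if __name__ == "__main__":
    names = ['test_os.py', 'test_sys']
    removepy(names)
    assert names == ['test_os', 'test_sys']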
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4, file=None):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks),
file=file)
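# Editor's sketch (not in the original file): elements are sorted and wrapped
# with a 4-space indent.
if __name__ == "__main__":
    printlist({'test_os', 'test_sys', 'test_abc'})
    # prints:     test_abc test_os test_sys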
cosmopolitan/third_party/python/Lib/test/tracedmodules/testmod.py
def func(x):
b = x + 1
return b + 2
def func2():
"""Test function for issue 9936 """
return (1,
2,
3)
cosmopolitan/third_party/python/Lib/test/tracedmodules/__init__.py
"""This package contains modules that help testing the trace.py module. Note
that the exact location of functions in these modules is important, as trace.py
takes the real line numbers into account.
"""
cosmopolitan/third_party/python/Lib/test/test_importlib/test_util.py
from . import util
abc = util.import_importlib('importlib.abc')
init = util.import_importlib('importlib')
machinery = util.import_importlib('importlib.machinery')
importlib_util = util.import_importlib('importlib.util')
import importlib.util
import os
import pathlib
import string
import sys
from test import support
import types
import unittest
import warnings
class DecodeSourceBytesTests:
source = "string ='ü'"
    def test_utf8_default(self):
source_bytes = self.source.encode('utf-8')
self.assertEqual(self.util.decode_source(source_bytes), self.source)
def test_specified_encoding(self):
source = '# coding=latin-1\n' + self.source
source_bytes = source.encode('latin-1')
assert source_bytes != source.encode('utf-8')
self.assertEqual(self.util.decode_source(source_bytes), source)
def test_universal_newlines(self):
source = '\r\n'.join([self.source, self.source])
source_bytes = source.encode('utf-8')
self.assertEqual(self.util.decode_source(source_bytes),
'\n'.join([self.source, self.source]))
(Frozen_DecodeSourceBytesTests,
Source_DecodeSourceBytesTests
) = util.test_both(DecodeSourceBytesTests, util=importlib_util)
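# Editor's sketch (not part of the test suite): decode_source() honors a
# PEP 263 coding cookie and normalizes newlines, which is what the tests
# above assert.
if __name__ == "__main__":
    src = "# coding=latin-1\ns = 'ü'\r\n".encode('latin-1')
    print(importlib.util.decode_source(src))  # "# coding=latin-1\ns = 'ü'\n"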
class ModuleFromSpecTests:
def test_no_create_module(self):
class Loader:
def exec_module(self, module):
pass
spec = self.machinery.ModuleSpec('test', Loader())
with self.assertRaises(ImportError):
module = self.util.module_from_spec(spec)
def test_create_module_returns_None(self):
class Loader(self.abc.Loader):
def create_module(self, spec):
return None
spec = self.machinery.ModuleSpec('test', Loader())
module = self.util.module_from_spec(spec)
self.assertIsInstance(module, types.ModuleType)
self.assertEqual(module.__name__, spec.name)
def test_create_module(self):
name = 'already set'
class CustomModule(types.ModuleType):
pass
class Loader(self.abc.Loader):
def create_module(self, spec):
module = CustomModule(spec.name)
module.__name__ = name
return module
spec = self.machinery.ModuleSpec('test', Loader())
module = self.util.module_from_spec(spec)
self.assertIsInstance(module, CustomModule)
self.assertEqual(module.__name__, name)
def test___name__(self):
spec = self.machinery.ModuleSpec('test', object())
module = self.util.module_from_spec(spec)
self.assertEqual(module.__name__, spec.name)
def test___spec__(self):
spec = self.machinery.ModuleSpec('test', object())
module = self.util.module_from_spec(spec)
self.assertEqual(module.__spec__, spec)
def test___loader__(self):
loader = object()
spec = self.machinery.ModuleSpec('test', loader)
module = self.util.module_from_spec(spec)
self.assertIs(module.__loader__, loader)
def test___package__(self):
spec = self.machinery.ModuleSpec('test.pkg', object())
module = self.util.module_from_spec(spec)
self.assertEqual(module.__package__, spec.parent)
def test___path__(self):
spec = self.machinery.ModuleSpec('test', object(), is_package=True)
module = self.util.module_from_spec(spec)
self.assertEqual(module.__path__, spec.submodule_search_locations)
def test___file__(self):
spec = self.machinery.ModuleSpec('test', object(), origin='some/path')
spec.has_location = True
module = self.util.module_from_spec(spec)
self.assertEqual(module.__file__, spec.origin)
def test___cached__(self):
spec = self.machinery.ModuleSpec('test', object())
spec.cached = 'some/path'
spec.has_location = True
module = self.util.module_from_spec(spec)
self.assertEqual(module.__cached__, spec.cached)
(Frozen_ModuleFromSpecTests,
Source_ModuleFromSpecTests
) = util.test_both(ModuleFromSpecTests, abc=abc, machinery=machinery,
util=importlib_util)
class ModuleForLoaderTests:
"""Tests for importlib.util.module_for_loader."""
@classmethod
def module_for_loader(cls, func):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return cls.util.module_for_loader(func)
def test_warning(self):
# Should raise a PendingDeprecationWarning when used.
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
with self.assertRaises(DeprecationWarning):
func = self.util.module_for_loader(lambda x: x)
def return_module(self, name):
fxn = self.module_for_loader(lambda self, module: module)
return fxn(self, name)
def raise_exception(self, name):
def to_wrap(self, module):
raise ImportError
fxn = self.module_for_loader(to_wrap)
try:
fxn(self, name)
except ImportError:
pass
def test_new_module(self):
# Test that when no module exists in sys.modules a new module is
# created.
module_name = 'a.b.c'
with util.uncache(module_name):
module = self.return_module(module_name)
self.assertIn(module_name, sys.modules)
self.assertIsInstance(module, types.ModuleType)
self.assertEqual(module.__name__, module_name)
def test_reload(self):
# Test that a module is reused if already in sys.modules.
class FakeLoader:
def is_package(self, name):
return True
@self.module_for_loader
def load_module(self, module):
return module
name = 'a.b.c'
module = types.ModuleType('a.b.c')
module.__loader__ = 42
module.__package__ = 42
with util.uncache(name):
sys.modules[name] = module
loader = FakeLoader()
returned_module = loader.load_module(name)
self.assertIs(returned_module, sys.modules[name])
self.assertEqual(module.__loader__, loader)
self.assertEqual(module.__package__, name)
def test_new_module_failure(self):
# Test that a module is removed from sys.modules if added but an
# exception is raised.
name = 'a.b.c'
with util.uncache(name):
self.raise_exception(name)
self.assertNotIn(name, sys.modules)
def test_reload_failure(self):
# Test that a failure on reload leaves the module in-place.
name = 'a.b.c'
module = types.ModuleType(name)
with util.uncache(name):
sys.modules[name] = module
self.raise_exception(name)
self.assertIs(module, sys.modules[name])
def test_decorator_attrs(self):
def fxn(self, module): pass
wrapped = self.module_for_loader(fxn)
self.assertEqual(wrapped.__name__, fxn.__name__)
self.assertEqual(wrapped.__qualname__, fxn.__qualname__)
def test_false_module(self):
# If for some odd reason a module is considered false, still return it
# from sys.modules.
class FalseModule(types.ModuleType):
def __bool__(self): return False
name = 'mod'
module = FalseModule(name)
with util.uncache(name):
self.assertFalse(module)
sys.modules[name] = module
given = self.return_module(name)
self.assertIs(given, module)
def test_attributes_set(self):
# __name__, __loader__, and __package__ should be set (when
# is_package() is defined; undefined implicitly tested elsewhere).
class FakeLoader:
def __init__(self, is_package):
self._pkg = is_package
def is_package(self, name):
return self._pkg
@self.module_for_loader
def load_module(self, module):
return module
name = 'pkg.mod'
with util.uncache(name):
loader = FakeLoader(False)
module = loader.load_module(name)
self.assertEqual(module.__name__, name)
self.assertIs(module.__loader__, loader)
self.assertEqual(module.__package__, 'pkg')
name = 'pkg.sub'
with util.uncache(name):
loader = FakeLoader(True)
module = loader.load_module(name)
self.assertEqual(module.__name__, name)
self.assertIs(module.__loader__, loader)
self.assertEqual(module.__package__, name)
(Frozen_ModuleForLoaderTests,
Source_ModuleForLoaderTests
) = util.test_both(ModuleForLoaderTests, util=importlib_util)
class SetPackageTests:
"""Tests for importlib.util.set_package."""
def verify(self, module, expect):
"""Verify the module has the expected value for __package__ after
passing through set_package."""
fxn = lambda: module
wrapped = self.util.set_package(fxn)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
wrapped()
self.assertTrue(hasattr(module, '__package__'))
self.assertEqual(expect, module.__package__)
def test_top_level(self):
# __package__ should be set to the empty string if a top-level module.
# Implicitly tests when package is set to None.
module = types.ModuleType('module')
module.__package__ = None
self.verify(module, '')
def test_package(self):
# Test setting __package__ for a package.
module = types.ModuleType('pkg')
module.__path__ = ['<path>']
module.__package__ = None
self.verify(module, 'pkg')
def test_submodule(self):
# Test __package__ for a module in a package.
module = types.ModuleType('pkg.mod')
module.__package__ = None
self.verify(module, 'pkg')
def test_setting_if_missing(self):
# __package__ should be set if it is missing.
module = types.ModuleType('mod')
if hasattr(module, '__package__'):
delattr(module, '__package__')
self.verify(module, '')
def test_leaving_alone(self):
# If __package__ is set and not None then leave it alone.
for value in (True, False):
module = types.ModuleType('mod')
module.__package__ = value
self.verify(module, value)
def test_decorator_attrs(self):
def fxn(module): pass
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
wrapped = self.util.set_package(fxn)
self.assertEqual(wrapped.__name__, fxn.__name__)
self.assertEqual(wrapped.__qualname__, fxn.__qualname__)
(Frozen_SetPackageTests,
Source_SetPackageTests
) = util.test_both(SetPackageTests, util=importlib_util)
class SetLoaderTests:
"""Tests importlib.util.set_loader()."""
@property
def DummyLoader(self):
# Set DummyLoader on the class lazily.
class DummyLoader:
@self.util.set_loader
def load_module(self, module):
return self.module
self.__class__.DummyLoader = DummyLoader
return DummyLoader
def test_no_attribute(self):
loader = self.DummyLoader()
loader.module = types.ModuleType('blah')
try:
del loader.module.__loader__
except AttributeError:
pass
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual(loader, loader.load_module('blah').__loader__)
def test_attribute_is_None(self):
loader = self.DummyLoader()
loader.module = types.ModuleType('blah')
loader.module.__loader__ = None
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual(loader, loader.load_module('blah').__loader__)
def test_not_reset(self):
loader = self.DummyLoader()
loader.module = types.ModuleType('blah')
loader.module.__loader__ = 42
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual(42, loader.load_module('blah').__loader__)
(Frozen_SetLoaderTests,
Source_SetLoaderTests
) = util.test_both(SetLoaderTests, util=importlib_util)
class ResolveNameTests:
"""Tests importlib.util.resolve_name()."""
def test_absolute(self):
# bacon
self.assertEqual('bacon', self.util.resolve_name('bacon', None))
def test_absolute_within_package(self):
# bacon in spam
self.assertEqual('bacon', self.util.resolve_name('bacon', 'spam'))
def test_no_package(self):
# .bacon in ''
with self.assertRaises(ValueError):
self.util.resolve_name('.bacon', '')
def test_in_package(self):
# .bacon in spam
self.assertEqual('spam.eggs.bacon',
self.util.resolve_name('.bacon', 'spam.eggs'))
def test_other_package(self):
# ..bacon in spam.eggs
self.assertEqual('spam.bacon',
self.util.resolve_name('..bacon', 'spam.eggs'))
def test_escape(self):
# ..bacon in spam
with self.assertRaises(ValueError):
self.util.resolve_name('..bacon', 'spam')
(Frozen_ResolveNameTests,
Source_ResolveNameTests
) = util.test_both(ResolveNameTests, util=importlib_util)
class FindSpecTests:
class FakeMetaFinder:
@staticmethod
def find_spec(name, path=None, target=None): return name, path, target
def test_sys_modules(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
loader = 'a loader!'
spec = self.machinery.ModuleSpec(name, loader)
module.__loader__ = loader
module.__spec__ = spec
sys.modules[name] = module
found = self.util.find_spec(name)
self.assertEqual(found, spec)
def test_sys_modules_without___loader__(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
del module.__loader__
loader = 'a loader!'
spec = self.machinery.ModuleSpec(name, loader)
module.__spec__ = spec
sys.modules[name] = module
found = self.util.find_spec(name)
self.assertEqual(found, spec)
def test_sys_modules_spec_is_None(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
module.__spec__ = None
sys.modules[name] = module
with self.assertRaises(ValueError):
self.util.find_spec(name)
def test_sys_modules_loader_is_None(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
spec = self.machinery.ModuleSpec(name, None)
module.__spec__ = spec
sys.modules[name] = module
found = self.util.find_spec(name)
self.assertEqual(found, spec)
def test_sys_modules_spec_is_not_set(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
try:
del module.__spec__
except AttributeError:
pass
sys.modules[name] = module
with self.assertRaises(ValueError):
self.util.find_spec(name)
def test_success(self):
name = 'some_mod'
with util.uncache(name):
with util.import_state(meta_path=[self.FakeMetaFinder]):
self.assertEqual((name, None, None),
self.util.find_spec(name))
def test_nothing(self):
# None is returned upon failure to find a loader.
self.assertIsNone(self.util.find_spec('nevergoingtofindthismodule'))
def test_find_submodule(self):
name = 'spam'
subname = 'ham'
with util.temp_module(name, pkg=True) as pkg_dir:
fullname, _ = util.submodule(name, subname, pkg_dir)
spec = self.util.find_spec(fullname)
self.assertIsNot(spec, None)
self.assertIn(name, sorted(sys.modules))
self.assertNotIn(fullname, sorted(sys.modules))
# Ensure successive calls behave the same.
spec_again = self.util.find_spec(fullname)
self.assertEqual(spec_again, spec)
def test_find_submodule_parent_already_imported(self):
name = 'spam'
subname = 'ham'
with util.temp_module(name, pkg=True) as pkg_dir:
self.init.import_module(name)
fullname, _ = util.submodule(name, subname, pkg_dir)
spec = self.util.find_spec(fullname)
self.assertIsNot(spec, None)
self.assertIn(name, sorted(sys.modules))
self.assertNotIn(fullname, sorted(sys.modules))
# Ensure successive calls behave the same.
spec_again = self.util.find_spec(fullname)
self.assertEqual(spec_again, spec)
def test_find_relative_module(self):
name = 'spam'
subname = 'ham'
with util.temp_module(name, pkg=True) as pkg_dir:
fullname, _ = util.submodule(name, subname, pkg_dir)
relname = '.' + subname
spec = self.util.find_spec(relname, name)
self.assertIsNot(spec, None)
self.assertIn(name, sorted(sys.modules))
self.assertNotIn(fullname, sorted(sys.modules))
# Ensure successive calls behave the same.
spec_again = self.util.find_spec(fullname)
self.assertEqual(spec_again, spec)
def test_find_relative_module_missing_package(self):
name = 'spam'
subname = 'ham'
with util.temp_module(name, pkg=True) as pkg_dir:
fullname, _ = util.submodule(name, subname, pkg_dir)
relname = '.' + subname
with self.assertRaises(ValueError):
self.util.find_spec(relname)
self.assertNotIn(name, sorted(sys.modules))
self.assertNotIn(fullname, sorted(sys.modules))
(Frozen_FindSpecTests,
Source_FindSpecTests
) = util.test_both(FindSpecTests, init=init, util=importlib_util,
machinery=machinery)
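# find_spec() lookup order, sketched from what the tests above cover:
#   1. If the name is already in sys.modules, return module.__spec__,
#      raising ValueError when __spec__ is missing or None.
#   2. Otherwise query each finder on sys.meta_path via find_spec().
#   3. Return None when nothing matches. A relative name requires the
#      'package' argument, and finding 'pkg.sub' imports 'pkg' first.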
class MagicNumberTests:
def test_length(self):
# Should be 4 bytes.
self.assertEqual(len(self.util.MAGIC_NUMBER), 4)
def test_incorporates_rn(self):
# The magic number ends with \r\n so that it is corrupted, and therefore
# rejected, if the file is ever processed in text mode.
self.assertTrue(self.util.MAGIC_NUMBER.endswith(b'\r\n'))
(Frozen_MagicNumberTests,
Source_MagicNumberTests
) = util.test_both(MagicNumberTests, util=importlib_util)
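# Why b'\r\n' matters, in brief: if a .pyc were ever written or read in
# text mode, newline translation would corrupt those two bytes, the magic
# number check would fail, and the damaged file would be rejected rather
# than trusted.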
class PEP3147Tests:
"""Tests of PEP 3147-related functions: cache_from_source and source_from_cache."""
tag = sys.implementation.cache_tag
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag to not be None')
def test_cache_from_source(self):
# Given the path to a .py file, return the path to its PEP 3147
# defined .pyc file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_no_cache_tag(self):
# No cache tag means NotImplementedError.
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
self.util.cache_from_source('whatever.py')
def test_cache_from_source_no_dot(self):
# Directory with a dot, filename without dot.
path = os.path.join('foo.bar', 'file')
expect = os.path.join('foo.bar', '__pycache__',
'file{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_debug_override(self):
# Given the path to a .py file, return the path to its PEP 3147/PEP 488
# defined .pyc file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.assertEqual(self.util.cache_from_source(path, False),
self.util.cache_from_source(path, optimization=1))
self.assertEqual(self.util.cache_from_source(path, True),
self.util.cache_from_source(path, optimization=''))
with warnings.catch_warnings():
warnings.simplefilter('error')
with self.assertRaises(DeprecationWarning):
self.util.cache_from_source(path, False)
with self.assertRaises(DeprecationWarning):
self.util.cache_from_source(path, True)
def test_cache_from_source_cwd(self):
path = 'foo.py'
expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_override(self):
# When debug_override is not None, it can be any true-ish or false-ish
# value.
path = os.path.join('foo', 'bar', 'baz.py')
# However if the bool-ishness can't be determined, the exception
# propagates.
class Bearish:
def __bool__(self): raise RuntimeError
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.assertEqual(self.util.cache_from_source(path, []),
self.util.cache_from_source(path, optimization=1))
self.assertEqual(self.util.cache_from_source(path, [17]),
self.util.cache_from_source(path, optimization=''))
with self.assertRaises(RuntimeError):
self.util.cache_from_source('/foo/bar/baz.py', Bearish())
def test_cache_from_source_optimization_empty_string(self):
# Setting 'optimization' to '' leads to no optimization tag (PEP 488).
path = 'foo.py'
expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_optimization_None(self):
# Setting 'optimization' to None uses the interpreter's optimization.
# (PEP 488)
path = 'foo.py'
optimization_level = sys.flags.optimize
almost_expect = os.path.join('__pycache__', 'foo.{}'.format(self.tag))
if optimization_level == 0:
expect = almost_expect + '.pyc'
elif optimization_level <= 2:
expect = almost_expect + '.opt-{}.pyc'.format(optimization_level)
else:
msg = '{!r} is a non-standard optimization level'.format(optimization_level)
self.skipTest(msg)
self.assertEqual(self.util.cache_from_source(path, optimization=None),
expect)
def test_cache_from_source_optimization_set(self):
# The 'optimization' parameter accepts anything that has a string repr
# that passes str.isalnum().
path = 'foo.py'
valid_characters = string.ascii_letters + string.digits
almost_expect = os.path.join('__pycache__', 'foo.{}'.format(self.tag))
got = self.util.cache_from_source(path, optimization=valid_characters)
# Test all valid characters are accepted.
self.assertEqual(got,
almost_expect + '.opt-{}.pyc'.format(valid_characters))
# str() should be called on argument.
self.assertEqual(self.util.cache_from_source(path, optimization=42),
almost_expect + '.opt-42.pyc')
# Invalid characters raise ValueError.
with self.assertRaises(ValueError):
self.util.cache_from_source(path, optimization='path/is/bad')
def test_cache_from_source_debug_override_optimization_both_set(self):
# Can only set one of the optimization-related parameters.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with self.assertRaises(TypeError):
self.util.cache_from_source('foo.py', False, optimization='')
@unittest.skipUnless(os.sep == '\\' and os.altsep == '/',
'test meaningful only where os.altsep is defined')
def test_sep_altsep_and_sep_cache_from_source(self):
# Windows path and PEP 3147 where sep is right of altsep.
self.assertEqual(
self.util.cache_from_source('\\foo\\bar\\baz/qux.py', optimization=''),
'\\foo\\bar\\baz\\__pycache__\\qux.{}.pyc'.format(self.tag))
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag to not be None')
def test_cache_from_source_path_like_arg(self):
# cache_from_source() should accept a path-like object.
path = pathlib.PurePath('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag to not be '
'None')
def test_source_from_cache(self):
# Given the path to a PEP 3147 defined .pyc file, return the path to
# its source. This tests the good path.
path = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
expect = os.path.join('foo', 'bar', 'baz', 'qux.py')
self.assertEqual(self.util.source_from_cache(path), expect)
def test_source_from_cache_no_cache_tag(self):
# If sys.implementation.cache_tag is None, raise NotImplementedError.
path = os.path.join('blah', '__pycache__', 'whatever.pyc')
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
self.util.source_from_cache(path)
def test_source_from_cache_bad_path(self):
# When the path to a pyc file is not in PEP 3147 format, a ValueError
# is raised.
self.assertRaises(
ValueError, self.util.source_from_cache, '/foo/bar/bazqux.pyc')
def test_source_from_cache_no_slash(self):
# No slashes at all in path -> ValueError
self.assertRaises(
ValueError, self.util.source_from_cache, 'foo.cpython-32.pyc')
def test_source_from_cache_too_few_dots(self):
# Too few dots in final path component -> ValueError
self.assertRaises(
ValueError, self.util.source_from_cache, '__pycache__/foo.pyc')
def test_source_from_cache_too_many_dots(self):
with self.assertRaises(ValueError):
self.util.source_from_cache(
'__pycache__/foo.cpython-32.opt-1.foo.pyc')
def test_source_from_cache_not_opt(self):
# Non-`opt-` path component -> ValueError
self.assertRaises(
ValueError, self.util.source_from_cache,
'__pycache__/foo.cpython-32.foo.pyc')
def test_source_from_cache_no__pycache__(self):
# Another problem with the path -> ValueError
self.assertRaises(
ValueError, self.util.source_from_cache,
'/foo/bar/foo.cpython-32.foo.pyc')
def test_source_from_cache_optimized_bytecode(self):
# Optimized bytecode is not an issue.
path = os.path.join('__pycache__', 'foo.{}.opt-1.pyc'.format(self.tag))
self.assertEqual(self.util.source_from_cache(path), 'foo.py')
def test_source_from_cache_missing_optimization(self):
# An empty optimization level is a no-no.
path = os.path.join('__pycache__', 'foo.{}.opt-.pyc'.format(self.tag))
with self.assertRaises(ValueError):
self.util.source_from_cache(path)
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag to not be '
'None')
def test_source_from_cache_path_like_arg(self):
path = pathlib.PurePath('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
expect = os.path.join('foo', 'bar', 'baz', 'qux.py')
self.assertEqual(self.util.source_from_cache(path), expect)
(Frozen_PEP3147Tests,
Source_PEP3147Tests
) = util.test_both(PEP3147Tests, util=importlib_util)
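# The PEP 3147/488 mapping exercised above, sketched with a stand-in cache
# tag ('cpython-36' here; the real value is sys.implementation.cache_tag):
#   cache_from_source('foo/bar.py', optimization='')
#       -> 'foo/__pycache__/bar.cpython-36.pyc'
#   cache_from_source('foo/bar.py', optimization=2)
#       -> 'foo/__pycache__/bar.cpython-36.opt-2.pyc'
#   source_from_cache('foo/__pycache__/bar.cpython-36.pyc')
#       -> 'foo/bar.py'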
class MagicNumberCompatibilityTests(unittest.TestCase):
"""
Test release compatibility issues relating to importlib
"""
@unittest.skipUnless(
sys.version_info.releaselevel in ('candidate', 'final'),
'only applies to candidate or final python release levels'
)
def test_magic_number(self):
"""
Each python minor release should generally have a MAGIC_NUMBER
that does not change once the release reaches candidate status.
Once a release reaches candidate status, the value of the constant
EXPECTED_MAGIC_NUMBER in this test should be changed.
This test will then check that the actual MAGIC_NUMBER matches
the expected value for the release.
In exceptional cases, it may be required to change the MAGIC_NUMBER
for a maintenance release. In this case the change should be
discussed in python-dev. If a change is required, community
stakeholders such as OS package maintainers must be notified
in advance. Such exceptional releases will then require an
adjustment to this test case.
"""
EXPECTED_MAGIC_NUMBER = 3379
actual = int.from_bytes(importlib.util.MAGIC_NUMBER[:2], 'little')
msg = (
"To avoid breaking backwards compatibility with cached bytecode "
"files that can't be automatically regenerated by the current "
"user, candidate and final releases require the current "
"importlib.util.MAGIC_NUMBER to match the expected "
"magic number in this test. Set the expected "
"magic number in this test to the current MAGIC_NUMBER to "
"continue with the release.\n\n"
"Changing the MAGIC_NUMBER for a maintenance release "
"requires discussion in python-dev and notification of "
"community stakeholders."
)
self.assertEqual(EXPECTED_MAGIC_NUMBER, actual, msg)
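# For orientation: MAGIC_NUMBER is four bytes, a little-endian 16-bit
# magic word followed by b'\r\n', so on an interpreter whose word is
# 3379 the check above amounts to (a sketch):
#   importlib.util.MAGIC_NUMBER == (3379).to_bytes(2, 'little') + b'\r\n'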
if __name__ == '__main__':
unittest.main()
cosmopolitan/third_party/python/Lib/test/test_importlib/test_abc.py
import contextlib
import inspect
import io
import marshal
import os
import sys
from test import support
import types
import unittest
from unittest import mock
import warnings
from . import util as test_util
init = test_util.import_importlib('importlib')
abc = test_util.import_importlib('importlib.abc')
machinery = test_util.import_importlib('importlib.machinery')
util = test_util.import_importlib('importlib.util')
##### Inheritance ##############################################################
class InheritanceTests:
"""Test that the specified class is a subclass/superclass of the expected
classes."""
subclasses = []
superclasses = []
def setUp(self):
self.superclasses = [getattr(self.abc, class_name)
for class_name in self.superclass_names]
if hasattr(self, 'subclass_names'):
# Because test.support.import_fresh_module() creates a new
# importlib._bootstrap per module, inheritance checks fail when
# checking across module boundaries (i.e. the _bootstrap in abc is
# not the same as the one in machinery). That means stealing one of
# the modules from the other to make sure the same instance is used.
machinery = self.abc.machinery
self.subclasses = [getattr(machinery, class_name)
for class_name in self.subclass_names]
assert self.subclasses or self.superclasses, self.__class__
self.__test = getattr(self.abc, self._NAME)
def test_subclasses(self):
# Test that the expected subclasses inherit.
for subclass in self.subclasses:
self.assertTrue(issubclass(subclass, self.__test),
"{0} is not a subclass of {1}".format(subclass, self.__test))
def test_superclasses(self):
# Test that the class inherits from the expected superclasses.
for superclass in self.superclasses:
self.assertTrue(issubclass(self.__test, superclass),
"{0} is not a superclass of {1}".format(superclass, self.__test))
class MetaPathFinder(InheritanceTests):
superclass_names = ['Finder']
subclass_names = ['BuiltinImporter', 'FrozenImporter', 'PathFinder',
'WindowsRegistryFinder']
(Frozen_MetaPathFinderInheritanceTests,
Source_MetaPathFinderInheritanceTests
) = test_util.test_both(MetaPathFinder, abc=abc)
class PathEntryFinder(InheritanceTests):
superclass_names = ['Finder']
subclass_names = ['FileFinder']
(Frozen_PathEntryFinderInheritanceTests,
Source_PathEntryFinderInheritanceTests
) = test_util.test_both(PathEntryFinder, abc=abc)
class ResourceLoader(InheritanceTests):
superclass_names = ['Loader']
(Frozen_ResourceLoaderInheritanceTests,
Source_ResourceLoaderInheritanceTests
) = test_util.test_both(ResourceLoader, abc=abc)
class InspectLoader(InheritanceTests):
superclass_names = ['Loader']
subclass_names = ['BuiltinImporter', 'FrozenImporter', 'ExtensionFileLoader']
(Frozen_InspectLoaderInheritanceTests,
Source_InspectLoaderInheritanceTests
) = test_util.test_both(InspectLoader, abc=abc)
class ExecutionLoader(InheritanceTests):
superclass_names = ['InspectLoader']
subclass_names = ['ExtensionFileLoader']
(Frozen_ExecutionLoaderInheritanceTests,
Source_ExecutionLoaderInheritanceTests
) = test_util.test_both(ExecutionLoader, abc=abc)
class FileLoader(InheritanceTests):
superclass_names = ['ResourceLoader', 'ExecutionLoader']
subclass_names = ['SourceFileLoader', 'SourcelessFileLoader']
(Frozen_FileLoaderInheritanceTests,
Source_FileLoaderInheritanceTests
) = test_util.test_both(FileLoader, abc=abc)
class SourceLoader(InheritanceTests):
superclass_names = ['ResourceLoader', 'ExecutionLoader']
subclass_names = ['SourceFileLoader']
(Frozen_SourceLoaderInheritanceTests,
Source_SourceLoaderInheritanceTests
) = test_util.test_both(SourceLoader, abc=abc)
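# The importlib.abc hierarchy pinned down by the inheritance tests above,
# in brief: Finder is the base of MetaPathFinder and PathEntryFinder;
# Loader is the base of ResourceLoader and InspectLoader; ExecutionLoader
# derives from InspectLoader; and FileLoader and SourceLoader each derive
# from both ResourceLoader and ExecutionLoader.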
##### Default return values ####################################################
def make_abc_subclasses(base_class, name=None, inst=False, **kwargs):
if name is None:
name = base_class.__name__
base = {kind: getattr(splitabc, name)
for kind, splitabc in abc.items()}
return {cls._KIND: cls() if inst else cls
for cls in test_util.split_frozen(base_class, base, **kwargs)}
class ABCTestHarness:
@property
def ins(self):
# Lazily set ins on the class.
cls = self.SPLIT[self._KIND]
ins = cls()
self.__class__.ins = ins
return ins
class MetaPathFinder:
def find_module(self, fullname, path):
return super().find_module(fullname, path)
class MetaPathFinderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(MetaPathFinder)
def test_find_module(self):
# Default should return None.
self.assertIsNone(self.ins.find_module('something', None))
def test_invalidate_caches(self):
# Calling the method is a no-op.
self.ins.invalidate_caches()
(Frozen_MPFDefaultTests,
Source_MPFDefaultTests
) = test_util.test_both(MetaPathFinderDefaultsTests)
class PathEntryFinder:
def find_loader(self, fullname):
return super().find_loader(fullname)
class PathEntryFinderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(PathEntryFinder)
def test_find_loader(self):
self.assertEqual((None, []), self.ins.find_loader('something'))
def test_find_module(self):
self.assertEqual(None, self.ins.find_module('something'))
def test_invalidate_caches(self):
# Should be a no-op.
self.ins.invalidate_caches()
(Frozen_PEFDefaultTests,
Source_PEFDefaultTests
) = test_util.test_both(PathEntryFinderDefaultsTests)
class Loader:
def load_module(self, fullname):
return super().load_module(fullname)
class LoaderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(Loader)
def test_create_module(self):
spec = 'a spec'
self.assertIsNone(self.ins.create_module(spec))
def test_load_module(self):
with self.assertRaises(ImportError):
self.ins.load_module('something')
def test_module_repr(self):
mod = types.ModuleType('blah')
with self.assertRaises(NotImplementedError):
self.ins.module_repr(mod)
original_repr = repr(mod)
mod.__loader__ = self.ins
# Should still return a proper repr.
self.assertTrue(repr(mod))
(Frozen_LDefaultTests,
Source_LDefaultTests
) = test_util.test_both(LoaderDefaultsTests)
class ResourceLoader(Loader):
def get_data(self, path):
return super().get_data(path)
class ResourceLoaderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(ResourceLoader)
def test_get_data(self):
with self.assertRaises(IOError):
self.ins.get_data('/some/path')
(Frozen_RLDefaultTests,
Source_RLDefaultTests
) = test_util.test_both(ResourceLoaderDefaultsTests)
class InspectLoader(Loader):
def is_package(self, fullname):
return super().is_package(fullname)
def get_source(self, fullname):
return super().get_source(fullname)
SPLIT_IL = make_abc_subclasses(InspectLoader)
class InspectLoaderDefaultsTests(ABCTestHarness):
SPLIT = SPLIT_IL
def test_is_package(self):
with self.assertRaises(ImportError):
self.ins.is_package('blah')
def test_get_source(self):
with self.assertRaises(ImportError):
self.ins.get_source('blah')
(Frozen_ILDefaultTests,
Source_ILDefaultTests
) = test_util.test_both(InspectLoaderDefaultsTests)
class ExecutionLoader(InspectLoader):
def get_filename(self, fullname):
return super().get_filename(fullname)
SPLIT_EL = make_abc_subclasses(ExecutionLoader)
class ExecutionLoaderDefaultsTests(ABCTestHarness):
SPLIT = SPLIT_EL
def test_get_filename(self):
with self.assertRaises(ImportError):
self.ins.get_filename('blah')
(Frozen_ELDefaultTests,
Source_ELDefaultTests
) = test_util.test_both(ExecutionLoaderDefaultsTests)
##### MetaPathFinder concrete methods ##########################################
class MetaPathFinderFindModuleTests:
@classmethod
def finder(cls, spec):
class MetaPathSpecFinder(cls.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
self.called_for = fullname, path
return spec
return MetaPathSpecFinder()
def test_no_spec(self):
finder = self.finder(None)
path = ['a', 'b', 'c']
name = 'blah'
found = finder.find_module(name, path)
self.assertIsNone(found)
self.assertEqual(name, finder.called_for[0])
self.assertEqual(path, finder.called_for[1])
def test_spec(self):
loader = object()
spec = self.util.spec_from_loader('blah', loader)
finder = self.finder(spec)
found = finder.find_module('blah', None)
self.assertIs(found, spec.loader)
(Frozen_MPFFindModuleTests,
Source_MPFFindModuleTests
) = test_util.test_both(MetaPathFinderFindModuleTests, abc=abc, util=util)
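# The concrete MetaPathFinder.find_module() exercised above reduces to
# roughly the following (a sketch, not a verbatim copy):
#
#   def find_module(self, fullname, path):
#       spec = self.find_spec(fullname, path)
#       return spec.loader if spec is not None else None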
##### PathEntryFinder concrete methods #########################################
class PathEntryFinderFindLoaderTests:
@classmethod
def finder(cls, spec):
class PathEntrySpecFinder(cls.abc.PathEntryFinder):
def find_spec(self, fullname, target=None):
self.called_for = fullname
return spec
return PathEntrySpecFinder()
def test_no_spec(self):
finder = self.finder(None)
name = 'blah'
found = finder.find_loader(name)
self.assertIsNone(found[0])
self.assertEqual([], found[1])
self.assertEqual(name, finder.called_for)
def test_spec_with_loader(self):
loader = object()
spec = self.util.spec_from_loader('blah', loader)
finder = self.finder(spec)
found = finder.find_loader('blah')
self.assertIs(found[0], spec.loader)
def test_spec_with_portions(self):
spec = self.machinery.ModuleSpec('blah', None)
paths = ['a', 'b', 'c']
spec.submodule_search_locations = paths
finder = self.finder(spec)
found = finder.find_loader('blah')
self.assertIsNone(found[0])
self.assertEqual(paths, found[1])
(Frozen_PEFFindLoaderTests,
Source_PEFFindLoaderTests
) = test_util.test_both(PathEntryFinderFindLoaderTests, abc=abc, util=util,
machinery=machinery)
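# Likewise, the concrete PathEntryFinder.find_loader() boils down to
# roughly this sketch:
#
#   def find_loader(self, fullname):
#       spec = self.find_spec(fullname)
#       if spec is None:
#           return None, []
#       return spec.loader, spec.submodule_search_locations or []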
##### Loader concrete methods ##################################################
class LoaderLoadModuleTests:
def loader(self):
class SpecLoader(self.abc.Loader):
found = None
def exec_module(self, module):
self.found = module
def is_package(self, fullname):
"""Force some non-default module state to be set."""
return True
return SpecLoader()
def test_fresh(self):
loader = self.loader()
name = 'blah'
with test_util.uncache(name):
loader.load_module(name)
module = loader.found
self.assertIs(sys.modules[name], module)
self.assertEqual(loader, module.__loader__)
self.assertEqual(loader, module.__spec__.loader)
self.assertEqual(name, module.__name__)
self.assertEqual(name, module.__spec__.name)
self.assertIsNotNone(module.__path__)
self.assertEqual(module.__path__,
module.__spec__.submodule_search_locations)
def test_reload(self):
name = 'blah'
loader = self.loader()
module = types.ModuleType(name)
module.__spec__ = self.util.spec_from_loader(name, loader)
module.__loader__ = loader
with test_util.uncache(name):
sys.modules[name] = module
loader.load_module(name)
found = loader.found
self.assertIs(found, sys.modules[name])
self.assertIs(module, sys.modules[name])
(Frozen_LoaderLoadModuleTests,
Source_LoaderLoadModuleTests
) = test_util.test_both(LoaderLoadModuleTests, abc=abc, util=util)
##### InspectLoader concrete methods ###########################################
class InspectLoaderSourceToCodeTests:
def source_to_module(self, data, path=None):
"""Help with source_to_code() tests."""
module = types.ModuleType('blah')
loader = self.InspectLoaderSubclass()
if path is None:
code = loader.source_to_code(data)
else:
code = loader.source_to_code(data, path)
exec(code, module.__dict__)
return module
def test_source_to_code_source(self):
# Since compile() can handle strings, source_to_code() should too.
source = 'attr = 42'
module = self.source_to_module(source)
self.assertTrue(hasattr(module, 'attr'))
self.assertEqual(module.attr, 42)
def test_source_to_code_bytes(self):
# Since compile() can handle bytes, source_to_code() should too.
source = b'attr = 42'
module = self.source_to_module(source)
self.assertTrue(hasattr(module, 'attr'))
self.assertEqual(module.attr, 42)
def test_source_to_code_path(self):
# Specifying a path should set it for the code object.
path = 'path/to/somewhere'
loader = self.InspectLoaderSubclass()
code = loader.source_to_code('', path)
self.assertEqual(code.co_filename, path)
def test_source_to_code_no_path(self):
# Not setting a path should still work; the filename should default to
# '<string>', matching the long-standing default of compile().
loader = self.InspectLoaderSubclass()
code = loader.source_to_code('')
self.assertEqual(code.co_filename, '<string>')
(Frozen_ILSourceToCodeTests,
Source_ILSourceToCodeTests
) = test_util.test_both(InspectLoaderSourceToCodeTests,
InspectLoaderSubclass=SPLIT_IL)
class InspectLoaderGetCodeTests:
def test_get_code(self):
# Test success.
module = types.ModuleType('blah')
with mock.patch.object(self.InspectLoaderSubclass, 'get_source') as mocked:
mocked.return_value = 'attr = 42'
loader = self.InspectLoaderSubclass()
code = loader.get_code('blah')
exec(code, module.__dict__)
self.assertEqual(module.attr, 42)
def test_get_code_source_is_None(self):
# If get_source() returns None then get_code() should return None.
with mock.patch.object(self.InspectLoaderSubclass, 'get_source') as mocked:
mocked.return_value = None
loader = self.InspectLoaderSubclass()
code = loader.get_code('blah')
self.assertIsNone(code)
def test_get_code_source_not_found(self):
# If there is no source then there is no code object.
loader = self.InspectLoaderSubclass()
with self.assertRaises(ImportError):
loader.get_code('blah')
(Frozen_ILGetCodeTests,
Source_ILGetCodeTests
) = test_util.test_both(InspectLoaderGetCodeTests,
InspectLoaderSubclass=SPLIT_IL)
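# InspectLoader.get_code() as verified above is approximately:
#
#   def get_code(self, fullname):
#       source = self.get_source(fullname)    # may raise ImportError
#       if source is None:
#           return None
#       return self.source_to_code(source)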
class InspectLoaderLoadModuleTests:
"""Test InspectLoader.load_module()."""
module_name = 'blah'
def setUp(self):
support.unload(self.module_name)
self.addCleanup(support.unload, self.module_name)
def load(self, loader):
spec = self.util.spec_from_loader(self.module_name, loader)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return self.init._bootstrap._load_unlocked(spec)
def mock_get_code(self):
return mock.patch.object(self.InspectLoaderSubclass, 'get_code')
def test_get_code_ImportError(self):
# If get_code() raises ImportError, it should propagate.
with self.mock_get_code() as mocked_get_code:
mocked_get_code.side_effect = ImportError
with self.assertRaises(ImportError):
loader = self.InspectLoaderSubclass()
self.load(loader)
def test_get_code_None(self):
# If get_code() returns None, raise ImportError.
with self.mock_get_code() as mocked_get_code:
mocked_get_code.return_value = None
with self.assertRaises(ImportError):
loader = self.InspectLoaderSubclass()
self.load(loader)
def test_module_returned(self):
# The loaded module should be returned.
code = compile('attr = 42', '<string>', 'exec')
with self.mock_get_code() as mocked_get_code:
mocked_get_code.return_value = code
loader = self.InspectLoaderSubclass()
module = self.load(loader)
self.assertEqual(module, sys.modules[self.module_name])
(Frozen_ILLoadModuleTests,
Source_ILLoadModuleTests
) = test_util.test_both(InspectLoaderLoadModuleTests,
InspectLoaderSubclass=SPLIT_IL,
init=init,
util=util)
##### ExecutionLoader concrete methods #########################################
class ExecutionLoaderGetCodeTests:
def mock_methods(self, *, get_source=False, get_filename=False):
source_mock_context, filename_mock_context = None, None
if get_source:
source_mock_context = mock.patch.object(self.ExecutionLoaderSubclass,
'get_source')
if get_filename:
filename_mock_context = mock.patch.object(self.ExecutionLoaderSubclass,
'get_filename')
return source_mock_context, filename_mock_context
def test_get_code(self):
path = 'blah.py'
source_mock_context, filename_mock_context = self.mock_methods(
get_source=True, get_filename=True)
with source_mock_context as source_mock, filename_mock_context as name_mock:
source_mock.return_value = 'attr = 42'
name_mock.return_value = path
loader = self.ExecutionLoaderSubclass()
code = loader.get_code('blah')
self.assertEqual(code.co_filename, path)
module = types.ModuleType('blah')
exec(code, module.__dict__)
self.assertEqual(module.attr, 42)
def test_get_code_source_is_None(self):
# If get_source() returns None then get_code() should return None.
source_mock_context, _ = self.mock_methods(get_source=True)
with source_mock_context as mocked:
mocked.return_value = None
loader = self.ExecutionLoaderSubclass()
code = loader.get_code('blah')
self.assertIsNone(code)
def test_get_code_source_not_found(self):
# If there is no source then there is no code object.
loader = self.ExecutionLoaderSubclass()
with self.assertRaises(ImportError):
loader.get_code('blah')
def test_get_code_no_path(self):
# If get_filename() raises ImportError then simply skip setting the path
# on the code object.
source_mock_context, filename_mock_context = self.mock_methods(
get_source=True, get_filename=True)
with source_mock_context as source_mock, filename_mock_context as name_mock:
source_mock.return_value = 'attr = 42'
name_mock.side_effect = ImportError
loader = self.ExecutionLoaderSubclass()
code = loader.get_code('blah')
self.assertEqual(code.co_filename, '<string>')
module = types.ModuleType('blah')
exec(code, module.__dict__)
self.assertEqual(module.attr, 42)
(Frozen_ELGetCodeTests,
Source_ELGetCodeTests
) = test_util.test_both(ExecutionLoaderGetCodeTests,
ExecutionLoaderSubclass=SPLIT_EL)
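# ExecutionLoader.get_code() additionally threads the filename through:
# roughly, it calls self.get_filename(fullname) and hands the result to
# source_to_code(); when get_filename() raises ImportError the path is
# simply omitted, which is why co_filename falls back to '<string>'.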
##### SourceLoader concrete methods ############################################
class SourceOnlyLoader:
# Globals that should be defined for all modules.
source = (b"_ = '::'.join([__name__, __file__, __cached__, __package__, "
b"repr(__loader__)])")
def __init__(self, path):
self.path = path
def get_data(self, path):
if path != self.path:
raise IOError
return self.source
def get_filename(self, fullname):
return self.path
def module_repr(self, module):
return '<module>'
SPLIT_SOL = make_abc_subclasses(SourceOnlyLoader, 'SourceLoader')
class SourceLoader(SourceOnlyLoader):
source_mtime = 1
def __init__(self, path, magic=None):
super().__init__(path)
self.bytecode_path = self.util.cache_from_source(self.path)
self.source_size = len(self.source)
if magic is None:
magic = self.util.MAGIC_NUMBER
data = bytearray(magic)
data.extend(self.init._w_long(self.source_mtime))
data.extend(self.init._w_long(self.source_size))
code_object = compile(self.source, self.path, 'exec',
dont_inherit=True)
data.extend(marshal.dumps(code_object))
self.bytecode = bytes(data)
self.written = {}
def get_data(self, path):
if path == self.path:
return super().get_data(path)
elif path == self.bytecode_path:
return self.bytecode
else:
raise OSError
def path_stats(self, path):
if path != self.path:
raise IOError
return {'mtime': self.source_mtime, 'size': self.source_size}
def set_data(self, path, data):
self.written[path] = bytes(data)
return path == self.bytecode_path
SPLIT_SL = make_abc_subclasses(SourceLoader, util=util, init=init)
class SourceLoaderTestHarness:
def setUp(self, *, is_package=True, **kwargs):
self.package = 'pkg'
if is_package:
self.path = os.path.join(self.package, '__init__.py')
self.name = self.package
else:
module_name = 'mod'
self.path = os.path.join(self.package, '.'.join([module_name, 'py']))
self.name = '.'.join([self.package, module_name])
self.cached = self.util.cache_from_source(self.path)
self.loader = self.loader_mock(self.path, **kwargs)
def verify_module(self, module):
self.assertEqual(module.__name__, self.name)
self.assertEqual(module.__file__, self.path)
self.assertEqual(module.__cached__, self.cached)
self.assertEqual(module.__package__, self.package)
self.assertEqual(module.__loader__, self.loader)
values = module._.split('::')
self.assertEqual(values[0], self.name)
self.assertEqual(values[1], self.path)
self.assertEqual(values[2], self.cached)
self.assertEqual(values[3], self.package)
self.assertEqual(values[4], repr(self.loader))
def verify_code(self, code_object):
module = types.ModuleType(self.name)
module.__file__ = self.path
module.__cached__ = self.cached
module.__package__ = self.package
module.__loader__ = self.loader
module.__path__ = []
exec(code_object, module.__dict__)
self.verify_module(module)
class SourceOnlyLoaderTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader for source-only loading.
Reload testing is subsumed by the tests for
importlib.util.module_for_loader.
"""
def test_get_source(self):
# Verify the source code is returned as a string.
# If an OSError is raised by get_data then raise ImportError.
expected_source = self.loader.source.decode('utf-8')
self.assertEqual(self.loader.get_source(self.name), expected_source)
def raise_OSError(path):
raise OSError
self.loader.get_data = raise_OSError
with self.assertRaises(ImportError) as cm:
self.loader.get_source(self.name)
self.assertEqual(cm.exception.name, self.name)
def test_is_package(self):
# Properly detect when loading a package.
self.setUp(is_package=False)
self.assertFalse(self.loader.is_package(self.name))
self.setUp(is_package=True)
self.assertTrue(self.loader.is_package(self.name))
self.assertFalse(self.loader.is_package(self.name + '.__init__'))
def test_get_code(self):
# Verify the code object is created.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_source_to_code(self):
# Verify the compiled code object.
code = self.loader.source_to_code(self.loader.source, self.path)
self.verify_code(code)
def test_load_module(self):
# Loading a module should set __name__, __loader__, __package__,
# __path__ (for packages), __file__, and __cached__.
# The module should also be put into sys.modules.
with test_util.uncache(self.name):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.loader.load_module(self.name)
self.verify_module(module)
self.assertEqual(module.__path__, [os.path.dirname(self.path)])
self.assertIn(self.name, sys.modules)
def test_package_settings(self):
# __package__ needs to be set, while __path__ is set only if the module
# is a package.
# Testing the values for a package are covered by test_load_module.
self.setUp(is_package=False)
with test_util.uncache(self.name):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.loader.load_module(self.name)
self.verify_module(module)
self.assertFalse(hasattr(module, '__path__'))
def test_get_source_encoding(self):
# Source is considered encoded in UTF-8 by default unless otherwise
# specified by an encoding line.
source = "_ = 'ü'"
self.loader.source = source.encode('utf-8')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
source = "# coding: latin-1\n_ = ü"
self.loader.source = source.encode('latin-1')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
(Frozen_SourceOnlyLoaderTests,
Source_SourceOnlyLoaderTests
) = test_util.test_both(SourceOnlyLoaderTests, util=util,
loader_mock=SPLIT_SOL)
@unittest.skipIf(sys.dont_write_bytecode, "sys.dont_write_bytecode is true")
class SourceLoaderBytecodeTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader's use of bytecode.
Source-only testing handled by SourceOnlyLoaderTests.
"""
def verify_code(self, code_object, *, bytecode_written=False):
super().verify_code(code_object)
if bytecode_written:
self.assertIn(self.cached, self.loader.written)
data = bytearray(self.util.MAGIC_NUMBER)
data.extend(self.init._w_long(self.loader.source_mtime))
data.extend(self.init._w_long(self.loader.source_size))
data.extend(marshal.dumps(code_object))
self.assertEqual(self.loader.written[self.cached], bytes(data))
def test_code_with_everything(self):
# When everything should work.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_no_bytecode(self):
# If no bytecode exists then move on to the source.
self.loader.bytecode_path = "<does not exist>"
# Sanity check
with self.assertRaises(OSError):
bytecode_path = self.util.cache_from_source(self.path)
self.loader.get_data(bytecode_path)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_code_bad_timestamp(self):
# Bytecode is only used when the timestamp matches the source EXACTLY.
for source_mtime in (0, 2):
assert source_mtime != self.loader.source_mtime
original = self.loader.source_mtime
self.loader.source_mtime = source_mtime
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
self.loader.source_mtime = original
def test_code_bad_magic(self):
# Skip over bytecode with a bad magic number.
self.setUp(magic=b'0000')
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_dont_write_bytecode(self):
# Bytecode is not written if sys.dont_write_bytecode is true.
# Can assume it is false already thanks to the skipIf class decorator.
try:
sys.dont_write_bytecode = True
self.loader.bytecode_path = "<does not exist>"
code_object = self.loader.get_code(self.name)
self.assertNotIn(self.cached, self.loader.written)
finally:
sys.dont_write_bytecode = False
def test_no_set_data(self):
# If set_data is not defined, one can still read bytecode.
self.setUp(magic=b'0000')
original_set_data = self.loader.__class__.mro()[1].set_data
try:
del self.loader.__class__.mro()[1].set_data
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
finally:
self.loader.__class__.mro()[1].set_data = original_set_data
def test_set_data_raises_exceptions(self):
# Raising NotImplementedError or OSError is okay for set_data.
def raise_exception(exc):
def closure(*args, **kwargs):
raise exc
return closure
self.setUp(magic=b'0000')
self.loader.set_data = raise_exception(NotImplementedError)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
(Frozen_SLBytecodeTests,
Source_SLBytecodeTests
) = test_util.test_both(SourceLoaderBytecodeTests, init=init, util=util,
loader_mock=SPLIT_SL)
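# Layout of the bytecode blob the SourceLoader mock assembles, and which
# the loader validates (a sketch for this pre-PEP-552 vintage of Python):
#   bytes  0..3   MAGIC_NUMBER (16-bit word plus b'\r\n')
#   bytes  4..7   source mtime, little-endian 32 bits (_w_long)
#   bytes  8..11  source size, little-endian 32 bits (_w_long)
#   bytes 12..    marshal.dumps(code_object)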
class SourceLoaderGetSourceTests:
"""Tests for importlib.abc.SourceLoader.get_source()."""
def test_default_encoding(self):
# Should have no problems with UTF-8 text.
name = 'mod'
mock = self.SourceOnlyLoaderMock('mod.file')
source = 'x = "ü"'
mock.source = source.encode('utf-8')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_decoded_source(self):
# Decoding should work.
name = 'mod'
mock = self.SourceOnlyLoaderMock("mod.file")
source = "# coding: Latin-1\nx='ü'"
assert source.encode('latin-1') != source.encode('utf-8')
mock.source = source.encode('latin-1')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_universal_newlines(self):
# PEP 302 says universal newlines should be used.
name = 'mod'
mock = self.SourceOnlyLoaderMock('mod.file')
source = "x = 42\r\ny = -13\r\n"
mock.source = source.encode('utf-8')
expect = io.IncrementalNewlineDecoder(None, True).decode(source)
self.assertEqual(mock.get_source(name), expect)
(Frozen_SourceOnlyLoaderGetSourceTests,
Source_SourceOnlyLoaderGetSourceTests
) = test_util.test_both(SourceLoaderGetSourceTests,
SourceOnlyLoaderMock=SPLIT_SOL)
if __name__ == '__main__':
unittest.main()
cosmopolitan/third_party/python/Lib/test/test_importlib/__main__.py
from . import load_tests
import unittest
unittest.main()
cosmopolitan/third_party/python/Lib/test/test_importlib/test_locks.py
from . import util as test_util
init = test_util.import_importlib('importlib')
import sys
import unittest
import weakref
from test import support
try:
import _thread
import threading
except ImportError:
threading = None
else:
from test import lock_tests
if threading is not None:
class ModuleLockAsRLockTests:
locktype = classmethod(lambda cls: cls.LockType("some_lock"))
# _is_owned() unsupported
test__is_owned = None
# acquire(blocking=False) unsupported
test_try_acquire = None
test_try_acquire_contended = None
# `with` unsupported
test_with = None
# acquire(timeout=...) unsupported
test_timeout = None
# _release_save() unsupported
test_release_save_unacquired = None
# lock status in repr unsupported
test_repr = None
test_locked_repr = None
LOCK_TYPES = {kind: splitinit._bootstrap._ModuleLock
for kind, splitinit in init.items()}
(Frozen_ModuleLockAsRLockTests,
Source_ModuleLockAsRLockTests
) = test_util.test_both(ModuleLockAsRLockTests, lock_tests.RLockTests,
LockType=LOCK_TYPES)
else:
LOCK_TYPES = {}
class Frozen_ModuleLockAsRLockTests(unittest.TestCase):
pass
class Source_ModuleLockAsRLockTests(unittest.TestCase):
pass
if threading is not None:
class DeadlockAvoidanceTests:
def setUp(self):
try:
self.old_switchinterval = sys.getswitchinterval()
support.setswitchinterval(0.000001)
except AttributeError:
self.old_switchinterval = None
def tearDown(self):
if self.old_switchinterval is not None:
sys.setswitchinterval(self.old_switchinterval)
def run_deadlock_avoidance_test(self, create_deadlock):
NLOCKS = 10
locks = [self.LockType(str(i)) for i in range(NLOCKS)]
pairs = [(locks[i], locks[(i+1)%NLOCKS]) for i in range(NLOCKS)]
if create_deadlock:
NTHREADS = NLOCKS
else:
NTHREADS = NLOCKS - 1
barrier = threading.Barrier(NTHREADS)
results = []
def _acquire(lock):
"""Try to acquire the lock. Return True on success,
False on deadlock."""
try:
lock.acquire()
except self.DeadlockError:
return False
else:
return True
def f():
a, b = pairs.pop()
ra = _acquire(a)
barrier.wait()
rb = _acquire(b)
results.append((ra, rb))
if rb:
b.release()
if ra:
a.release()
lock_tests.Bunch(f, NTHREADS).wait_for_finished()
self.assertEqual(len(results), NTHREADS)
return results
def test_deadlock(self):
results = self.run_deadlock_avoidance_test(True)
# At least one of the threads detected a potential deadlock on its
# second acquire() call. It may be several of them, because the
# deadlock avoidance mechanism is conservative.
nb_deadlocks = results.count((True, False))
self.assertGreaterEqual(nb_deadlocks, 1)
self.assertEqual(results.count((True, True)), len(results) - nb_deadlocks)
def test_no_deadlock(self):
results = self.run_deadlock_avoidance_test(False)
self.assertEqual(results.count((True, False)), 0)
self.assertEqual(results.count((True, True)), len(results))
DEADLOCK_ERRORS = {kind: splitinit._bootstrap._DeadlockError
for kind, splitinit in init.items()}
(Frozen_DeadlockAvoidanceTests,
Source_DeadlockAvoidanceTests
) = test_util.test_both(DeadlockAvoidanceTests,
LockType=LOCK_TYPES,
DeadlockError=DEADLOCK_ERRORS)
else:
DEADLOCK_ERRORS = {}
class Frozen_DeadlockAvoidanceTests(unittest.TestCase):
pass
class Source_DeadlockAvoidanceTests(unittest.TestCase):
pass
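# Why NTHREADS == NLOCKS must deadlock, in brief: thread i holds lock i and
# then blocks on lock (i+1) % NLOCKS, closing a wait cycle
# 0 -> 1 -> ... -> NLOCKS-1 -> 0. Dropping one thread leaves a lock free,
# breaks the cycle, and every acquire() succeeds.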
class LifetimeTests:
@property
def bootstrap(self):
return self.init._bootstrap
def test_lock_lifetime(self):
name = "xyzzy"
self.assertNotIn(name, self.bootstrap._module_locks)
lock = self.bootstrap._get_module_lock(name)
self.assertIn(name, self.bootstrap._module_locks)
wr = weakref.ref(lock)
del lock
support.gc_collect()
self.assertNotIn(name, self.bootstrap._module_locks)
self.assertIsNone(wr())
def test_all_locks(self):
support.gc_collect()
self.assertEqual(0, len(self.bootstrap._module_locks),
self.bootstrap._module_locks)
(Frozen_LifetimeTests,
Source_LifetimeTests
) = test_util.test_both(LifetimeTests, init=init)
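# Module locks are cached weakly, which is what the tests above rely on:
# _module_locks maps a name to a weak reference, so once the last strong
# reference to a lock dies the cache entry is purged and a later
# _get_module_lock() call builds a fresh lock.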
@support.reap_threads
def test_main():
support.run_unittest(Frozen_ModuleLockAsRLockTests,
Source_ModuleLockAsRLockTests,
Frozen_DeadlockAvoidanceTests,
Source_DeadlockAvoidanceTests,
Frozen_LifetimeTests,
Source_LifetimeTests)
if __name__ == '__main__':
test_main()
cosmopolitan/third_party/python/Lib/test/test_importlib/abc.py
import abc
import unittest
class FinderTests(metaclass=abc.ABCMeta):
"""Basic tests for a finder to pass."""
@abc.abstractmethod
def test_module(self):
# Test importing a top-level module.
pass
@abc.abstractmethod
def test_package(self):
# Test importing a package.
pass
@abc.abstractmethod
def test_module_in_package(self):
# Test importing a module contained within a package.
# A value for 'path' should be used if this is for a meta_path finder.
pass
@abc.abstractmethod
def test_package_in_package(self):
# Test importing a subpackage.
# A value for 'path' should be used if this is for a meta_path finder.
pass
@abc.abstractmethod
def test_package_over_module(self):
# Test that packages are chosen over modules.
pass
@abc.abstractmethod
def test_failure(self):
# Test trying to find a module that cannot be handled.
pass
class LoaderTests(metaclass=abc.ABCMeta):
@abc.abstractmethod
def test_module(self):
"""A module should load without issue.
After the loader returns the module should be in sys.modules.
Attributes to verify:
* __file__
* __loader__
* __name__
* No __path__
"""
pass
@abc.abstractmethod
def test_package(self):
"""Loading a package should work.
After the loader returns the module should be in sys.modules.
Attributes to verify:
* __name__
* __file__
* __package__
* __path__
* __loader__
"""
pass
@abc.abstractmethod
def test_lacking_parent(self):
"""A loader should not be dependent on it's parent package being
imported."""
pass
@abc.abstractmethod
def test_state_after_failure(self):
"""If a module is already in sys.modules and a reload fails
(e.g. a SyntaxError), the module should be in the state it was before
the reload began."""
pass
@abc.abstractmethod
def test_unloadable(self):
"""Test ImportError is raised when the loader is asked to load a module
it can't."""
pass
cosmopolitan/third_party/python/Lib/test/test_importlib/test_spec.py
from . import util as test_util
init = test_util.import_importlib('importlib')
machinery = test_util.import_importlib('importlib.machinery')
util = test_util.import_importlib('importlib.util')
import os.path
import pathlib
from test.support import CleanImport
import unittest
import sys
import warnings
class TestLoader:
def __init__(self, path=None, is_package=None):
self.path = path
self.package = is_package
def __repr__(self):
return '<TestLoader object>'
def __getattr__(self, name):
if name == 'get_filename' and self.path is not None:
return self._get_filename
if name == 'is_package':
return self._is_package
raise AttributeError(name)
def _get_filename(self, name):
return self.path
def _is_package(self, name):
return self.package
def create_module(self, spec):
return None
class NewLoader(TestLoader):
EGGS = 1
def exec_module(self, module):
module.eggs = self.EGGS
class LegacyLoader(TestLoader):
HAM = -1
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
frozen_util = util['Frozen']
@frozen_util.module_for_loader
def load_module(self, module):
module.ham = self.HAM
return module
class ModuleSpecTests:
def setUp(self):
self.name = 'spam'
self.path = 'spam.py'
self.cached = self.util.cache_from_source(self.path)
self.loader = TestLoader()
self.spec = self.machinery.ModuleSpec(self.name, self.loader)
self.loc_spec = self.machinery.ModuleSpec(self.name, self.loader,
origin=self.path)
self.loc_spec._set_fileattr = True
def test_default(self):
spec = self.machinery.ModuleSpec(self.name, self.loader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.loader)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_default_no_loader(self):
spec = self.machinery.ModuleSpec(self.name, None)
self.assertEqual(spec.name, self.name)
self.assertIs(spec.loader, None)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_default_is_package_false(self):
spec = self.machinery.ModuleSpec(self.name, self.loader,
is_package=False)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.loader)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_default_is_package_true(self):
spec = self.machinery.ModuleSpec(self.name, self.loader,
is_package=True)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.loader)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertEqual(spec.submodule_search_locations, [])
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_has_location_setter(self):
spec = self.machinery.ModuleSpec(self.name, self.loader,
origin='somewhere')
self.assertFalse(spec.has_location)
spec.has_location = True
self.assertTrue(spec.has_location)
def test_equality(self):
other = type(sys.implementation)(name=self.name,
loader=self.loader,
origin=None,
submodule_search_locations=None,
has_location=False,
cached=None,
)
self.assertTrue(self.spec == other)
def test_equality_location(self):
other = type(sys.implementation)(name=self.name,
loader=self.loader,
origin=self.path,
submodule_search_locations=None,
has_location=True,
cached=self.cached,
)
self.assertEqual(self.loc_spec, other)
def test_inequality(self):
other = type(sys.implementation)(name='ham',
loader=self.loader,
origin=None,
submodule_search_locations=None,
has_location=False,
cached=None,
)
self.assertNotEqual(self.spec, other)
def test_inequality_incomplete(self):
other = type(sys.implementation)(name=self.name,
loader=self.loader,
)
self.assertNotEqual(self.spec, other)
def test_package(self):
spec = self.machinery.ModuleSpec('spam.eggs', self.loader)
self.assertEqual(spec.parent, 'spam')
def test_package_is_package(self):
spec = self.machinery.ModuleSpec('spam.eggs', self.loader,
is_package=True)
self.assertEqual(spec.parent, 'spam.eggs')
# cached
def test_cached_set(self):
before = self.spec.cached
self.spec.cached = 'there'
after = self.spec.cached
self.assertIs(before, None)
self.assertEqual(after, 'there')
def test_cached_no_origin(self):
spec = self.machinery.ModuleSpec(self.name, self.loader)
self.assertIs(spec.cached, None)
def test_cached_with_origin_not_location(self):
spec = self.machinery.ModuleSpec(self.name, self.loader,
origin=self.path)
self.assertIs(spec.cached, None)
def test_cached_source(self):
expected = self.util.cache_from_source(self.path)
self.assertEqual(self.loc_spec.cached, expected)
def test_cached_source_unknown_suffix(self):
self.loc_spec.origin = 'spam.spamspamspam'
self.assertIs(self.loc_spec.cached, None)
def test_cached_source_missing_cache_tag(self):
original = sys.implementation.cache_tag
sys.implementation.cache_tag = None
try:
cached = self.loc_spec.cached
finally:
sys.implementation.cache_tag = original
self.assertIs(cached, None)
def test_cached_sourceless(self):
self.loc_spec.origin = 'spam.pyc'
self.assertEqual(self.loc_spec.cached, 'spam.pyc')
(Frozen_ModuleSpecTests,
Source_ModuleSpecTests
) = test_util.test_both(ModuleSpecTests, util=util, machinery=machinery)
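# How spec.cached behaves above, sketched: it is derived from origin only
# when the spec has a location, and only for recognized suffixes:
#   no origin, or origin that is not a location  -> cached is None
#   origin 'spam.py'   -> util.cache_from_source('spam.py')
#   origin 'spam.pyc'  -> 'spam.pyc' (already bytecode)
#   unknown suffix, or cache_tag is None         -> cached is None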
class ModuleSpecMethodsTests:
@property
def bootstrap(self):
return self.init._bootstrap
def setUp(self):
self.name = 'spam'
self.path = 'spam.py'
self.cached = self.util.cache_from_source(self.path)
self.loader = TestLoader()
self.spec = self.machinery.ModuleSpec(self.name, self.loader)
self.loc_spec = self.machinery.ModuleSpec(self.name, self.loader,
origin=self.path)
self.loc_spec._set_fileattr = True
# exec()
def test_exec(self):
self.spec.loader = NewLoader()
module = self.util.module_from_spec(self.spec)
sys.modules[self.name] = module
self.assertFalse(hasattr(module, 'eggs'))
self.bootstrap._exec(self.spec, module)
self.assertEqual(module.eggs, 1)
# load()
def test_load(self):
self.spec.loader = NewLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
installed = sys.modules[self.spec.name]
self.assertEqual(loaded.eggs, 1)
self.assertIs(loaded, installed)
def test_load_replaced(self):
replacement = object()
class ReplacingLoader(TestLoader):
def exec_module(self, module):
sys.modules[module.__name__] = replacement
self.spec.loader = ReplacingLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
installed = sys.modules[self.spec.name]
self.assertIs(loaded, replacement)
self.assertIs(installed, replacement)
def test_load_failed(self):
class FailedLoader(TestLoader):
def exec_module(self, module):
raise RuntimeError
self.spec.loader = FailedLoader()
with CleanImport(self.spec.name):
with self.assertRaises(RuntimeError):
loaded = self.bootstrap._load(self.spec)
self.assertNotIn(self.spec.name, sys.modules)
def test_load_failed_removed(self):
class FailedLoader(TestLoader):
def exec_module(self, module):
del sys.modules[module.__name__]
raise RuntimeError
self.spec.loader = FailedLoader()
with CleanImport(self.spec.name):
with self.assertRaises(RuntimeError):
loaded = self.bootstrap._load(self.spec)
self.assertNotIn(self.spec.name, sys.modules)
def test_load_legacy(self):
self.spec.loader = LegacyLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
self.assertEqual(loaded.ham, -1)
def test_load_legacy_attributes(self):
self.spec.loader = LegacyLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
self.assertIs(loaded.__loader__, self.spec.loader)
self.assertEqual(loaded.__package__, self.spec.parent)
self.assertIs(loaded.__spec__, self.spec)
def test_load_legacy_attributes_immutable(self):
module = object()
class ImmutableLoader(TestLoader):
def load_module(self, name):
sys.modules[name] = module
return module
self.spec.loader = ImmutableLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
self.assertIs(sys.modules[self.spec.name], module)
# reload()
def test_reload(self):
self.spec.loader = NewLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
reloaded = self.bootstrap._exec(self.spec, loaded)
installed = sys.modules[self.spec.name]
self.assertEqual(loaded.eggs, 1)
self.assertIs(reloaded, loaded)
self.assertIs(installed, loaded)
def test_reload_modified(self):
self.spec.loader = NewLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
loaded.eggs = 2
reloaded = self.bootstrap._exec(self.spec, loaded)
self.assertEqual(loaded.eggs, 1)
self.assertIs(reloaded, loaded)
def test_reload_extra_attributes(self):
self.spec.loader = NewLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
loaded.available = False
reloaded = self.bootstrap._exec(self.spec, loaded)
self.assertFalse(loaded.available)
self.assertIs(reloaded, loaded)
def test_reload_init_module_attrs(self):
self.spec.loader = NewLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
loaded.__name__ = 'ham'
del loaded.__loader__
del loaded.__package__
del loaded.__spec__
self.bootstrap._exec(self.spec, loaded)
self.assertEqual(loaded.__name__, self.spec.name)
self.assertIs(loaded.__loader__, self.spec.loader)
self.assertEqual(loaded.__package__, self.spec.parent)
self.assertIs(loaded.__spec__, self.spec)
self.assertFalse(hasattr(loaded, '__path__'))
self.assertFalse(hasattr(loaded, '__file__'))
self.assertFalse(hasattr(loaded, '__cached__'))
def test_reload_legacy(self):
self.spec.loader = LegacyLoader()
with CleanImport(self.spec.name):
loaded = self.bootstrap._load(self.spec)
reloaded = self.bootstrap._exec(self.spec, loaded)
installed = sys.modules[self.spec.name]
self.assertEqual(loaded.ham, -1)
self.assertIs(reloaded, loaded)
self.assertIs(installed, loaded)
(Frozen_ModuleSpecMethodsTests,
Source_ModuleSpecMethodsTests
) = test_util.test_both(ModuleSpecMethodsTests, init=init, util=util,
machinery=machinery)
class ModuleReprTests:
@property
def bootstrap(self):
return self.init._bootstrap
def setUp(self):
self.module = type(os)('spam')
self.spec = self.machinery.ModuleSpec('spam', TestLoader())
def test_module___loader___module_repr(self):
class Loader:
def module_repr(self, module):
return '<delicious {}>'.format(module.__name__)
self.module.__loader__ = Loader()
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr, '<delicious spam>')
def test_module___loader___module_repr_bad(self):
class Loader(TestLoader):
def module_repr(self, module):
raise Exception
self.module.__loader__ = Loader()
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr,
'<module {!r} (<TestLoader object>)>'.format('spam'))
def test_module___spec__(self):
origin = 'in a hole, in the ground'
self.spec.origin = origin
self.module.__spec__ = self.spec
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr, '<module {!r} ({})>'.format('spam', origin))
def test_module___spec___location(self):
location = 'in_a_galaxy_far_far_away.py'
self.spec.origin = location
self.spec._set_fileattr = True
self.module.__spec__ = self.spec
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr,
'<module {!r} from {!r}>'.format('spam', location))
def test_module___spec___no_origin(self):
self.spec.loader = TestLoader()
self.module.__spec__ = self.spec
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr,
'<module {!r} (<TestLoader object>)>'.format('spam'))
def test_module___spec___no_origin_no_loader(self):
self.spec.loader = None
self.module.__spec__ = self.spec
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr, '<module {!r}>'.format('spam'))
def test_module_no_name(self):
del self.module.__name__
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr, '<module {!r}>'.format('?'))
def test_module_with_file(self):
filename = 'e/i/e/i/o/spam.py'
self.module.__file__ = filename
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr,
'<module {!r} from {!r}>'.format('spam', filename))
def test_module_no_file(self):
self.module.__loader__ = TestLoader()
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr,
'<module {!r} (<TestLoader object>)>'.format('spam'))
def test_module_no_file_no_loader(self):
modrepr = self.bootstrap._module_repr(self.module)
self.assertEqual(modrepr, '<module {!r}>'.format('spam'))
(Frozen_ModuleReprTests,
Source_ModuleReprTests
) = test_util.test_both(ModuleReprTests, init=init, util=util,
machinery=machinery)
class FactoryTests:
def setUp(self):
self.name = 'spam'
self.path = 'spam.py'
self.cached = self.util.cache_from_source(self.path)
self.loader = TestLoader()
self.fileloader = TestLoader(self.path)
self.pkgloader = TestLoader(self.path, True)
# spec_from_loader()
def test_spec_from_loader_default(self):
spec = self.util.spec_from_loader(self.name, self.loader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.loader)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_spec_from_loader_default_with_bad_is_package(self):
class Loader:
def is_package(self, name):
raise ImportError
loader = Loader()
spec = self.util.spec_from_loader(self.name, loader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, loader)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_spec_from_loader_origin(self):
origin = 'somewhere over the rainbow'
spec = self.util.spec_from_loader(self.name, self.loader,
origin=origin)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.loader)
self.assertIs(spec.origin, origin)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_spec_from_loader_is_package_false(self):
spec = self.util.spec_from_loader(self.name, self.loader,
is_package=False)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.loader)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_spec_from_loader_is_package_true(self):
spec = self.util.spec_from_loader(self.name, self.loader,
is_package=True)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.loader)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertEqual(spec.submodule_search_locations, [])
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_spec_from_loader_origin_and_is_package(self):
origin = 'where the streets have no name'
spec = self.util.spec_from_loader(self.name, self.loader,
origin=origin, is_package=True)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.loader)
self.assertIs(spec.origin, origin)
self.assertIs(spec.loader_state, None)
self.assertEqual(spec.submodule_search_locations, [])
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_spec_from_loader_is_package_with_loader_false(self):
loader = TestLoader(is_package=False)
spec = self.util.spec_from_loader(self.name, loader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, loader)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_spec_from_loader_is_package_with_loader_true(self):
loader = TestLoader(is_package=True)
spec = self.util.spec_from_loader(self.name, loader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, loader)
self.assertIs(spec.origin, None)
self.assertIs(spec.loader_state, None)
self.assertEqual(spec.submodule_search_locations, [])
self.assertIs(spec.cached, None)
self.assertFalse(spec.has_location)
def test_spec_from_loader_default_with_file_loader(self):
spec = self.util.spec_from_loader(self.name, self.fileloader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.fileloader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_loader_is_package_false_with_fileloader(self):
spec = self.util.spec_from_loader(self.name, self.fileloader,
is_package=False)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.fileloader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_loader_is_package_true_with_fileloader(self):
spec = self.util.spec_from_loader(self.name, self.fileloader,
is_package=True)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.fileloader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertEqual(spec.submodule_search_locations, [''])
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
# spec_from_file_location()
def test_spec_from_file_location_default(self):
spec = self.util.spec_from_file_location(self.name, self.path)
self.assertEqual(spec.name, self.name)
# Need to use a circuitous route to get at importlib.machinery to make
# sure the same class object is used in the isinstance() check as
# would have been used to create the loader.
self.assertIsInstance(spec.loader,
self.util.abc.machinery.SourceFileLoader)
self.assertEqual(spec.loader.name, self.name)
self.assertEqual(spec.loader.path, self.path)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_path_like_arg(self):
spec = self.util.spec_from_file_location(self.name,
pathlib.PurePath(self.path))
self.assertEqual(spec.origin, self.path)
def test_spec_from_file_location_default_without_location(self):
spec = self.util.spec_from_file_location(self.name)
self.assertIs(spec, None)
def test_spec_from_file_location_default_bad_suffix(self):
spec = self.util.spec_from_file_location(self.name, 'spam.eggs')
self.assertIs(spec, None)
def test_spec_from_file_location_loader_no_location(self):
spec = self.util.spec_from_file_location(self.name,
loader=self.fileloader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.fileloader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_loader_no_location_no_get_filename(self):
spec = self.util.spec_from_file_location(self.name,
loader=self.loader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.loader)
self.assertEqual(spec.origin, '<unknown>')
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_loader_no_location_bad_get_filename(self):
class Loader:
def get_filename(self, name):
raise ImportError
loader = Loader()
spec = self.util.spec_from_file_location(self.name, loader=loader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, loader)
self.assertEqual(spec.origin, '<unknown>')
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertIs(spec.cached, None)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_smsl_none(self):
spec = self.util.spec_from_file_location(self.name, self.path,
loader=self.fileloader,
submodule_search_locations=None)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.fileloader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_smsl_empty(self):
spec = self.util.spec_from_file_location(self.name, self.path,
loader=self.fileloader,
submodule_search_locations=[])
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.fileloader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertEqual(spec.submodule_search_locations, [''])
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_smsl_not_empty(self):
spec = self.util.spec_from_file_location(self.name, self.path,
loader=self.fileloader,
submodule_search_locations=['eggs'])
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.fileloader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertEqual(spec.submodule_search_locations, ['eggs'])
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_smsl_default(self):
spec = self.util.spec_from_file_location(self.name, self.path,
loader=self.pkgloader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.pkgloader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertEqual(spec.submodule_search_locations, [''])
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_smsl_default_not_package(self):
class Loader:
def is_package(self, name):
return False
loader = Loader()
spec = self.util.spec_from_file_location(self.name, self.path,
loader=loader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, loader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_smsl_default_no_is_package(self):
spec = self.util.spec_from_file_location(self.name, self.path,
loader=self.fileloader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, self.fileloader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
def test_spec_from_file_location_smsl_default_bad_is_package(self):
class Loader:
def is_package(self, name):
raise ImportError
loader = Loader()
spec = self.util.spec_from_file_location(self.name, self.path,
loader=loader)
self.assertEqual(spec.name, self.name)
self.assertEqual(spec.loader, loader)
self.assertEqual(spec.origin, self.path)
self.assertIs(spec.loader_state, None)
self.assertIs(spec.submodule_search_locations, None)
self.assertEqual(spec.cached, self.cached)
self.assertTrue(spec.has_location)
(Frozen_FactoryTests,
Source_FactoryTests
) = test_util.test_both(FactoryTests, util=util, machinery=machinery)
if __name__ == '__main__':
unittest.main()

cosmopolitan/third_party/python/Lib/test/test_importlib/util.py

import builtins
import contextlib
import errno
import functools
import importlib
from importlib import machinery, util, invalidate_caches
import os
import os.path
from test import support
import unittest
import sys
import tempfile
import types
BUILTINS = types.SimpleNamespace()
BUILTINS.good_name = None
BUILTINS.bad_name = None
if 'errno' in sys.builtin_module_names:
BUILTINS.good_name = 'errno'
if 'importlib' not in sys.builtin_module_names:
BUILTINS.bad_name = 'importlib'
EXTENSIONS = types.SimpleNamespace()
EXTENSIONS.path = None
EXTENSIONS.ext = None
EXTENSIONS.filename = None
EXTENSIONS.file_path = None
EXTENSIONS.name = '_testcapi'
def _extension_details():
global EXTENSIONS
for path in sys.path:
for ext in machinery.EXTENSION_SUFFIXES:
filename = EXTENSIONS.name + ext
file_path = os.path.join(path, filename)
if os.path.exists(file_path):
EXTENSIONS.path = path
EXTENSIONS.ext = ext
EXTENSIONS.filename = filename
EXTENSIONS.file_path = file_path
return
_extension_details()
def import_importlib(module_name):
"""Import a module from importlib both w/ and w/o _frozen_importlib."""
fresh = ('importlib',) if '.' in module_name else ()
frozen = support.import_fresh_module(module_name)
source = support.import_fresh_module(module_name, fresh=fresh,
blocked=('_frozen_importlib', '_frozen_importlib_external'))
return {'Frozen': frozen, 'Source': source}
def specialize_class(cls, kind, base=None, **kwargs):
# XXX Support passing in submodule names--load (and cache) them?
# That would clean up the test modules a bit more.
if base is None:
base = unittest.TestCase
elif not isinstance(base, type):
base = base[kind]
name = '{}_{}'.format(kind, cls.__name__)
bases = (cls, base)
specialized = types.new_class(name, bases)
specialized.__module__ = cls.__module__
specialized._NAME = cls.__name__
specialized._KIND = kind
for attr, values in kwargs.items():
value = values[kind]
setattr(specialized, attr, value)
return specialized
def split_frozen(cls, base=None, **kwargs):
frozen = specialize_class(cls, 'Frozen', base, **kwargs)
source = specialize_class(cls, 'Source', base, **kwargs)
return frozen, source
def test_both(test_class, base=None, **kwargs):
return split_frozen(test_class, base, **kwargs)
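# The Frozen/Source split in practice (ExampleTests and its test are
# hypothetical; this mirrors the module-level idiom used by the test files
# that import this helper):
#
#     util = import_importlib('importlib.util')
#
#     class ExampleTests:
#         def test_it(self):
#             self.util.cache_from_source  # bound per kind below
#
#     (Frozen_ExampleTests,
#      Source_ExampleTests
#      ) = test_both(ExampleTests, util=util)
#
# Each generated class subclasses unittest.TestCase and has 'util' bound to
# the frozen-bootstrap copy or the pure-Python copy respectively.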
CASE_INSENSITIVE_FS = True
# Windows is the only OS that is *always* case-insensitive
# (OS X *can* be case-sensitive).
if sys.platform not in ('win32', 'cygwin'):
changed_name = __file__.upper()
if changed_name == __file__:
changed_name = __file__.lower()
if not os.path.exists(changed_name):
CASE_INSENSITIVE_FS = False
source_importlib = import_importlib('importlib')['Source']
__import__ = {'Frozen': staticmethod(builtins.__import__),
'Source': staticmethod(source_importlib.__import__)}
def case_insensitive_tests(test):
"""Class decorator that nullifies tests requiring a case-insensitive
file system."""
return unittest.skipIf(not CASE_INSENSITIVE_FS,
"requires a case-insensitive filesystem")(test)
def submodule(parent, name, pkg_dir, content=''):
path = os.path.join(pkg_dir, name + '.py')
with open(path, 'w') as subfile:
subfile.write(content)
return '{}.{}'.format(parent, name), path
@contextlib.contextmanager
def uncache(*names):
"""Uncache a module from sys.modules.
    A basic sanity check is performed to prevent uncaching modules that
    cannot or should not be uncached.
"""
for name in names:
if name in ('sys', 'marshal', 'imp'):
raise ValueError(
"cannot uncache {0}".format(name))
try:
del sys.modules[name]
except KeyError:
pass
try:
yield
finally:
for name in names:
try:
del sys.modules[name]
except KeyError:
pass
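# Minimal usage sketch for uncache() ('spam' is a hypothetical module name):
#
#     with uncache('spam'):
#         sys.modules['spam'] = types.ModuleType('spam')
#         ...  # exercise the import machinery without polluting later tests
#     # on exit, 'spam' has been evicted from sys.modules again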
@contextlib.contextmanager
def temp_module(name, content='', *, pkg=False):
conflicts = [n for n in sys.modules if n.partition('.')[0] == name]
with support.temp_cwd(None) as cwd:
with uncache(name, *conflicts):
with support.DirsOnSysPath(cwd):
invalidate_caches()
location = os.path.join(cwd, name)
if pkg:
modpath = os.path.join(location, '__init__.py')
os.mkdir(name)
else:
modpath = location + '.py'
if content is None:
# Make sure the module file gets created.
content = ''
if content is not None:
# not a namespace package
with open(modpath, 'w') as modfile:
modfile.write(content)
yield location
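# Minimal usage sketch for temp_module() (name and content are made up):
#
#     with temp_module('spam', content='attr = 1'):
#         module = importlib.import_module('spam')  # found via the temp cwd
#         assert module.attr == 1
#     # the temporary file, sys.path entry, and sys.modules entry are undone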
@contextlib.contextmanager
def import_state(**kwargs):
"""Context manager to manage the various importers and stored state in the
sys module.
The 'modules' attribute is not supported as the interpreter state stores a
pointer to the dict that the interpreter uses internally;
reassigning to sys.modules does not have the desired effect.
"""
originals = {}
try:
for attr, default in (('meta_path', []), ('path', []),
('path_hooks', []),
('path_importer_cache', {})):
originals[attr] = getattr(sys, attr)
if attr in kwargs:
new_value = kwargs[attr]
del kwargs[attr]
else:
new_value = default
setattr(sys, attr, new_value)
if len(kwargs):
raise ValueError(
'unrecognized arguments: {0}'.format(kwargs.keys()))
yield
finally:
for attr, value in originals.items():
setattr(sys, attr, value)
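# Minimal usage sketch for import_state() (the finder is hypothetical; see
# the _ImporterMock subclasses below for real ones):
#
#     finder = mock_spec('spam')
#     with import_state(meta_path=[finder]):
#         ...  # imports resolve only through the supplied finder
#     # sys.meta_path, sys.path, etc. are restored afterwards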
class _ImporterMock:
"""Base class to help with creating importer mocks."""
def __init__(self, *names, module_code={}):
self.modules = {}
self.module_code = {}
for name in names:
if not name.endswith('.__init__'):
import_name = name
else:
import_name = name[:-len('.__init__')]
if '.' not in name:
package = None
elif import_name == name:
package = name.rsplit('.', 1)[0]
else:
package = import_name
module = types.ModuleType(import_name)
module.__loader__ = self
module.__file__ = '<mock __file__>'
module.__package__ = package
module.attr = name
if import_name != name:
module.__path__ = ['<mock __path__>']
self.modules[import_name] = module
if import_name in module_code:
self.module_code[import_name] = module_code[import_name]
def __getitem__(self, name):
return self.modules[name]
def __enter__(self):
self._uncache = uncache(*self.modules.keys())
self._uncache.__enter__()
return self
def __exit__(self, *exc_info):
self._uncache.__exit__(None, None, None)
class mock_modules(_ImporterMock):
"""Importer mock using PEP 302 APIs."""
def find_module(self, fullname, path=None):
if fullname not in self.modules:
return None
else:
return self
def load_module(self, fullname):
if fullname not in self.modules:
raise ImportError
else:
sys.modules[fullname] = self.modules[fullname]
if fullname in self.module_code:
try:
self.module_code[fullname]()
except Exception:
del sys.modules[fullname]
raise
return self.modules[fullname]
class mock_spec(_ImporterMock):
"""Importer mock using PEP 451 APIs."""
def find_spec(self, fullname, path=None, parent=None):
try:
module = self.modules[fullname]
except KeyError:
return None
spec = util.spec_from_file_location(
fullname, module.__file__, loader=self,
submodule_search_locations=getattr(module, '__path__', None))
return spec
def create_module(self, spec):
if spec.name not in self.modules:
raise ImportError
return self.modules[spec.name]
def exec_module(self, module):
try:
self.module_code[module.__spec__.name]()
except KeyError:
pass
def writes_bytecode_files(fxn):
"""Decorator to protect sys.dont_write_bytecode from mutation and to skip
tests that require it to be set to False."""
if sys.dont_write_bytecode:
return lambda *args, **kwargs: None
@functools.wraps(fxn)
def wrapper(*args, **kwargs):
original = sys.dont_write_bytecode
sys.dont_write_bytecode = False
try:
to_return = fxn(*args, **kwargs)
finally:
sys.dont_write_bytecode = original
return to_return
return wrapper
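# Minimal usage sketch (the test name is hypothetical):
#
#     @writes_bytecode_files
#     def test_pyc_created(self):
#         ...  # runs with sys.dont_write_bytecode forced to False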
def ensure_bytecode_path(bytecode_path):
"""Ensure that the __pycache__ directory for PEP 3147 pyc file exists.
:param bytecode_path: File system path to PEP 3147 pyc file.
"""
try:
os.mkdir(os.path.dirname(bytecode_path))
except OSError as error:
if error.errno != errno.EEXIST:
raise
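# For orientation, the PEP 3147 layout this targets looks roughly like
# (the interpreter tag in the filename varies by implementation/version):
#
#     util.cache_from_source('spam.py')
#     -> os.path.join('__pycache__', 'spam.cpython-XY.pyc')
#
# so ensure_bytecode_path() only has to create the '__pycache__' directory.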
@contextlib.contextmanager
def create_modules(*names):
"""Temporarily create each named module with an attribute (named 'attr')
that contains the name passed into the context manager that caused the
creation of the module.
All files are created in a temporary directory returned by
tempfile.mkdtemp(). This directory is inserted at the beginning of
sys.path. When the context manager exits all created files (source and
bytecode) are explicitly deleted.
No magic is performed when creating packages! This means that if you create
a module within a package you must also create the package's __init__ as
well.
"""
source = 'attr = {0!r}'
created_paths = []
mapping = {}
state_manager = None
uncache_manager = None
try:
temp_dir = tempfile.mkdtemp()
mapping['.root'] = temp_dir
import_names = set()
for name in names:
if not name.endswith('__init__'):
import_name = name
else:
import_name = name[:-len('.__init__')]
import_names.add(import_name)
if import_name in sys.modules:
del sys.modules[import_name]
name_parts = name.split('.')
file_path = temp_dir
for directory in name_parts[:-1]:
file_path = os.path.join(file_path, directory)
if not os.path.exists(file_path):
os.mkdir(file_path)
created_paths.append(file_path)
file_path = os.path.join(file_path, name_parts[-1] + '.py')
with open(file_path, 'w') as file:
file.write(source.format(name))
created_paths.append(file_path)
mapping[name] = file_path
uncache_manager = uncache(*import_names)
uncache_manager.__enter__()
state_manager = import_state(path=[temp_dir])
state_manager.__enter__()
yield mapping
finally:
if state_manager is not None:
state_manager.__exit__(None, None, None)
if uncache_manager is not None:
uncache_manager.__exit__(None, None, None)
support.rmtree(temp_dir)
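# Minimal usage sketch for create_modules() (names are made up):
#
#     with create_modules('pkg.__init__', 'pkg.mod') as mapping:
#         # mapping['pkg.mod'] is the source path; mapping['.root'] is the
#         # temporary directory that was prepended to sys.path
#         module = importlib.import_module('pkg.mod')
#         assert module.attr == 'pkg.mod'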
def mock_path_hook(*entries, importer):
"""A mock sys.path_hooks entry."""
def hook(entry):
if entry not in entries:
raise ImportError
return importer
return hook
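# Minimal usage sketch for mock_path_hook() (entry and finder are made up):
#
#     hook = mock_path_hook('/fake/path', importer=finder)
#     with import_state(path=['/fake/path'], path_hooks=[hook]):
#         ...  # path-based imports under '/fake/path' go to 'finder'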
class CASEOKTestBase:
def caseok_env_changed(self, *, should_exist):
possibilities = b'PYTHONCASEOK', 'PYTHONCASEOK'
if any(x in self.importlib._bootstrap_external._os.environ
for x in possibilities) != should_exist:
self.skipTest('os.environ changes not reflected in _os.environ')

cosmopolitan/third_party/python/Lib/test/test_importlib/test_api.py

from . import util as test_util
init = test_util.import_importlib('importlib')
util = test_util.import_importlib('importlib.util')
machinery = test_util.import_importlib('importlib.machinery')
import os.path
import sys
from test import support
import types
import unittest
import warnings
class ImportModuleTests:
"""Test importlib.import_module."""
def test_module_import(self):
# Test importing a top-level module.
with test_util.mock_modules('top_level') as mock:
with test_util.import_state(meta_path=[mock]):
module = self.init.import_module('top_level')
self.assertEqual(module.__name__, 'top_level')
def test_absolute_package_import(self):
# Test importing a module from a package with an absolute name.
pkg_name = 'pkg'
pkg_long_name = '{0}.__init__'.format(pkg_name)
name = '{0}.mod'.format(pkg_name)
with test_util.mock_modules(pkg_long_name, name) as mock:
with test_util.import_state(meta_path=[mock]):
module = self.init.import_module(name)
self.assertEqual(module.__name__, name)
def test_shallow_relative_package_import(self):
# Test importing a module from a package through a relative import.
pkg_name = 'pkg'
pkg_long_name = '{0}.__init__'.format(pkg_name)
module_name = 'mod'
absolute_name = '{0}.{1}'.format(pkg_name, module_name)
relative_name = '.{0}'.format(module_name)
with test_util.mock_modules(pkg_long_name, absolute_name) as mock:
with test_util.import_state(meta_path=[mock]):
self.init.import_module(pkg_name)
module = self.init.import_module(relative_name, pkg_name)
self.assertEqual(module.__name__, absolute_name)
def test_deep_relative_package_import(self):
modules = ['a.__init__', 'a.b.__init__', 'a.c']
with test_util.mock_modules(*modules) as mock:
with test_util.import_state(meta_path=[mock]):
self.init.import_module('a')
self.init.import_module('a.b')
module = self.init.import_module('..c', 'a.b')
self.assertEqual(module.__name__, 'a.c')
def test_absolute_import_with_package(self):
# Test importing a module from a package with an absolute name with
# the 'package' argument given.
pkg_name = 'pkg'
pkg_long_name = '{0}.__init__'.format(pkg_name)
name = '{0}.mod'.format(pkg_name)
with test_util.mock_modules(pkg_long_name, name) as mock:
with test_util.import_state(meta_path=[mock]):
self.init.import_module(pkg_name)
module = self.init.import_module(name, pkg_name)
self.assertEqual(module.__name__, name)
def test_relative_import_wo_package(self):
# Relative imports cannot happen without the 'package' argument being
# set.
with self.assertRaises(TypeError):
self.init.import_module('.support')
def test_loaded_once(self):
# Issue #13591: Modules should only be loaded once when
# initializing the parent package attempts to import the
# module currently being imported.
b_load_count = 0
def load_a():
self.init.import_module('a.b')
def load_b():
nonlocal b_load_count
b_load_count += 1
code = {'a': load_a, 'a.b': load_b}
modules = ['a.__init__', 'a.b']
with test_util.mock_modules(*modules, module_code=code) as mock:
with test_util.import_state(meta_path=[mock]):
self.init.import_module('a.b')
self.assertEqual(b_load_count, 1)
(Frozen_ImportModuleTests,
Source_ImportModuleTests
) = test_util.test_both(ImportModuleTests, init=init)
class FindLoaderTests:
FakeMetaFinder = None
def test_sys_modules(self):
# If a module with __loader__ is in sys.modules, then return it.
name = 'some_mod'
with test_util.uncache(name):
module = types.ModuleType(name)
loader = 'a loader!'
module.__loader__ = loader
sys.modules[name] = module
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
found = self.init.find_loader(name)
self.assertEqual(loader, found)
def test_sys_modules_loader_is_None(self):
# If sys.modules[name].__loader__ is None, raise ValueError.
name = 'some_mod'
with test_util.uncache(name):
module = types.ModuleType(name)
module.__loader__ = None
sys.modules[name] = module
with self.assertRaises(ValueError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.init.find_loader(name)
def test_sys_modules_loader_is_not_set(self):
# Should raise ValueError
# Issue #17099
name = 'some_mod'
with test_util.uncache(name):
module = types.ModuleType(name)
try:
del module.__loader__
except AttributeError:
pass
sys.modules[name] = module
with self.assertRaises(ValueError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.init.find_loader(name)
def test_success(self):
# Return the loader found on sys.meta_path.
name = 'some_mod'
with test_util.uncache(name):
with test_util.import_state(meta_path=[self.FakeMetaFinder]):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual((name, None), self.init.find_loader(name))
def test_success_path(self):
# Searching on a path should work.
name = 'some_mod'
path = 'path to some place'
with test_util.uncache(name):
with test_util.import_state(meta_path=[self.FakeMetaFinder]):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual((name, path),
self.init.find_loader(name, path))
def test_nothing(self):
# None is returned upon failure to find a loader.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertIsNone(self.init.find_loader('nevergoingtofindthismodule'))
class FindLoaderPEP451Tests(FindLoaderTests):
class FakeMetaFinder:
@staticmethod
def find_spec(name, path=None, target=None):
return machinery['Source'].ModuleSpec(name, (name, path))
(Frozen_FindLoaderPEP451Tests,
Source_FindLoaderPEP451Tests
) = test_util.test_both(FindLoaderPEP451Tests, init=init)
class FindLoaderPEP302Tests(FindLoaderTests):
class FakeMetaFinder:
@staticmethod
def find_module(name, path=None):
return name, path
(Frozen_FindLoaderPEP302Tests,
Source_FindLoaderPEP302Tests
) = test_util.test_both(FindLoaderPEP302Tests, init=init)
class ReloadTests:
"""Test module reloading for builtin and extension modules."""
def test_reload_modules(self):
for mod in ('tokenize', 'time', 'marshal'):
with self.subTest(module=mod):
with support.CleanImport(mod):
module = self.init.import_module(mod)
self.init.reload(module)
def test_module_replaced(self):
def code():
import sys
module = type(sys)('top_level')
module.spam = 3
sys.modules['top_level'] = module
mock = test_util.mock_modules('top_level',
module_code={'top_level': code})
with mock:
with test_util.import_state(meta_path=[mock]):
module = self.init.import_module('top_level')
reloaded = self.init.reload(module)
actual = sys.modules['top_level']
self.assertEqual(actual.spam, 3)
self.assertEqual(reloaded.spam, 3)
def test_reload_missing_loader(self):
with support.CleanImport('types'):
import types
loader = types.__loader__
del types.__loader__
reloaded = self.init.reload(types)
self.assertIs(reloaded, types)
self.assertIs(sys.modules['types'], types)
self.assertEqual(reloaded.__loader__.path, loader.path)
def test_reload_loader_replaced(self):
with support.CleanImport('types'):
import types
types.__loader__ = None
self.init.invalidate_caches()
reloaded = self.init.reload(types)
self.assertIsNot(reloaded.__loader__, None)
self.assertIs(reloaded, types)
self.assertIs(sys.modules['types'], types)
def test_reload_location_changed(self):
name = 'spam'
with support.temp_cwd(None) as cwd:
with test_util.uncache('spam'):
with support.DirsOnSysPath(cwd):
# Start as a plain module.
self.init.invalidate_caches()
path = os.path.join(cwd, name + '.py')
cached = self.util.cache_from_source(path)
expected = {'__name__': name,
'__package__': '',
'__file__': path,
'__cached__': cached,
'__doc__': None,
}
support.create_empty_file(path)
module = self.init.import_module(name)
ns = vars(module).copy()
loader = ns.pop('__loader__')
spec = ns.pop('__spec__')
ns.pop('__builtins__', None) # An implementation detail.
self.assertEqual(spec.name, name)
self.assertEqual(spec.loader, loader)
self.assertEqual(loader.path, path)
self.assertEqual(ns, expected)
# Change to a package.
self.init.invalidate_caches()
init_path = os.path.join(cwd, name, '__init__.py')
cached = self.util.cache_from_source(init_path)
expected = {'__name__': name,
'__package__': name,
'__file__': init_path,
'__cached__': cached,
'__path__': [os.path.dirname(init_path)],
'__doc__': None,
}
os.mkdir(name)
os.rename(path, init_path)
reloaded = self.init.reload(module)
ns = vars(reloaded).copy()
loader = ns.pop('__loader__')
spec = ns.pop('__spec__')
ns.pop('__builtins__', None) # An implementation detail.
self.assertEqual(spec.name, name)
self.assertEqual(spec.loader, loader)
self.assertIs(reloaded, module)
self.assertEqual(loader.path, init_path)
self.maxDiff = None
self.assertEqual(ns, expected)
def test_reload_namespace_changed(self):
name = 'spam'
with support.temp_cwd(None) as cwd:
with test_util.uncache('spam'):
with support.DirsOnSysPath(cwd):
# Start as a namespace package.
self.init.invalidate_caches()
bad_path = os.path.join(cwd, name, '__init.py')
cached = self.util.cache_from_source(bad_path)
expected = {'__name__': name,
'__package__': name,
'__doc__': None,
}
os.mkdir(name)
with open(bad_path, 'w') as init_file:
init_file.write('eggs = None')
module = self.init.import_module(name)
ns = vars(module).copy()
loader = ns.pop('__loader__')
path = ns.pop('__path__')
spec = ns.pop('__spec__')
ns.pop('__builtins__', None) # An implementation detail.
self.assertEqual(spec.name, name)
self.assertIs(spec.loader, None)
self.assertIsNot(loader, None)
self.assertEqual(set(path),
set([os.path.dirname(bad_path)]))
with self.assertRaises(AttributeError):
# a NamespaceLoader
loader.path
self.assertEqual(ns, expected)
# Change to a regular package.
self.init.invalidate_caches()
init_path = os.path.join(cwd, name, '__init__.py')
cached = self.util.cache_from_source(init_path)
expected = {'__name__': name,
'__package__': name,
'__file__': init_path,
'__cached__': cached,
'__path__': [os.path.dirname(init_path)],
'__doc__': None,
'eggs': None,
}
os.rename(bad_path, init_path)
reloaded = self.init.reload(module)
ns = vars(reloaded).copy()
loader = ns.pop('__loader__')
spec = ns.pop('__spec__')
ns.pop('__builtins__', None) # An implementation detail.
self.assertEqual(spec.name, name)
self.assertEqual(spec.loader, loader)
self.assertIs(reloaded, module)
self.assertEqual(loader.path, init_path)
self.assertEqual(ns, expected)
def test_reload_submodule(self):
# See #19851.
name = 'spam'
subname = 'ham'
with test_util.temp_module(name, pkg=True) as pkg_dir:
fullname, _ = test_util.submodule(name, subname, pkg_dir)
ham = self.init.import_module(fullname)
reloaded = self.init.reload(ham)
self.assertIs(reloaded, ham)
(Frozen_ReloadTests,
Source_ReloadTests
) = test_util.test_both(ReloadTests, init=init, util=util)
class InvalidateCacheTests:
def test_method_called(self):
# If defined the method should be called.
class InvalidatingNullFinder:
def __init__(self, *ignored):
self.called = False
def find_module(self, *args):
return None
def invalidate_caches(self):
self.called = True
key = 'gobledeegook'
meta_ins = InvalidatingNullFinder()
path_ins = InvalidatingNullFinder()
sys.meta_path.insert(0, meta_ins)
self.addCleanup(lambda: sys.path_importer_cache.__delitem__(key))
sys.path_importer_cache[key] = path_ins
self.addCleanup(lambda: sys.meta_path.remove(meta_ins))
self.init.invalidate_caches()
self.assertTrue(meta_ins.called)
self.assertTrue(path_ins.called)
def test_method_lacking(self):
# There should be no issues if the method is not defined.
key = 'gobbledeegook'
sys.path_importer_cache[key] = None
self.addCleanup(lambda: sys.path_importer_cache.__delitem__(key))
self.init.invalidate_caches() # Shouldn't trigger an exception.
(Frozen_InvalidateCacheTests,
Source_InvalidateCacheTests
) = test_util.test_both(InvalidateCacheTests, init=init)
class FrozenImportlibTests(unittest.TestCase):
def test_no_frozen_importlib(self):
# Should be able to import w/o _frozen_importlib being defined.
# Can't do an isinstance() check since separate copies of importlib
# may have been used for import, so just check the name is not for the
# frozen loader.
source_init = init['Source']
self.assertNotEqual(source_init.__loader__.__class__.__name__,
'FrozenImporter')
class StartupTests:
def test_everyone_has___loader__(self):
# Issue #17098: all modules should have __loader__ defined.
for name, module in sys.modules.items():
if isinstance(module, types.ModuleType):
with self.subTest(name=name):
self.assertTrue(hasattr(module, '__loader__'),
'{!r} lacks a __loader__ attribute'.format(name))
if self.machinery.BuiltinImporter.find_module(name):
self.assertIsNot(module.__loader__, None)
elif self.machinery.FrozenImporter.find_module(name):
self.assertIsNot(module.__loader__, None)
def test_everyone_has___spec__(self):
for name, module in sys.modules.items():
if isinstance(module, types.ModuleType):
with self.subTest(name=name):
self.assertTrue(hasattr(module, '__spec__'))
if self.machinery.BuiltinImporter.find_module(name):
self.assertIsNot(module.__spec__, None)
elif self.machinery.FrozenImporter.find_module(name):
self.assertIsNot(module.__spec__, None)
(Frozen_StartupTests,
Source_StartupTests
) = test_util.test_both(StartupTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()

cosmopolitan/third_party/python/Lib/test/test_importlib/test_lazy.py

import importlib
from importlib import abc
from importlib import util
import sys
import types
import unittest
from . import util as test_util
class CollectInit:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def exec_module(self, module):
return self
class LazyLoaderFactoryTests(unittest.TestCase):
def test_init(self):
factory = util.LazyLoader.factory(CollectInit)
# E.g. what importlib.machinery.FileFinder instantiates loaders with
# plus keyword arguments.
lazy_loader = factory('module name', 'module path', kw='kw')
loader = lazy_loader.loader
self.assertEqual(('module name', 'module path'), loader.args)
self.assertEqual({'kw': 'kw'}, loader.kwargs)
def test_validation(self):
# No exec_module(), no lazy loading.
with self.assertRaises(TypeError):
util.LazyLoader.factory(object)
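# How the factory is wired up outside of tests, following the importlib
# documentation (the path and suffix list here are the conventional values,
# not taken from this file):
#
#     from importlib.machinery import FileFinder, SourceFileLoader
#     lazy = util.LazyLoader.factory(SourceFileLoader)
#     finder = FileFinder('/some/path', (lazy, ['.py']))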
class TestingImporter(abc.MetaPathFinder, abc.Loader):
module_name = 'lazy_loader_test'
mutated_name = 'changed'
loaded = None
source_code = 'attr = 42; __name__ = {!r}'.format(mutated_name)
def find_spec(self, name, path, target=None):
if name != self.module_name:
return None
return util.spec_from_loader(name, util.LazyLoader(self))
def exec_module(self, module):
exec(self.source_code, module.__dict__)
self.loaded = module
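# Outline of the lazy flow exercised below (the module name comes from
# TestingImporter above):
#
#     import lazy_loader_test    # find_spec() wraps the loader in LazyLoader,
#                                # so exec_module() is deferred
#     lazy_loader_test.attr      # the first attribute access triggers the
#                                # real TestingImporter.exec_module()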
class LazyLoaderTests(unittest.TestCase):
def test_init(self):
with self.assertRaises(TypeError):
            # Classes that don't define exec_module() trigger TypeError.
util.LazyLoader(object)
def new_module(self, source_code=None):
loader = TestingImporter()
if source_code is not None:
loader.source_code = source_code
spec = util.spec_from_loader(TestingImporter.module_name,
util.LazyLoader(loader))
module = spec.loader.create_module(spec)
if module is None:
module = types.ModuleType(TestingImporter.module_name)
module.__spec__ = spec
module.__loader__ = spec.loader
spec.loader.exec_module(module)
# Module is now lazy.
self.assertIsNone(loader.loaded)
return module
def test_e2e(self):
# End-to-end test to verify the load is in fact lazy.
importer = TestingImporter()
assert importer.loaded is None
with test_util.uncache(importer.module_name):
with test_util.import_state(meta_path=[importer]):
module = importlib.import_module(importer.module_name)
self.assertIsNone(importer.loaded)
# Trigger load.
self.assertEqual(module.__loader__, importer)
self.assertIsNotNone(importer.loaded)
self.assertEqual(module, importer.loaded)
def test_attr_unchanged(self):
# An attribute only mutated as a side-effect of import should not be
# changed needlessly.
module = self.new_module()
self.assertEqual(TestingImporter.mutated_name, module.__name__)
def test_new_attr(self):
# A new attribute should persist.
module = self.new_module()
module.new_attr = 42
self.assertEqual(42, module.new_attr)
def test_mutated_preexisting_attr(self):
# Changing an attribute that already existed on the module --
# e.g. __name__ -- should persist.
module = self.new_module()
module.__name__ = 'bogus'
self.assertEqual('bogus', module.__name__)
def test_mutated_attr(self):
# Changing an attribute that comes into existence after an import
# should persist.
module = self.new_module()
module.attr = 6
self.assertEqual(6, module.attr)
def test_delete_eventual_attr(self):
# Deleting an attribute should stay deleted.
module = self.new_module()
del module.attr
self.assertFalse(hasattr(module, 'attr'))
def test_delete_preexisting_attr(self):
module = self.new_module()
del module.__name__
self.assertFalse(hasattr(module, '__name__'))
def test_module_substitution_error(self):
with test_util.uncache(TestingImporter.module_name):
fresh_module = types.ModuleType(TestingImporter.module_name)
sys.modules[TestingImporter.module_name] = fresh_module
module = self.new_module()
with self.assertRaisesRegex(ValueError, "substituted"):
module.__name__
def test_module_already_in_sys(self):
with test_util.uncache(TestingImporter.module_name):
module = self.new_module()
sys.modules[TestingImporter.module_name] = module
# Force the load; just care that no exception is raised.
module.__name__
if __name__ == '__main__':
unittest.main()

cosmopolitan/third_party/python/Lib/test/test_importlib/test_namespace_pkgs.py

import contextlib
import importlib
import os
import sys
import unittest
from test.test_importlib import util
# needed tests:
#
# need to test when nested, so that the top-level path isn't sys.path
# need to test dynamic path detection, both at top-level and nested
# with dynamic path, check when a loader is returned on path reload (that is,
# trying to switch from a namespace package to a regular package)
@contextlib.contextmanager
def sys_modules_context():
"""
Make sure sys.modules is the same object and has the same content
when exiting the context as when entering.
    Similar to test.test_importlib.util.uncache, but doesn't require explicit
names.
"""
sys_modules_saved = sys.modules
sys_modules_copy = sys.modules.copy()
try:
yield
finally:
sys.modules = sys_modules_saved
sys.modules.clear()
sys.modules.update(sys_modules_copy)
@contextlib.contextmanager
def namespace_tree_context(**kwargs):
"""
Save import state and sys.modules cache and restore it on exit.
Typical usage:
>>> with namespace_tree_context(path=['/tmp/xxyy/portion1',
... '/tmp/xxyy/portion2']):
... pass
"""
# use default meta_path and path_hooks unless specified otherwise
kwargs.setdefault('meta_path', sys.meta_path)
kwargs.setdefault('path_hooks', sys.path_hooks)
import_context = util.import_state(**kwargs)
with import_context, sys_modules_context():
yield
class NamespacePackageTest(unittest.TestCase):
"""
Subclasses should define self.root and self.paths (under that root)
to be added to sys.path.
"""
root = os.path.join(os.path.dirname(__file__), 'namespace_pkgs')
def setUp(self):
self.resolved_paths = [
os.path.join(self.root, path) for path in self.paths
]
self.ctx = namespace_tree_context(path=self.resolved_paths)
self.ctx.__enter__()
def tearDown(self):
# TODO: will we ever want to pass exc_info to __exit__?
self.ctx.__exit__(None, None, None)
class SingleNamespacePackage(NamespacePackageTest):
paths = ['portion1']
def test_simple_package(self):
import foo.one
self.assertEqual(foo.one.attr, 'portion1 foo one')
def test_cant_import_other(self):
with self.assertRaises(ImportError):
import foo.two
def test_module_repr(self):
import foo.one
self.assertEqual(repr(foo), "<module 'foo' (namespace)>")
class DynamicPathNamespacePackage(NamespacePackageTest):
paths = ['portion1']
def test_dynamic_path(self):
# Make sure only 'foo.one' can be imported
import foo.one
self.assertEqual(foo.one.attr, 'portion1 foo one')
with self.assertRaises(ImportError):
import foo.two
# Now modify sys.path
sys.path.append(os.path.join(self.root, 'portion2'))
# And make sure foo.two is now importable
import foo.two
self.assertEqual(foo.two.attr, 'portion2 foo two')
class CombinedNamespacePackages(NamespacePackageTest):
paths = ['both_portions']
def test_imports(self):
import foo.one
import foo.two
self.assertEqual(foo.one.attr, 'both_portions foo one')
self.assertEqual(foo.two.attr, 'both_portions foo two')
class SeparatedNamespacePackages(NamespacePackageTest):
paths = ['portion1', 'portion2']
def test_imports(self):
import foo.one
import foo.two
self.assertEqual(foo.one.attr, 'portion1 foo one')
self.assertEqual(foo.two.attr, 'portion2 foo two')
class SeparatedOverlappingNamespacePackages(NamespacePackageTest):
paths = ['portion1', 'both_portions']
def test_first_path_wins(self):
import foo.one
import foo.two
self.assertEqual(foo.one.attr, 'portion1 foo one')
self.assertEqual(foo.two.attr, 'both_portions foo two')
def test_first_path_wins_again(self):
sys.path.reverse()
import foo.one
import foo.two
self.assertEqual(foo.one.attr, 'both_portions foo one')
self.assertEqual(foo.two.attr, 'both_portions foo two')
def test_first_path_wins_importing_second_first(self):
import foo.two
import foo.one
self.assertEqual(foo.one.attr, 'portion1 foo one')
self.assertEqual(foo.two.attr, 'both_portions foo two')
class SingleZipNamespacePackage(NamespacePackageTest):
paths = ['top_level_portion1.zip']
def test_simple_package(self):
import foo.one
self.assertEqual(foo.one.attr, 'portion1 foo one')
def test_cant_import_other(self):
with self.assertRaises(ImportError):
import foo.two
class SeparatedZipNamespacePackages(NamespacePackageTest):
paths = ['top_level_portion1.zip', 'portion2']
def test_imports(self):
import foo.one
import foo.two
self.assertEqual(foo.one.attr, 'portion1 foo one')
self.assertEqual(foo.two.attr, 'portion2 foo two')
self.assertIn('top_level_portion1.zip', foo.one.__file__)
self.assertNotIn('.zip', foo.two.__file__)
class SingleNestedZipNamespacePackage(NamespacePackageTest):
paths = ['nested_portion1.zip/nested_portion1']
def test_simple_package(self):
import foo.one
self.assertEqual(foo.one.attr, 'portion1 foo one')
def test_cant_import_other(self):
with self.assertRaises(ImportError):
import foo.two
class SeparatedNestedZipNamespacePackages(NamespacePackageTest):
paths = ['nested_portion1.zip/nested_portion1', 'portion2']
def test_imports(self):
import foo.one
import foo.two
self.assertEqual(foo.one.attr, 'portion1 foo one')
self.assertEqual(foo.two.attr, 'portion2 foo two')
fn = os.path.join('nested_portion1.zip', 'nested_portion1')
self.assertIn(fn, foo.one.__file__)
self.assertNotIn('.zip', foo.two.__file__)
class LegacySupport(NamespacePackageTest):
paths = ['not_a_namespace_pkg', 'portion1', 'portion2', 'both_portions']
def test_non_namespace_package_takes_precedence(self):
import foo.one
with self.assertRaises(ImportError):
import foo.two
self.assertIn('__init__', foo.__file__)
self.assertNotIn('namespace', str(foo.__loader__).lower())
class DynamicPathCalculation(NamespacePackageTest):
paths = ['project1', 'project2']
def test_project3_fails(self):
import parent.child.one
self.assertEqual(len(parent.__path__), 2)
self.assertEqual(len(parent.child.__path__), 2)
import parent.child.two
self.assertEqual(len(parent.__path__), 2)
self.assertEqual(len(parent.child.__path__), 2)
self.assertEqual(parent.child.one.attr, 'parent child one')
self.assertEqual(parent.child.two.attr, 'parent child two')
with self.assertRaises(ImportError):
import parent.child.three
self.assertEqual(len(parent.__path__), 2)
self.assertEqual(len(parent.child.__path__), 2)
def test_project3_succeeds(self):
import parent.child.one
self.assertEqual(len(parent.__path__), 2)
self.assertEqual(len(parent.child.__path__), 2)
import parent.child.two
self.assertEqual(len(parent.__path__), 2)
self.assertEqual(len(parent.child.__path__), 2)
self.assertEqual(parent.child.one.attr, 'parent child one')
self.assertEqual(parent.child.two.attr, 'parent child two')
with self.assertRaises(ImportError):
import parent.child.three
# now add project3
sys.path.append(os.path.join(self.root, 'project3'))
import parent.child.three
# the paths dynamically get longer, to include the new directories
self.assertEqual(len(parent.__path__), 3)
self.assertEqual(len(parent.child.__path__), 3)
self.assertEqual(parent.child.three.attr, 'parent child three')
class ZipWithMissingDirectory(NamespacePackageTest):
paths = ['missing_directory.zip']
@unittest.expectedFailure
def test_missing_directory(self):
# This will fail because missing_directory.zip contains:
# Length Date Time Name
# --------- ---------- ----- ----
# 29 2012-05-03 18:13 foo/one.py
# 0 2012-05-03 20:57 bar/
# 38 2012-05-03 20:57 bar/two.py
# --------- -------
# 67 3 files
# Because there is no 'foo/', the zipimporter currently doesn't
# know that foo is a namespace package
import foo.one
def test_present_directory(self):
# This succeeds because there is a "bar/" in the zip file
import bar.two
self.assertEqual(bar.two.attr, 'missing_directory foo two')
class ModuleAndNamespacePackageInSameDir(NamespacePackageTest):
paths = ['module_and_namespace_package']
def test_module_before_namespace_package(self):
# Make sure we find the module in preference to the
# namespace package.
import a_test
self.assertEqual(a_test.attr, 'in module')
class ReloadTests(NamespacePackageTest):
paths = ['portion1']
def test_simple_package(self):
import foo.one
foo = importlib.reload(foo)
self.assertEqual(foo.one.attr, 'portion1 foo one')
def test_cant_import_other(self):
import foo
with self.assertRaises(ImportError):
import foo.two
foo = importlib.reload(foo)
with self.assertRaises(ImportError):
import foo.two
def test_dynamic_path(self):
import foo.one
with self.assertRaises(ImportError):
import foo.two
# Now modify sys.path and reload.
sys.path.append(os.path.join(self.root, 'portion2'))
foo = importlib.reload(foo)
# And make sure foo.two is now importable
import foo.two
self.assertEqual(foo.two.attr, 'portion2 foo two')
if __name__ == "__main__":
unittest.main()

cosmopolitan/third_party/python/Lib/test/test_importlib/__init__.py

import os
from test.support import load_package_tests
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)

cosmopolitan/third_party/python/Lib/test/test_importlib/test_windows.py

from . import util as test_util
machinery = test_util.import_importlib('importlib.machinery')
import os
import re
import sys
import unittest
from test import support
from distutils.util import get_platform
from contextlib import contextmanager
from .util import temp_module
support.import_module('winreg', required_on=['win'])
from winreg import (
CreateKey, HKEY_CURRENT_USER,
SetValue, REG_SZ, KEY_ALL_ACCESS,
EnumKey, CloseKey, DeleteKey, OpenKey
)
def delete_registry_tree(root, subkey):
try:
hkey = OpenKey(root, subkey, access=KEY_ALL_ACCESS)
except OSError:
# subkey does not exist
return
while True:
try:
subsubkey = EnumKey(hkey, 0)
except OSError:
# no more subkeys
break
delete_registry_tree(hkey, subsubkey)
CloseKey(hkey)
DeleteKey(root, subkey)
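# Illustrative call (the subkey is hypothetical); the recursion is needed
# because a registry key that still has subkeys cannot be deleted directly:
#
#     delete_registry_tree(HKEY_CURRENT_USER, r'Software\Example\Scratch')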
@contextmanager
def setup_module(machinery, name, path=None):
if machinery.WindowsRegistryFinder.DEBUG_BUILD:
root = machinery.WindowsRegistryFinder.REGISTRY_KEY_DEBUG
else:
root = machinery.WindowsRegistryFinder.REGISTRY_KEY
key = root.format(fullname=name,
sys_version='%d.%d' % sys.version_info[:2])
try:
with temp_module(name, "a = 1") as location:
subkey = CreateKey(HKEY_CURRENT_USER, key)
if path is None:
path = location + ".py"
SetValue(subkey, "", REG_SZ, path)
yield
finally:
if machinery.WindowsRegistryFinder.DEBUG_BUILD:
key = os.path.dirname(key)
delete_registry_tree(HKEY_CURRENT_USER, key)
@unittest.skipUnless(sys.platform.startswith('win'), 'requires Windows')
class WindowsRegistryFinderTests:
# The module name is process-specific, allowing for
# simultaneous runs of the same test on a single machine.
test_module = "spamham{}".format(os.getpid())
def test_find_spec_missing(self):
spec = self.machinery.WindowsRegistryFinder.find_spec('spam')
self.assertIs(spec, None)
def test_find_module_missing(self):
loader = self.machinery.WindowsRegistryFinder.find_module('spam')
self.assertIs(loader, None)
def test_module_found(self):
with setup_module(self.machinery, self.test_module):
loader = self.machinery.WindowsRegistryFinder.find_module(self.test_module)
spec = self.machinery.WindowsRegistryFinder.find_spec(self.test_module)
self.assertIsNot(loader, None)
self.assertIsNot(spec, None)
def test_module_not_found(self):
with setup_module(self.machinery, self.test_module, path="."):
loader = self.machinery.WindowsRegistryFinder.find_module(self.test_module)
spec = self.machinery.WindowsRegistryFinder.find_spec(self.test_module)
self.assertIsNone(loader)
self.assertIsNone(spec)
(Frozen_WindowsRegistryFinderTests,
Source_WindowsRegistryFinderTests
) = test_util.test_both(WindowsRegistryFinderTests, machinery=machinery)
@unittest.skipUnless(sys.platform.startswith('win'), 'requires Windows')
class WindowsExtensionSuffixTests:
def test_tagged_suffix(self):
suffixes = self.machinery.EXTENSION_SUFFIXES
expected_tag = ".cp{0.major}{0.minor}-{1}.pyd".format(sys.version_info,
re.sub('[^a-zA-Z0-9]', '_', get_platform()))
try:
untagged_i = suffixes.index(".pyd")
except ValueError:
untagged_i = suffixes.index("_d.pyd")
expected_tag = "_d" + expected_tag
self.assertIn(expected_tag, suffixes)
# Ensure the tags are in the correct order
tagged_i = suffixes.index(expected_tag)
self.assertLess(tagged_i, untagged_i)
(Frozen_WindowsExtensionSuffixTests,
Source_WindowsExtensionSuffixTests
) = test_util.test_both(WindowsExtensionSuffixTests, machinery=machinery)

cosmopolitan/third_party/python/Lib/test/test_importlib/extension/test_loader.py

from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
import os.path
import sys
import types
import unittest
import importlib.util
import importlib
from test.support.script_helper import assert_python_failure
class LoaderTests(abc.LoaderTests):
"""Test load_module() for extension modules."""
def setUp(self):
self.loader = self.machinery.ExtensionFileLoader(util.EXTENSIONS.name,
util.EXTENSIONS.file_path)
def load_module(self, fullname):
return self.loader.load_module(fullname)
def test_load_module_API(self):
# Test the default argument for load_module().
self.loader.load_module()
self.loader.load_module(None)
with self.assertRaises(ImportError):
self.load_module('XXX')
def test_equality(self):
other = self.machinery.ExtensionFileLoader(util.EXTENSIONS.name,
util.EXTENSIONS.file_path)
self.assertEqual(self.loader, other)
def test_inequality(self):
other = self.machinery.ExtensionFileLoader('_' + util.EXTENSIONS.name,
util.EXTENSIONS.file_path)
self.assertNotEqual(self.loader, other)
def test_module(self):
with util.uncache(util.EXTENSIONS.name):
module = self.load_module(util.EXTENSIONS.name)
for attr, value in [('__name__', util.EXTENSIONS.name),
('__file__', util.EXTENSIONS.file_path),
('__package__', '')]:
self.assertEqual(getattr(module, attr), value)
self.assertIn(util.EXTENSIONS.name, sys.modules)
self.assertIsInstance(module.__loader__,
self.machinery.ExtensionFileLoader)
# No extension module as __init__ available for testing.
test_package = None
# No extension module in a package available for testing.
test_lacking_parent = None
def test_module_reuse(self):
with util.uncache(util.EXTENSIONS.name):
module1 = self.load_module(util.EXTENSIONS.name)
module2 = self.load_module(util.EXTENSIONS.name)
self.assertIs(module1, module2)
# No easy way to trigger a failure after a successful import.
test_state_after_failure = None
def test_unloadable(self):
name = 'asdfjkl;'
with self.assertRaises(ImportError) as cm:
self.load_module(name)
self.assertEqual(cm.exception.name, name)
def test_is_package(self):
self.assertFalse(self.loader.is_package(util.EXTENSIONS.name))
for suffix in self.machinery.EXTENSION_SUFFIXES:
path = os.path.join('some', 'path', 'pkg', '__init__' + suffix)
loader = self.machinery.ExtensionFileLoader('pkg', path)
self.assertTrue(loader.is_package('pkg'))
(Frozen_LoaderTests,
Source_LoaderTests
) = util.test_both(LoaderTests, machinery=machinery)
class MultiPhaseExtensionModuleTests(abc.LoaderTests):
"""Test loading extension modules with multi-phase initialization (PEP 489)
"""
def setUp(self):
self.name = '_testmultiphase'
finder = self.machinery.FileFinder(None)
self.spec = importlib.util.find_spec(self.name)
assert self.spec
self.loader = self.machinery.ExtensionFileLoader(
self.name, self.spec.origin)
# No extension module as __init__ available for testing.
test_package = None
# No extension module in a package available for testing.
test_lacking_parent = None
    # Handling failure on reload is up to the module.
test_state_after_failure = None
def test_module(self):
'''Test loading an extension module'''
with util.uncache(self.name):
module = self.load_module()
for attr, value in [('__name__', self.name),
('__file__', self.spec.origin),
('__package__', '')]:
self.assertEqual(getattr(module, attr), value)
with self.assertRaises(AttributeError):
module.__path__
self.assertIs(module, sys.modules[self.name])
self.assertIsInstance(module.__loader__,
self.machinery.ExtensionFileLoader)
def test_functionality(self):
'''Test basic functionality of stuff defined in an extension module'''
with util.uncache(self.name):
module = self.load_module()
self.assertIsInstance(module, types.ModuleType)
ex = module.Example()
self.assertEqual(ex.demo('abcd'), 'abcd')
self.assertEqual(ex.demo(), None)
with self.assertRaises(AttributeError):
ex.abc
ex.abc = 0
self.assertEqual(ex.abc, 0)
self.assertEqual(module.foo(9, 9), 18)
self.assertIsInstance(module.Str(), str)
self.assertEqual(module.Str(1) + '23', '123')
with self.assertRaises(module.error):
raise module.error()
self.assertEqual(module.int_const, 1969)
self.assertEqual(module.str_const, 'something different')
def test_reload(self):
'''Test that reload didn't re-set the module's attributes'''
with util.uncache(self.name):
module = self.load_module()
ex_class = module.Example
importlib.reload(module)
self.assertIs(ex_class, module.Example)
def test_try_registration(self):
'''Assert that the PyState_{Find,Add,Remove}Module C API doesn't work'''
module = self.load_module()
with self.subTest('PyState_FindModule'):
self.assertEqual(module.call_state_registration_func(0), None)
with self.subTest('PyState_AddModule'):
with self.assertRaises(SystemError):
module.call_state_registration_func(1)
with self.subTest('PyState_RemoveModule'):
with self.assertRaises(SystemError):
module.call_state_registration_func(2)
def load_module(self):
'''Load the module from the test extension'''
return self.loader.load_module(self.name)
def load_module_by_name(self, fullname):
'''Load a module from the test extension by name'''
origin = self.spec.origin
loader = self.machinery.ExtensionFileLoader(fullname, origin)
spec = importlib.util.spec_from_loader(fullname, loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
return module
def test_load_submodule(self):
'''Test loading a simulated submodule'''
module = self.load_module_by_name('pkg.' + self.name)
self.assertIsInstance(module, types.ModuleType)
self.assertEqual(module.__name__, 'pkg.' + self.name)
self.assertEqual(module.str_const, 'something different')
def test_load_short_name(self):
'''Test loading module with a one-character name'''
module = self.load_module_by_name('x')
self.assertIsInstance(module, types.ModuleType)
self.assertEqual(module.__name__, 'x')
self.assertEqual(module.str_const, 'something different')
self.assertNotIn('x', sys.modules)
def test_load_twice(self):
'''Test that 2 loads result in 2 module objects'''
module1 = self.load_module_by_name(self.name)
module2 = self.load_module_by_name(self.name)
self.assertIsNot(module1, module2)
def test_unloadable(self):
'''Test nonexistent module'''
name = 'asdfjkl;'
with self.assertRaises(ImportError) as cm:
self.load_module_by_name(name)
self.assertEqual(cm.exception.name, name)
def test_unloadable_nonascii(self):
'''Test behavior with nonexistent module with non-ASCII name'''
name = 'fo\xf3'
with self.assertRaises(ImportError) as cm:
self.load_module_by_name(name)
self.assertEqual(cm.exception.name, name)
def test_nonmodule(self):
'''Test returning a non-module object from create works'''
name = self.name + '_nonmodule'
mod = self.load_module_by_name(name)
self.assertNotEqual(type(mod), type(unittest))
self.assertEqual(mod.three, 3)
# issue 27782
def test_nonmodule_with_methods(self):
'''Test creating a non-module object with methods defined'''
name = self.name + '_nonmodule_with_methods'
mod = self.load_module_by_name(name)
self.assertNotEqual(type(mod), type(unittest))
self.assertEqual(mod.three, 3)
self.assertEqual(mod.bar(10, 1), 9)
def test_null_slots(self):
'''Test that NULL slots aren't a problem'''
name = self.name + '_null_slots'
module = self.load_module_by_name(name)
self.assertIsInstance(module, types.ModuleType)
self.assertEqual(module.__name__, name)
def test_bad_modules(self):
'''Test SystemError is raised for misbehaving extensions'''
for name_base in [
'bad_slot_large',
'bad_slot_negative',
'create_int_with_state',
'negative_size',
'export_null',
'export_uninitialized',
'export_raise',
'export_unreported_exception',
'create_null',
'create_raise',
'create_unreported_exception',
'nonmodule_with_exec_slots',
'exec_err',
'exec_raise',
'exec_unreported_exception',
]:
with self.subTest(name_base):
name = self.name + '_' + name_base
with self.assertRaises(SystemError):
self.load_module_by_name(name)
def test_nonascii(self):
'''Test that modules with non-ASCII names can be loaded'''
        # punycode behaves slightly differently for names containing some
        # ASCII and names containing no ASCII at all, so test both
cases = [
(self.name + '_zkou\u0161ka_na\u010dten\xed', 'Czech'),
('\uff3f\u30a4\u30f3\u30dd\u30fc\u30c8\u30c6\u30b9\u30c8',
'Japanese'),
]
for name, lang in cases:
with self.subTest(name):
module = self.load_module_by_name(name)
self.assertEqual(module.__name__, name)
self.assertEqual(module.__doc__, "Module named in %s" % lang)
@unittest.skipIf(not hasattr(sys, 'gettotalrefcount'),
'--with-pydebug has to be enabled for this test')
def test_bad_traverse(self):
''' Issue #32374: Test that traverse fails when accessing per-module
state before Py_mod_exec was executed.
(Multiphase initialization modules only)
'''
script = """if True:
try:
from test import support
import importlib.util as util
spec = util.find_spec('_testmultiphase')
spec.name = '_testmultiphase_with_bad_traverse'
with support.SuppressCrashReport():
m = spec.loader.create_module(spec)
except:
# Prevent Python-level exceptions from
# ending the process with non-zero status
# (We are testing for a crash in C-code)
pass"""
assert_python_failure("-c", script)
(Frozen_MultiPhaseExtensionModuleTests,
Source_MultiPhaseExtensionModuleTests
) = util.test_both(MultiPhaseExtensionModuleTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 11,906 | 301 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/extension/__main__.py | from . import load_tests
import unittest
unittest.main()
| 58 | 5 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/extension/test_path_hook.py | from .. import util
machinery = util.import_importlib('importlib.machinery')
import unittest
class PathHookTests:
"""Test the path hook for extension modules."""
# XXX Should it only succeed for pre-existing directories?
# XXX Should it only work for directories containing an extension module?
def hook(self, entry):
return self.machinery.FileFinder.path_hook(
(self.machinery.ExtensionFileLoader,
self.machinery.EXTENSION_SUFFIXES))(entry)
def test_success(self):
# Path hook should handle a directory where a known extension module
# exists.
self.assertTrue(hasattr(self.hook(util.EXTENSIONS.path), 'find_module'))
(Frozen_PathHooksTests,
Source_PathHooksTests
) = util.test_both(PathHookTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 864 | 32 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/extension/test_case_sensitivity.py | from importlib import _bootstrap_external
from test import support
import unittest
from .. import util
importlib = util.import_importlib('importlib')
machinery = util.import_importlib('importlib.machinery')
@unittest.skipIf(util.EXTENSIONS.filename is None, '_testcapi not available')
@util.case_insensitive_tests
class ExtensionModuleCaseSensitivityTest(util.CASEOKTestBase):
def find_module(self):
good_name = util.EXTENSIONS.name
bad_name = good_name.upper()
assert good_name != bad_name
finder = self.machinery.FileFinder(util.EXTENSIONS.path,
(self.machinery.ExtensionFileLoader,
self.machinery.EXTENSION_SUFFIXES))
return finder.find_module(bad_name)
def test_case_sensitive(self):
with support.EnvironmentVarGuard() as env:
env.unset('PYTHONCASEOK')
self.caseok_env_changed(should_exist=False)
loader = self.find_module()
self.assertIsNone(loader)
def test_case_insensitivity(self):
with support.EnvironmentVarGuard() as env:
env.set('PYTHONCASEOK', '1')
self.caseok_env_changed(should_exist=True)
loader = self.find_module()
self.assertTrue(hasattr(loader, 'load_module'))
(Frozen_ExtensionCaseSensitivity,
Source_ExtensionCaseSensitivity
) = util.test_both(ExtensionModuleCaseSensitivityTest, importlib=importlib,
machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 1,573 | 47 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/extension/__init__.py | import os
from test.support import load_package_tests
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
| 142 | 6 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/extension/test_finder.py | from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
import unittest
import warnings
class FinderTests(abc.FinderTests):
"""Test the finder for extension modules."""
def find_module(self, fullname):
importer = self.machinery.FileFinder(util.EXTENSIONS.path,
(self.machinery.ExtensionFileLoader,
self.machinery.EXTENSION_SUFFIXES))
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return importer.find_module(fullname)
def test_module(self):
self.assertTrue(self.find_module(util.EXTENSIONS.name))
# No extension module as an __init__ available for testing.
test_package = test_package_in_package = None
# No extension module in a package available for testing.
test_module_in_package = None
# Extension modules cannot be an __init__ for a package.
test_package_over_module = None
def test_failure(self):
self.assertIsNone(self.find_module('asdfjkl;'))
(Frozen_FinderTests,
Source_FinderTests
) = util.test_both(FinderTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 1,272 | 45 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/frozen/test_loader.py | from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
from test.support import captured_stdout
import types
import unittest
import warnings
class ExecModuleTests(abc.LoaderTests):
def exec_module(self, name):
with util.uncache(name), captured_stdout() as stdout:
spec = self.machinery.ModuleSpec(
name, self.machinery.FrozenImporter, origin='frozen',
is_package=self.machinery.FrozenImporter.is_package(name))
module = types.ModuleType(name)
module.__spec__ = spec
assert not hasattr(module, 'initialized')
self.machinery.FrozenImporter.exec_module(module)
self.assertTrue(module.initialized)
self.assertTrue(hasattr(module, '__spec__'))
self.assertEqual(module.__spec__.origin, 'frozen')
return module, stdout.getvalue()
def test_module(self):
name = '__hello__'
module, output = self.exec_module(name)
check = {'__name__': name}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
self.assertEqual(output, 'Hello world!\n')
self.assertTrue(hasattr(module, '__spec__'))
def test_package(self):
name = '__phello__'
module, output = self.exec_module(name)
check = {'__name__': name}
for attr, value in check.items():
attr_value = getattr(module, attr)
self.assertEqual(attr_value, value,
'for {name}.{attr}, {given!r} != {expected!r}'.format(
name=name, attr=attr, given=attr_value,
expected=value))
self.assertEqual(output, 'Hello world!\n')
def test_lacking_parent(self):
name = '__phello__.spam'
with util.uncache('__phello__'):
module, output = self.exec_module(name)
check = {'__name__': name}
for attr, value in check.items():
attr_value = getattr(module, attr)
self.assertEqual(attr_value, value,
                'for {name}.{attr}, {given!r} != {expected!r}'.format(
name=name, attr=attr, given=attr_value,
expected=value))
self.assertEqual(output, 'Hello world!\n')
def test_module_repr(self):
name = '__hello__'
module, output = self.exec_module(name)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
repr_str = self.machinery.FrozenImporter.module_repr(module)
self.assertEqual(repr_str,
"<module '__hello__' (frozen)>")
def test_module_repr_indirect(self):
name = '__hello__'
module, output = self.exec_module(name)
self.assertEqual(repr(module),
"<module '__hello__' (frozen)>")
# No way to trigger an error in a frozen module.
test_state_after_failure = None
def test_unloadable(self):
assert self.machinery.FrozenImporter.find_module('_not_real') is None
with self.assertRaises(ImportError) as cm:
self.exec_module('_not_real')
self.assertEqual(cm.exception.name, '_not_real')
(Frozen_ExecModuleTests,
Source_ExecModuleTests
) = util.test_both(ExecModuleTests, machinery=machinery)
class LoaderTests(abc.LoaderTests):
def test_module(self):
with util.uncache('__hello__'), captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.machinery.FrozenImporter.load_module('__hello__')
check = {'__name__': '__hello__',
'__package__': '',
'__loader__': self.machinery.FrozenImporter,
}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
self.assertFalse(hasattr(module, '__file__'))
def test_package(self):
with util.uncache('__phello__'), captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.machinery.FrozenImporter.load_module('__phello__')
check = {'__name__': '__phello__',
'__package__': '__phello__',
'__path__': [],
'__loader__': self.machinery.FrozenImporter,
}
for attr, value in check.items():
attr_value = getattr(module, attr)
self.assertEqual(attr_value, value,
"for __phello__.%s, %r != %r" %
(attr, attr_value, value))
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
self.assertFalse(hasattr(module, '__file__'))
def test_lacking_parent(self):
with util.uncache('__phello__', '__phello__.spam'), \
captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.machinery.FrozenImporter.load_module('__phello__.spam')
check = {'__name__': '__phello__.spam',
'__package__': '__phello__',
'__loader__': self.machinery.FrozenImporter,
}
for attr, value in check.items():
attr_value = getattr(module, attr)
self.assertEqual(attr_value, value,
"for __phello__.spam.%s, %r != %r" %
(attr, attr_value, value))
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
self.assertFalse(hasattr(module, '__file__'))
def test_module_reuse(self):
with util.uncache('__hello__'), captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module1 = self.machinery.FrozenImporter.load_module('__hello__')
module2 = self.machinery.FrozenImporter.load_module('__hello__')
self.assertIs(module1, module2)
self.assertEqual(stdout.getvalue(),
'Hello world!\nHello world!\n')
def test_module_repr(self):
with util.uncache('__hello__'), captured_stdout():
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.machinery.FrozenImporter.load_module('__hello__')
repr_str = self.machinery.FrozenImporter.module_repr(module)
self.assertEqual(repr_str,
"<module '__hello__' (frozen)>")
def test_module_repr_indirect(self):
with util.uncache('__hello__'), captured_stdout():
module = self.machinery.FrozenImporter.load_module('__hello__')
self.assertEqual(repr(module),
"<module '__hello__' (frozen)>")
# No way to trigger an error in a frozen module.
test_state_after_failure = None
def test_unloadable(self):
assert self.machinery.FrozenImporter.find_module('_not_real') is None
with self.assertRaises(ImportError) as cm:
self.machinery.FrozenImporter.load_module('_not_real')
self.assertEqual(cm.exception.name, '_not_real')
(Frozen_LoaderTests,
Source_LoaderTests
) = util.test_both(LoaderTests, machinery=machinery)
class InspectLoaderTests:
"""Tests for the InspectLoader methods for FrozenImporter."""
def test_get_code(self):
# Make sure that the code object is good.
name = '__hello__'
with captured_stdout() as stdout:
code = self.machinery.FrozenImporter.get_code(name)
mod = types.ModuleType(name)
exec(code, mod.__dict__)
self.assertTrue(hasattr(mod, 'initialized'))
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
def test_get_source(self):
# Should always return None.
result = self.machinery.FrozenImporter.get_source('__hello__')
self.assertIsNone(result)
def test_is_package(self):
# Should be able to tell what is a package.
test_for = (('__hello__', False), ('__phello__', True),
('__phello__.spam', False))
for name, is_package in test_for:
result = self.machinery.FrozenImporter.is_package(name)
self.assertEqual(bool(result), is_package)
def test_failure(self):
# Raise ImportError for modules that are not frozen.
for meth_name in ('get_code', 'get_source', 'is_package'):
method = getattr(self.machinery.FrozenImporter, meth_name)
with self.assertRaises(ImportError) as cm:
method('importlib')
self.assertEqual(cm.exception.name, 'importlib')
(Frozen_ILTests,
Source_ILTests
) = util.test_both(InspectLoaderTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 9,339 | 226 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/frozen/__main__.py | from . import load_tests
import unittest
unittest.main()
| 58 | 5 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/frozen/__init__.py | import os
from test.support import load_package_tests
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
| 142 | 6 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/frozen/test_finder.py | from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
import unittest
class FindSpecTests(abc.FinderTests):
"""Test finding frozen modules."""
def find(self, name, path=None):
finder = self.machinery.FrozenImporter
return finder.find_spec(name, path)
def test_module(self):
name = '__hello__'
spec = self.find(name)
self.assertEqual(spec.origin, 'frozen')
def test_package(self):
spec = self.find('__phello__')
self.assertIsNotNone(spec)
def test_module_in_package(self):
spec = self.find('__phello__.spam', ['__phello__'])
self.assertIsNotNone(spec)
# No frozen package within another package to test with.
test_package_in_package = None
# No easy way to test.
test_package_over_module = None
def test_failure(self):
spec = self.find('<not real>')
self.assertIsNone(spec)
(Frozen_FindSpecTests,
Source_FindSpecTests
) = util.test_both(FindSpecTests, machinery=machinery)
class FinderTests(abc.FinderTests):
"""Test finding frozen modules."""
def find(self, name, path=None):
finder = self.machinery.FrozenImporter
return finder.find_module(name, path)
def test_module(self):
name = '__hello__'
loader = self.find(name)
self.assertTrue(hasattr(loader, 'load_module'))
def test_package(self):
loader = self.find('__phello__')
self.assertTrue(hasattr(loader, 'load_module'))
def test_module_in_package(self):
loader = self.find('__phello__.spam', ['__phello__'])
self.assertTrue(hasattr(loader, 'load_module'))
# No frozen package within another package to test with.
test_package_in_package = None
# No easy way to test.
test_package_over_module = None
def test_failure(self):
loader = self.find('<not real>')
self.assertIsNone(loader)
(Frozen_FinderTests,
Source_FinderTests
) = util.test_both(FinderTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 2,105 | 85 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/nested_portion1.zip | (binary zip archive, not representable as text; it packages nested_portion1/foo/one.py, whose sole line is: attr = 'portion1 foo one') | 556 | 8 | jart/cosmopolitan | false
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/top_level_portion1.zip | (binary zip archive, not representable as text; it packages foo/one.py, whose sole line is: attr = 'portion1 foo one') | 332 | 8 | jart/cosmopolitan | false
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/missing_directory.zip | (binary zip archive, not representable as text; it packages bar/two.py with: attr = 'missing_directory foo two', and foo/one.py with: attr = 'portion1 foo one') | 515 | 13 | jart/cosmopolitan | false
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/project3/parent/child/three.py | attr = 'parent child three'
| 28 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/not_a_namespace_pkg/foo/one.py | attr = 'portion1 foo one'
| 26 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/not_a_namespace_pkg/foo/__init__.py | 0 | 1 | jart/cosmopolitan | false |
|
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/portion2/foo/two.py | attr = 'portion2 foo two'
| 26 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/portion1/foo/one.py | attr = 'portion1 foo one'
| 26 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/project2/parent/child/two.py | attr = 'parent child two'
| 26 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/module_and_namespace_package/a_test.py | attr = 'in module'
| 19 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/module_and_namespace_package/a_test/empty | 0 | 1 | jart/cosmopolitan | false |
|
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/project1/parent/child/one.py | attr = 'parent child one'
| 26 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/both_portions/foo/two.py | attr = 'both_portions foo two'
| 31 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/namespace_pkgs/both_portions/foo/one.py | attr = 'both_portions foo one'
| 31 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/builtin/test_loader.py | from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
import sys
import types
import unittest
@unittest.skipIf(util.BUILTINS.good_name is None, 'no reasonable builtin module')
class LoaderTests(abc.LoaderTests):
"""Test load_module() for built-in modules."""
def setUp(self):
self.verification = {'__name__': 'errno', '__package__': '',
'__loader__': self.machinery.BuiltinImporter}
def verify(self, module):
"""Verify that the module matches against what it should have."""
self.assertIsInstance(module, types.ModuleType)
for attr, value in self.verification.items():
self.assertEqual(getattr(module, attr), value)
self.assertIn(module.__name__, sys.modules)
def load_module(self, name):
return self.machinery.BuiltinImporter.load_module(name)
def test_module(self):
# Common case.
with util.uncache(util.BUILTINS.good_name):
module = self.load_module(util.BUILTINS.good_name)
self.verify(module)
# Built-in modules cannot be a package.
test_package = test_lacking_parent = None
# No way to force an import failure.
test_state_after_failure = None
def test_module_reuse(self):
# Test that the same module is used in a reload.
with util.uncache(util.BUILTINS.good_name):
module1 = self.load_module(util.BUILTINS.good_name)
module2 = self.load_module(util.BUILTINS.good_name)
self.assertIs(module1, module2)
def test_unloadable(self):
name = 'dssdsdfff'
assert name not in sys.builtin_module_names
with self.assertRaises(ImportError) as cm:
self.load_module(name)
self.assertEqual(cm.exception.name, name)
def test_already_imported(self):
# Using the name of a module already imported but not a built-in should
# still fail.
module_name = 'builtin_reload_test'
assert module_name not in sys.builtin_module_names
with util.uncache(module_name):
module = types.ModuleType(module_name)
sys.modules[module_name] = module
with self.assertRaises(ImportError) as cm:
self.load_module(module_name)
self.assertEqual(cm.exception.name, module_name)
(Frozen_LoaderTests,
Source_LoaderTests
) = util.test_both(LoaderTests, machinery=machinery)
@unittest.skipIf(util.BUILTINS.good_name is None, 'no reasonable builtin module')
class InspectLoaderTests:
"""Tests for InspectLoader methods for BuiltinImporter."""
def test_get_code(self):
# There is no code object.
result = self.machinery.BuiltinImporter.get_code(util.BUILTINS.good_name)
self.assertIsNone(result)
def test_get_source(self):
# There is no source.
result = self.machinery.BuiltinImporter.get_source(util.BUILTINS.good_name)
self.assertIsNone(result)
def test_is_package(self):
# Cannot be a package.
result = self.machinery.BuiltinImporter.is_package(util.BUILTINS.good_name)
self.assertFalse(result)
@unittest.skipIf(util.BUILTINS.bad_name is None, 'all modules are built in')
def test_not_builtin(self):
# Modules not built-in should raise ImportError.
for meth_name in ('get_code', 'get_source', 'is_package'):
method = getattr(self.machinery.BuiltinImporter, meth_name)
with self.assertRaises(ImportError) as cm:
method(util.BUILTINS.bad_name)
(Frozen_InspectLoaderTests,
Source_InspectLoaderTests
) = util.test_both(InspectLoaderTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 3,741 | 109 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/builtin/__main__.py | from . import load_tests
import unittest
unittest.main()
| 58 | 5 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/builtin/__init__.py | import os
from test.support import load_package_tests
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
| 142 | 6 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/builtin/test_finder.py | from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
import sys
import unittest
@unittest.skipIf(util.BUILTINS.good_name is None, 'no reasonable builtin module')
class FindSpecTests(abc.FinderTests):
"""Test find_spec() for built-in modules."""
def test_module(self):
# Common case.
with util.uncache(util.BUILTINS.good_name):
found = self.machinery.BuiltinImporter.find_spec(util.BUILTINS.good_name)
self.assertTrue(found)
self.assertEqual(found.origin, 'built-in')
# Built-in modules cannot be a package.
test_package = None
# Built-in modules cannot be in a package.
test_module_in_package = None
# Built-in modules cannot be a package.
test_package_in_package = None
# Built-in modules cannot be a package.
test_package_over_module = None
def test_failure(self):
name = 'importlib'
assert name not in sys.builtin_module_names
spec = self.machinery.BuiltinImporter.find_spec(name)
self.assertIsNone(spec)
def test_ignore_path(self):
# The value for 'path' should always trigger a failed import.
with util.uncache(util.BUILTINS.good_name):
spec = self.machinery.BuiltinImporter.find_spec(util.BUILTINS.good_name,
['pkg'])
self.assertIsNone(spec)
(Frozen_FindSpecTests,
Source_FindSpecTests
) = util.test_both(FindSpecTests, machinery=machinery)
@unittest.skipIf(util.BUILTINS.good_name is None, 'no reasonable builtin module')
class FinderTests(abc.FinderTests):
"""Test find_module() for built-in modules."""
def test_module(self):
# Common case.
with util.uncache(util.BUILTINS.good_name):
found = self.machinery.BuiltinImporter.find_module(util.BUILTINS.good_name)
self.assertTrue(found)
self.assertTrue(hasattr(found, 'load_module'))
# Built-in modules cannot be a package.
test_package = test_package_in_package = test_package_over_module = None
# Built-in modules cannot be in a package.
test_module_in_package = None
def test_failure(self):
assert 'importlib' not in sys.builtin_module_names
loader = self.machinery.BuiltinImporter.find_module('importlib')
self.assertIsNone(loader)
def test_ignore_path(self):
# The value for 'path' should always trigger a failed import.
with util.uncache(util.BUILTINS.good_name):
loader = self.machinery.BuiltinImporter.find_module(util.BUILTINS.good_name,
['pkg'])
self.assertIsNone(loader)
(Frozen_FinderTests,
Source_FinderTests
) = util.test_both(FinderTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 2,891 | 91 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/test_path.py | from .. import util
importlib = util.import_importlib('importlib')
machinery = util.import_importlib('importlib.machinery')
import os
import sys
import tempfile
from types import ModuleType
import unittest
import warnings
import zipimport
class FinderTests:
"""Tests for PathFinder."""
find = None
check_found = None
def test_failure(self):
# Test None returned upon not finding a suitable loader.
module = '<test module>'
with util.import_state():
self.assertIsNone(self.find(module))
def test_sys_path(self):
# Test that sys.path is used when 'path' is None.
# Implicitly tests that sys.path_importer_cache is used.
module = '<test module>'
path = '<test path>'
importer = util.mock_spec(module)
with util.import_state(path_importer_cache={path: importer},
path=[path]):
found = self.find(module)
self.check_found(found, importer)
def test_path(self):
# Test that 'path' is used when set.
# Implicitly tests that sys.path_importer_cache is used.
module = '<test module>'
path = '<test path>'
importer = util.mock_spec(module)
with util.import_state(path_importer_cache={path: importer}):
found = self.find(module, [path])
self.check_found(found, importer)
def test_empty_list(self):
# An empty list should not count as asking for sys.path.
module = 'module'
path = '<test path>'
importer = util.mock_spec(module)
with util.import_state(path_importer_cache={path: importer},
path=[path]):
self.assertIsNone(self.find('module', []))
def test_path_hooks(self):
# Test that sys.path_hooks is used.
# Test that sys.path_importer_cache is set.
module = '<test module>'
path = '<test path>'
importer = util.mock_spec(module)
hook = util.mock_path_hook(path, importer=importer)
with util.import_state(path_hooks=[hook]):
found = self.find(module, [path])
self.check_found(found, importer)
self.assertIn(path, sys.path_importer_cache)
self.assertIs(sys.path_importer_cache[path], importer)
def test_empty_path_hooks(self):
        # Test that if sys.path_hooks is empty a warning is raised,
        # sys.path_importer_cache gets set to None, and PathFinder returns None.
path_entry = 'bogus_path'
with util.import_state(path_importer_cache={}, path_hooks=[],
path=[path_entry]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertIsNone(self.find('os'))
self.assertIsNone(sys.path_importer_cache[path_entry])
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, ImportWarning))
def test_path_importer_cache_empty_string(self):
# The empty string should create a finder using the cwd.
path = ''
module = '<test module>'
importer = util.mock_spec(module)
hook = util.mock_path_hook(os.getcwd(), importer=importer)
with util.import_state(path=[path], path_hooks=[hook]):
found = self.find(module)
self.check_found(found, importer)
self.assertIn(os.getcwd(), sys.path_importer_cache)
def test_None_on_sys_path(self):
# Putting None in sys.path[0] caused an import regression from Python
# 3.2: http://bugs.python.org/issue16514
new_path = sys.path[:]
new_path.insert(0, None)
new_path_importer_cache = sys.path_importer_cache.copy()
new_path_importer_cache.pop(None, None)
new_path_hooks = [zipimport.zipimporter,
self.machinery.FileFinder.path_hook(
*self.importlib._bootstrap_external._get_supported_file_loaders())]
missing = object()
email = sys.modules.pop('email', missing)
try:
with util.import_state(meta_path=sys.meta_path[:],
path=new_path,
path_importer_cache=new_path_importer_cache,
path_hooks=new_path_hooks):
module = self.importlib.import_module('email')
self.assertIsInstance(module, ModuleType)
finally:
if email is not missing:
sys.modules['email'] = email
def test_finder_with_find_module(self):
class TestFinder:
def find_module(self, fullname):
return self.to_return
failing_finder = TestFinder()
failing_finder.to_return = None
path = 'testing path'
with util.import_state(path_importer_cache={path: failing_finder}):
self.assertIsNone(
self.machinery.PathFinder.find_spec('whatever', [path]))
success_finder = TestFinder()
success_finder.to_return = __loader__
with util.import_state(path_importer_cache={path: success_finder}):
spec = self.machinery.PathFinder.find_spec('whatever', [path])
self.assertEqual(spec.loader, __loader__)
def test_finder_with_find_loader(self):
class TestFinder:
loader = None
portions = []
def find_loader(self, fullname):
return self.loader, self.portions
path = 'testing path'
with util.import_state(path_importer_cache={path: TestFinder()}):
self.assertIsNone(
self.machinery.PathFinder.find_spec('whatever', [path]))
success_finder = TestFinder()
success_finder.loader = __loader__
with util.import_state(path_importer_cache={path: success_finder}):
spec = self.machinery.PathFinder.find_spec('whatever', [path])
self.assertEqual(spec.loader, __loader__)
def test_finder_with_find_spec(self):
class TestFinder:
spec = None
def find_spec(self, fullname, target=None):
return self.spec
path = 'testing path'
with util.import_state(path_importer_cache={path: TestFinder()}):
self.assertIsNone(
self.machinery.PathFinder.find_spec('whatever', [path]))
success_finder = TestFinder()
success_finder.spec = self.machinery.ModuleSpec('whatever', __loader__)
with util.import_state(path_importer_cache={path: success_finder}):
got = self.machinery.PathFinder.find_spec('whatever', [path])
self.assertEqual(got, success_finder.spec)
def test_deleted_cwd(self):
# Issue #22834
old_dir = os.getcwd()
self.addCleanup(os.chdir, old_dir)
new_dir = tempfile.mkdtemp()
try:
os.chdir(new_dir)
try:
os.rmdir(new_dir)
except OSError:
# EINVAL on Solaris, EBUSY on AIX, ENOTEMPTY on Windows
self.skipTest("platform does not allow "
"the deletion of the cwd")
except:
os.chdir(old_dir)
os.rmdir(new_dir)
raise
with util.import_state(path=['']):
# Do not want FileNotFoundError raised.
self.assertIsNone(self.machinery.PathFinder.find_spec('whatever'))
class FindModuleTests(FinderTests):
def find(self, *args, **kwargs):
return self.machinery.PathFinder.find_module(*args, **kwargs)
def check_found(self, found, importer):
self.assertIs(found, importer)
(Frozen_FindModuleTests,
Source_FindModuleTests
) = util.test_both(FindModuleTests, importlib=importlib, machinery=machinery)
class FindSpecTests(FinderTests):
def find(self, *args, **kwargs):
return self.machinery.PathFinder.find_spec(*args, **kwargs)
def check_found(self, found, importer):
self.assertIs(found.loader, importer)
(Frozen_FindSpecTests,
Source_FindSpecTests
) = util.test_both(FindSpecTests, importlib=importlib, machinery=machinery)
class PathEntryFinderTests:
def test_finder_with_failing_find_spec(self):
# PathEntryFinder with find_module() defined should work.
# Issue #20763.
class Finder:
path_location = 'test_finder_with_find_module'
def __init__(self, path):
if path != self.path_location:
raise ImportError
@staticmethod
def find_module(fullname):
return None
with util.import_state(path=[Finder.path_location]+sys.path[:],
path_hooks=[Finder]):
self.machinery.PathFinder.find_spec('importlib')
def test_finder_with_failing_find_module(self):
# PathEntryFinder with find_module() defined should work.
# Issue #20763.
class Finder:
path_location = 'test_finder_with_find_module'
def __init__(self, path):
if path != self.path_location:
raise ImportError
@staticmethod
def find_module(fullname):
return None
with util.import_state(path=[Finder.path_location]+sys.path[:],
path_hooks=[Finder]):
self.machinery.PathFinder.find_module('importlib')
(Frozen_PEFTests,
Source_PEFTests
) = util.test_both(PathEntryFinderTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 9,669 | 258 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/test_meta_path.py | from .. import util
import importlib._bootstrap
import sys
from types import MethodType
import unittest
import warnings
class CallingOrder:
"""Calls to the importers on sys.meta_path happen in order that they are
specified in the sequence, starting with the first importer
[first called], and then continuing on down until one is found that doesn't
return None [continuing]."""
def test_first_called(self):
# [first called]
mod = 'top_level'
with util.mock_spec(mod) as first, util.mock_spec(mod) as second:
with util.import_state(meta_path=[first, second]):
self.assertIs(self.__import__(mod), first.modules[mod])
def test_continuing(self):
# [continuing]
mod_name = 'for_real'
with util.mock_spec('nonexistent') as first, \
util.mock_spec(mod_name) as second:
first.find_spec = lambda self, fullname, path=None, parent=None: None
with util.import_state(meta_path=[first, second]):
self.assertIs(self.__import__(mod_name), second.modules[mod_name])
def test_empty(self):
# Raise an ImportWarning if sys.meta_path is empty.
module_name = 'nothing'
try:
del sys.modules[module_name]
except KeyError:
pass
with util.import_state(meta_path=[]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertIsNone(importlib._bootstrap._find_spec('nothing',
None))
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, ImportWarning))
(Frozen_CallingOrder,
Source_CallingOrder
) = util.test_both(CallingOrder, __import__=util.__import__)
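# A minimal, self-contained sketch (not part of the original test suite) of
# the calling-order contract documented in CallingOrder above: finders are
# consulted in sys.meta_path order, and the first non-None answer wins. The
# names _DecliningFinder, _AcceptingFinder, and _demo_meta_path_order are
# hypothetical, for illustration only.
def _demo_meta_path_order():
    from importlib.machinery import ModuleSpec
    class _DecliningFinder:
        @staticmethod
        def find_spec(fullname, path=None, target=None):
            return None  # declines, so the search moves to the next finder
    class _AcceptingFinder:
        @staticmethod
        def find_spec(fullname, path=None, target=None):
            return ModuleSpec(fullname, loader=None)
    for finder in [_DecliningFinder, _AcceptingFinder]:
        spec = finder.find_spec('anything')
        if spec is not None:  # mirrors how the import machinery walks the list
            return spec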
class CallSignature:
"""If there is no __path__ entry on the parent module, then 'path' is None
[no path]. Otherwise, the value for __path__ is passed in for the 'path'
argument [path set]."""
def log_finder(self, importer):
fxn = getattr(importer, self.finder_name)
log = []
def wrapper(self, *args, **kwargs):
log.append([args, kwargs])
return fxn(*args, **kwargs)
return log, wrapper
def test_no_path(self):
# [no path]
mod_name = 'top_level'
assert '.' not in mod_name
with self.mock_modules(mod_name) as importer:
log, wrapped_call = self.log_finder(importer)
setattr(importer, self.finder_name, MethodType(wrapped_call, importer))
with util.import_state(meta_path=[importer]):
self.__import__(mod_name)
assert len(log) == 1
args = log[0][0]
# Assuming all arguments are positional.
self.assertEqual(args[0], mod_name)
self.assertIsNone(args[1])
def test_with_path(self):
# [path set]
pkg_name = 'pkg'
mod_name = pkg_name + '.module'
path = [42]
assert '.' in mod_name
with self.mock_modules(pkg_name+'.__init__', mod_name) as importer:
importer.modules[pkg_name].__path__ = path
log, wrapped_call = self.log_finder(importer)
setattr(importer, self.finder_name, MethodType(wrapped_call, importer))
with util.import_state(meta_path=[importer]):
self.__import__(mod_name)
assert len(log) == 2
args = log[1][0]
kwargs = log[1][1]
# Assuming all arguments are positional.
self.assertFalse(kwargs)
self.assertEqual(args[0], mod_name)
self.assertIs(args[1], path)
class CallSignaturePEP302(CallSignature):
mock_modules = util.mock_modules
finder_name = 'find_module'
(Frozen_CallSignaturePEP302,
Source_CallSignaturePEP302
) = util.test_both(CallSignaturePEP302, __import__=util.__import__)
class CallSignaturePEP451(CallSignature):
mock_modules = util.mock_spec
finder_name = 'find_spec'
(Frozen_CallSignaturePEP451,
Source_CallSignaturePEP451
) = util.test_both(CallSignaturePEP451, __import__=util.__import__)
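# An illustrative sketch (hypothetical helper, not used by the tests above)
# of the contract documented in CallSignature: a recording finder placed on
# sys.meta_path sees path=None for a top-level name and the parent package's
# __path__ for a submodule. Already-imported modules are served straight
# from sys.modules, so the log only fills in for names not yet cached.
def _demo_call_signature():
    log = []
    class _Recorder:
        @staticmethod
        def find_spec(fullname, path=None, target=None):
            log.append((fullname, path))
            return None  # decline, letting the normal machinery resolve it
    sys.meta_path.insert(0, _Recorder)
    try:
        import xml.sax  # any package.submodule pair works here
    finally:
        sys.meta_path.remove(_Recorder)
    return log  # e.g. [('xml', None), ('xml.sax', <xml.__path__>)]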
if __name__ == '__main__':
unittest.main()
| 4,317 | 126 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/test___loader__.py | from importlib import machinery
import sys
import types
import unittest
from .. import util
class SpecLoaderMock:
def find_spec(self, fullname, path=None, target=None):
return machinery.ModuleSpec(fullname, self)
def create_module(self, spec):
return None
def exec_module(self, module):
pass
class SpecLoaderAttributeTests:
def test___loader__(self):
loader = SpecLoaderMock()
with util.uncache('blah'), util.import_state(meta_path=[loader]):
module = self.__import__('blah')
self.assertEqual(loader, module.__loader__)
(Frozen_SpecTests,
Source_SpecTests
) = util.test_both(SpecLoaderAttributeTests, __import__=util.__import__)
class LoaderMock:
def find_module(self, fullname, path=None):
return self
def load_module(self, fullname):
sys.modules[fullname] = self.module
return self.module
class LoaderAttributeTests:
def test___loader___missing(self):
module = types.ModuleType('blah')
try:
del module.__loader__
except AttributeError:
pass
loader = LoaderMock()
loader.module = module
with util.uncache('blah'), util.import_state(meta_path=[loader]):
module = self.__import__('blah')
self.assertEqual(loader, module.__loader__)
def test___loader___is_None(self):
module = types.ModuleType('blah')
module.__loader__ = None
loader = LoaderMock()
loader.module = module
with util.uncache('blah'), util.import_state(meta_path=[loader]):
returned_module = self.__import__('blah')
self.assertEqual(loader, module.__loader__)
(Frozen_Tests,
Source_Tests
) = util.test_both(LoaderAttributeTests, __import__=util.__import__)
if __name__ == '__main__':
unittest.main()
| 1,861 | 76 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/__main__.py | from . import load_tests
import unittest
unittest.main()
| 58 | 5 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/test_packages.py | from .. import util
import sys
import unittest
from test import support
class ParentModuleTests:
"""Importing a submodule should import the parent modules."""
def test_import_parent(self):
with util.mock_spec('pkg.__init__', 'pkg.module') as mock:
with util.import_state(meta_path=[mock]):
module = self.__import__('pkg.module')
self.assertIn('pkg', sys.modules)
def test_bad_parent(self):
with util.mock_spec('pkg.module') as mock:
with util.import_state(meta_path=[mock]):
with self.assertRaises(ImportError) as cm:
self.__import__('pkg.module')
self.assertEqual(cm.exception.name, 'pkg')
def test_raising_parent_after_importing_child(self):
def __init__():
import pkg.module
1/0
mock = util.mock_spec('pkg.__init__', 'pkg.module',
module_code={'pkg': __init__})
with mock:
with util.import_state(meta_path=[mock]):
with self.assertRaises(ZeroDivisionError):
self.__import__('pkg')
self.assertNotIn('pkg', sys.modules)
self.assertIn('pkg.module', sys.modules)
with self.assertRaises(ZeroDivisionError):
self.__import__('pkg.module')
self.assertNotIn('pkg', sys.modules)
self.assertIn('pkg.module', sys.modules)
def test_raising_parent_after_relative_importing_child(self):
def __init__():
from . import module
1/0
mock = util.mock_spec('pkg.__init__', 'pkg.module',
module_code={'pkg': __init__})
with mock:
with util.import_state(meta_path=[mock]):
with self.assertRaises((ZeroDivisionError, ImportError)):
# This raises ImportError on the "from . import module"
# line, not sure why.
self.__import__('pkg')
self.assertNotIn('pkg', sys.modules)
with self.assertRaises((ZeroDivisionError, ImportError)):
self.__import__('pkg.module')
self.assertNotIn('pkg', sys.modules)
# XXX False
#self.assertIn('pkg.module', sys.modules)
def test_raising_parent_after_double_relative_importing_child(self):
def __init__():
from ..subpkg import module
1/0
mock = util.mock_spec('pkg.__init__', 'pkg.subpkg.__init__',
'pkg.subpkg.module',
module_code={'pkg.subpkg': __init__})
with mock:
with util.import_state(meta_path=[mock]):
with self.assertRaises((ZeroDivisionError, ImportError)):
# This raises ImportError on the "from ..subpkg import module"
# line, not sure why.
self.__import__('pkg.subpkg')
self.assertNotIn('pkg.subpkg', sys.modules)
with self.assertRaises((ZeroDivisionError, ImportError)):
self.__import__('pkg.subpkg.module')
self.assertNotIn('pkg.subpkg', sys.modules)
# XXX False
#self.assertIn('pkg.subpkg.module', sys.modules)
def test_module_not_package(self):
        # Trying to import a submodule from a non-package should raise ImportError.
assert not hasattr(sys, '__path__')
with self.assertRaises(ImportError) as cm:
self.__import__('sys.no_submodules_here')
self.assertEqual(cm.exception.name, 'sys.no_submodules_here')
def test_module_not_package_but_side_effects(self):
# If a module injects something into sys.modules as a side-effect, then
# pick up on that fact.
name = 'mod'
subname = name + '.b'
def module_injection():
sys.modules[subname] = 'total bunk'
mock_spec = util.mock_spec('mod',
module_code={'mod': module_injection})
with mock_spec as mock:
with util.import_state(meta_path=[mock]):
try:
submodule = self.__import__(subname)
finally:
support.unload(subname)
(Frozen_ParentTests,
Source_ParentTests
) = util.test_both(ParentModuleTests, __import__=util.__import__)
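# A tiny runnable illustration (not part of the test suite) of the rule in
# ParentModuleTests' docstring: importing a dotted name binds every parent
# package in sys.modules as well.
def _demo_parent_import():
    __import__('importlib.machinery')
    assert 'importlib' in sys.modules
    assert 'importlib.machinery' in sys.modules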
if __name__ == '__main__':
unittest.main()
| 4,544 | 111 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/test_api.py | from .. import util
from importlib import machinery
import sys
import types
import unittest
PKG_NAME = 'fine'
SUBMOD_NAME = 'fine.bogus'
class BadSpecFinderLoader:
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if fullname == SUBMOD_NAME:
spec = machinery.ModuleSpec(fullname, cls)
return spec
@staticmethod
def create_module(spec):
return None
@staticmethod
def exec_module(module):
if module.__name__ == SUBMOD_NAME:
raise ImportError('I cannot be loaded!')
class BadLoaderFinder:
@classmethod
def find_module(cls, fullname, path):
if fullname == SUBMOD_NAME:
return cls
@classmethod
def load_module(cls, fullname):
if fullname == SUBMOD_NAME:
raise ImportError('I cannot be loaded!')
class APITest:
"""Test API-specific details for __import__ (e.g. raising the right
exception when passing in an int for the module name)."""
def test_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
util.import_importlib('some module that does not exist')
    def test_name_requires_rpartition(self):
# Raise TypeError if a non-string is passed in for the module name.
with self.assertRaises(TypeError):
self.__import__(42)
def test_negative_level(self):
# Raise ValueError when a negative level is specified.
        # PEP 328 did away with sys.modules None entries and the ambiguity of
        # absolute/relative imports.
with self.assertRaises(ValueError):
self.__import__('os', globals(), level=-1)
def test_nonexistent_fromlist_entry(self):
# If something in fromlist doesn't exist, that's okay.
# issue15715
mod = types.ModuleType(PKG_NAME)
mod.__path__ = ['XXX']
with util.import_state(meta_path=[self.bad_finder_loader]):
with util.uncache(PKG_NAME):
sys.modules[PKG_NAME] = mod
self.__import__(PKG_NAME, fromlist=['not here'])
def test_fromlist_load_error_propagates(self):
# If something in fromlist triggers an exception not related to not
# existing, let that exception propagate.
# issue15316
mod = types.ModuleType(PKG_NAME)
mod.__path__ = ['XXX']
with util.import_state(meta_path=[self.bad_finder_loader]):
with util.uncache(PKG_NAME):
sys.modules[PKG_NAME] = mod
with self.assertRaises(ImportError):
self.__import__(PKG_NAME,
fromlist=[SUBMOD_NAME.rpartition('.')[-1]])
def test_blocked_fromlist(self):
# If fromlist entry is None, let a ModuleNotFoundError propagate.
# issue31642
mod = types.ModuleType(PKG_NAME)
mod.__path__ = []
with util.import_state(meta_path=[self.bad_finder_loader]):
with util.uncache(PKG_NAME, SUBMOD_NAME):
sys.modules[PKG_NAME] = mod
sys.modules[SUBMOD_NAME] = None
with self.assertRaises(ModuleNotFoundError) as cm:
self.__import__(PKG_NAME,
fromlist=[SUBMOD_NAME.rpartition('.')[-1]])
self.assertEqual(cm.exception.name, SUBMOD_NAME)
class OldAPITests(APITest):
bad_finder_loader = BadLoaderFinder
(Frozen_OldAPITests,
Source_OldAPITests
) = util.test_both(OldAPITests, __import__=util.__import__)
class SpecAPITests(APITest):
bad_finder_loader = BadSpecFinderLoader
(Frozen_SpecAPITests,
Source_SpecAPITests
) = util.test_both(SpecAPITests, __import__=util.__import__)
if __name__ == '__main__':
unittest.main()
| 3,788 | 120 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/test_caching.py | """Test that sys.modules is used properly by import."""
from .. import util
import sys
from types import MethodType
import unittest
class UseCache:
"""When it comes to sys.modules, import prefers it over anything else.
Once a name has been resolved, sys.modules is checked to see if it contains
the module desired. If so, then it is returned [use cache]. If it is not
found, then the proper steps are taken to perform the import, but
    sys.modules is still used to return the imported module (i.e., not what a
    loader returns) [from cache on return]. This also applies to modules
    contained within a package, which are assigned as an attribute of the
    parent [from cache to attribute] or pulled in via a fromlist import
    [from cache for fromlist]. But if sys.modules contains None then
ImportError is raised [None in cache].
"""
def test_using_cache(self):
# [use cache]
module_to_use = "some module found!"
with util.uncache('some_module'):
sys.modules['some_module'] = module_to_use
module = self.__import__('some_module')
self.assertEqual(id(module_to_use), id(module))
def test_None_in_cache(self):
#[None in cache]
name = 'using_None'
with util.uncache(name):
sys.modules[name] = None
with self.assertRaises(ImportError) as cm:
self.__import__(name)
self.assertEqual(cm.exception.name, name)
(Frozen_UseCache,
Source_UseCache
) = util.test_both(UseCache, __import__=util.__import__)
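# An illustrative sketch (hypothetical, mirroring test_using_cache above):
# any object planted in sys.modules is returned verbatim by import, and the
# finders are never consulted.
def _demo_sys_modules_short_circuit():
    name = '_demo_cached_module'
    sentinel = object()  # need not even be a module type
    sys.modules[name] = sentinel
    try:
        assert __import__(name) is sentinel
    finally:
        del sys.modules[name]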
class ImportlibUseCache(UseCache, unittest.TestCase):
# Pertinent only to PEP 302; exec_module() doesn't return a module.
__import__ = util.__import__['Source']
def create_mock(self, *names, return_=None):
mock = util.mock_modules(*names)
original_load = mock.load_module
def load_module(self, fullname):
original_load(fullname)
return return_
mock.load_module = MethodType(load_module, mock)
return mock
    # __import__ is inconsistent between loaders and the built-in import
    # machinery about when to use the module in sys.modules and when not to.
def test_using_cache_after_loader(self):
# [from cache on return]
with self.create_mock('module') as mock:
with util.import_state(meta_path=[mock]):
module = self.__import__('module')
self.assertEqual(id(module), id(sys.modules['module']))
# See test_using_cache_after_loader() for reasoning.
def test_using_cache_for_assigning_to_attribute(self):
# [from cache to attribute]
with self.create_mock('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg.module')
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(id(module.module),
id(sys.modules['pkg.module']))
# See test_using_cache_after_loader() for reasoning.
def test_using_cache_for_fromlist(self):
# [from cache for fromlist]
with self.create_mock('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg', fromlist=['module'])
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(id(module.module),
id(sys.modules['pkg.module']))
if __name__ == '__main__':
unittest.main()
| 3,599 | 94 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/test___package__.py | """PEP 366 ("Main module explicit relative imports") specifies the
semantics for the __package__ attribute on modules. This attribute is
used, when available, to detect which package a module belongs to (instead
of using the typical __path__/__name__ test).
"""
import unittest
import warnings
from .. import util
class Using__package__:
"""Use of __package__ supercedes the use of __name__/__path__ to calculate
what package a module belongs to. The basic algorithm is [__package__]::
def resolve_name(name, package, level):
level -= 1
base = package.rsplit('.', level)[0]
return '{0}.{1}'.format(base, name)
But since there is no guarantee that __package__ has been set (or not been
set to None [None]), there has to be a way to calculate the attribute's value
[__name__]::
      def calc_package(caller_name, has___path__):
          if has___path__:
              return caller_name
          else:
              return caller_name.rsplit('.', 1)[0]
Then the normal algorithm for relative name imports can proceed as if
__package__ had been set.
"""
def import_module(self, globals_):
with self.mock_modules('pkg.__init__', 'pkg.fake') as importer:
with util.import_state(meta_path=[importer]):
self.__import__('pkg.fake')
module = self.__import__('',
globals=globals_,
fromlist=['attr'], level=2)
return module
def test_using___package__(self):
# [__package__]
module = self.import_module({'__package__': 'pkg.fake'})
self.assertEqual(module.__name__, 'pkg')
def test_using___name__(self):
# [__name__]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
module = self.import_module({'__name__': 'pkg.fake',
'__path__': []})
self.assertEqual(module.__name__, 'pkg')
def test_warn_when_using___name__(self):
with self.assertWarns(ImportWarning):
self.import_module({'__name__': 'pkg.fake', '__path__': []})
def test_None_as___package__(self):
# [None]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
module = self.import_module({
'__name__': 'pkg.fake', '__path__': [], '__package__': None })
self.assertEqual(module.__name__, 'pkg')
def test_spec_fallback(self):
# If __package__ isn't defined, fall back on __spec__.parent.
module = self.import_module({'__spec__': FakeSpec('pkg.fake')})
self.assertEqual(module.__name__, 'pkg')
def test_warn_when_package_and_spec_disagree(self):
# Raise an ImportWarning if __package__ != __spec__.parent.
with self.assertWarns(ImportWarning):
self.import_module({'__package__': 'pkg.fake',
'__spec__': FakeSpec('pkg.fakefake')})
def test_bad__package__(self):
globals = {'__package__': '<not real>'}
with self.assertRaises(ModuleNotFoundError):
self.__import__('', globals, {}, ['relimport'], 1)
def test_bunk__package__(self):
globals = {'__package__': 42}
with self.assertRaises(TypeError):
self.__import__('', globals, {}, ['relimport'], 1)
class FakeSpec:
def __init__(self, parent):
self.parent = parent
class Using__package__PEP302(Using__package__):
mock_modules = util.mock_modules
(Frozen_UsingPackagePEP302,
Source_UsingPackagePEP302
) = util.test_both(Using__package__PEP302, __import__=util.__import__)
class Using__package__PEP451(Using__package__):
mock_modules = util.mock_spec
(Frozen_UsingPackagePEP451,
Source_UsingPackagePEP451
) = util.test_both(Using__package__PEP451, __import__=util.__import__)
class Setting__package__:
"""Because __package__ is a new feature, it is not always set by a loader.
Import will set it as needed to help with the transition to relying on
__package__.
    For a top-level module, __package__ is set to the empty string
    [top-level]. For a package __name__ is used for __package__ [package]. For
    submodules the value is __name__.rsplit('.', 1)[0] [submodule].
"""
__import__ = util.__import__['Source']
# [top-level]
def test_top_level(self):
with self.mock_modules('top_level') as mock:
with util.import_state(meta_path=[mock]):
del mock['top_level'].__package__
module = self.__import__('top_level')
self.assertEqual(module.__package__, '')
# [package]
def test_package(self):
with self.mock_modules('pkg.__init__') as mock:
with util.import_state(meta_path=[mock]):
del mock['pkg'].__package__
module = self.__import__('pkg')
self.assertEqual(module.__package__, 'pkg')
# [submodule]
def test_submodule(self):
with self.mock_modules('pkg.__init__', 'pkg.mod') as mock:
with util.import_state(meta_path=[mock]):
del mock['pkg.mod'].__package__
pkg = self.__import__('pkg.mod')
module = getattr(pkg, 'mod')
self.assertEqual(module.__package__, 'pkg')
class Setting__package__PEP302(Setting__package__, unittest.TestCase):
mock_modules = util.mock_modules
class Setting__package__PEP451(Setting__package__, unittest.TestCase):
mock_modules = util.mock_spec
if __name__ == '__main__':
unittest.main()
| 5,638 | 164 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/test_fromlist.py | """Test that the semantics relating to the 'fromlist' argument are correct."""
from .. import util
import warnings
import unittest
class ReturnValue:
"""The use of fromlist influences what import returns.
    If a direct ``import ...`` statement is used, the root module or package is
returned [import return]. But if fromlist is set, then the specified module
is actually returned (whether it is a relative import or not)
[from return].
"""
def test_return_from_import(self):
# [import return]
with util.mock_spec('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg.module')
self.assertEqual(module.__name__, 'pkg')
def test_return_from_from_import(self):
# [from return]
        with util.mock_modules('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg.module', fromlist=['attr'])
self.assertEqual(module.__name__, 'pkg.module')
(Frozen_ReturnValue,
Source_ReturnValue
) = util.test_both(ReturnValue, __import__=util.__import__)
class HandlingFromlist:
"""Using fromlist triggers different actions based on what is being asked
of it.
If fromlist specifies an object on a module, nothing special happens
[object case]. This is even true if the object does not exist [bad object].
If a package is being imported, then what is listed in fromlist may be
treated as a module to be imported [module]. And this extends to what is
contained in __all__ when '*' is imported [using *]. And '*' does not need
to be the only name in the fromlist [using * with others].
"""
def test_object(self):
# [object case]
with util.mock_modules('module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('module', fromlist=['attr'])
self.assertEqual(module.__name__, 'module')
def test_nonexistent_object(self):
# [bad object]
with util.mock_modules('module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('module', fromlist=['non_existent'])
self.assertEqual(module.__name__, 'module')
self.assertFalse(hasattr(module, 'non_existent'))
def test_module_from_package(self):
# [module]
with util.mock_modules('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg', fromlist=['module'])
self.assertEqual(module.__name__, 'pkg')
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(module.module.__name__, 'pkg.module')
def test_nonexistent_from_package(self):
with util.mock_modules('pkg.__init__') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg', fromlist=['non_existent'])
self.assertEqual(module.__name__, 'pkg')
self.assertFalse(hasattr(module, 'non_existent'))
def test_module_from_package_triggers_ModuleNotFoundError(self):
        # If a submodule causes a ModuleNotFoundError because it tries
# to import a module which doesn't exist, that should let the
# ModuleNotFoundError propagate.
def module_code():
import i_do_not_exist
with util.mock_modules('pkg.__init__', 'pkg.mod',
module_code={'pkg.mod': module_code}) as importer:
with util.import_state(meta_path=[importer]):
with self.assertRaises(ModuleNotFoundError) as exc:
self.__import__('pkg', fromlist=['mod'])
self.assertEqual('i_do_not_exist', exc.exception.name)
def test_empty_string(self):
with util.mock_modules('pkg.__init__', 'pkg.mod') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg.mod', fromlist=[''])
self.assertEqual(module.__name__, 'pkg.mod')
def basic_star_test(self, fromlist=['*']):
# [using *]
with util.mock_modules('pkg.__init__', 'pkg.module') as mock:
with util.import_state(meta_path=[mock]):
mock['pkg'].__all__ = ['module']
module = self.__import__('pkg', fromlist=fromlist)
self.assertEqual(module.__name__, 'pkg')
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(module.module.__name__, 'pkg.module')
def test_using_star(self):
# [using *]
self.basic_star_test()
def test_fromlist_as_tuple(self):
self.basic_star_test(('*',))
def test_star_with_others(self):
# [using * with others]
context = util.mock_modules('pkg.__init__', 'pkg.module1', 'pkg.module2')
with context as mock:
with util.import_state(meta_path=[mock]):
mock['pkg'].__all__ = ['module1']
module = self.__import__('pkg', fromlist=['module2', '*'])
self.assertEqual(module.__name__, 'pkg')
self.assertTrue(hasattr(module, 'module1'))
self.assertTrue(hasattr(module, 'module2'))
self.assertEqual(module.module1.__name__, 'pkg.module1')
self.assertEqual(module.module2.__name__, 'pkg.module2')
def test_nonexistent_in_all(self):
with util.mock_modules('pkg.__init__') as importer:
with util.import_state(meta_path=[importer]):
importer['pkg'].__all__ = ['non_existent']
module = self.__import__('pkg', fromlist=['*'])
self.assertEqual(module.__name__, 'pkg')
self.assertFalse(hasattr(module, 'non_existent'))
def test_star_in_all(self):
with util.mock_modules('pkg.__init__') as importer:
with util.import_state(meta_path=[importer]):
importer['pkg'].__all__ = ['*']
module = self.__import__('pkg', fromlist=['*'])
self.assertEqual(module.__name__, 'pkg')
self.assertFalse(hasattr(module, '*'))
def test_invalid_type(self):
with util.mock_modules('pkg.__init__') as importer:
with util.import_state(meta_path=[importer]), \
warnings.catch_warnings():
warnings.simplefilter('error', BytesWarning)
with self.assertRaisesRegex(TypeError, r'\bfrom\b'):
self.__import__('pkg', fromlist=[b'attr'])
with self.assertRaisesRegex(TypeError, r'\bfrom\b'):
self.__import__('pkg', fromlist=iter([b'attr']))
def test_invalid_type_in_all(self):
with util.mock_modules('pkg.__init__') as importer:
with util.import_state(meta_path=[importer]), \
warnings.catch_warnings():
warnings.simplefilter('error', BytesWarning)
importer['pkg'].__all__ = [b'attr']
with self.assertRaisesRegex(TypeError, r'\bpkg\.__all__\b'):
self.__import__('pkg', fromlist=['*'])
(Frozen_FromList,
Source_FromList
) = util.test_both(HandlingFromlist, __import__=util.__import__)
if __name__ == '__main__':
unittest.main()
| 7,526 | 176 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/__init__.py | import os
from test.support import load_package_tests
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
| 142 | 6 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/import_/test_relative_imports.py | """Test relative imports (PEP 328)."""
from .. import util
import unittest
import warnings
class RelativeImports:
"""PEP 328 introduced relative imports. This allows for imports to occur
from within a package without having to specify the actual package name.
A simple example is to import another module within the same package
[module from module]::
# From pkg.mod1 with pkg.mod2 being a module.
from . import mod2
This also works for getting an attribute from a module that is specified
in a relative fashion [attr from module]::
# From pkg.mod1.
from .mod2 import attr
But this is in no way restricted to working between modules; it works
from [package to module],::
# From pkg, importing pkg.module which is a module.
from . import module
[module to package],::
# Pull attr from pkg, called from pkg.module which is a module.
from . import attr
and [package to package]::
# From pkg.subpkg1 (both pkg.subpkg[1,2] are packages).
from .. import subpkg2
The number of dots used is in no way restricted [deep import]::
# Import pkg.attr from pkg.pkg1.pkg2.pkg3.pkg4.pkg5.
from ...... import attr
To prevent someone from accessing code that is outside of a package, one
cannot reach the location containing the root package itself::
# From pkg.__init__ [too high from package]
from .. import top_level
# From pkg.module [too high from module]
from .. import top_level
Relative imports are the only type of import that allow for an empty
module name for an import [empty name].
"""
def relative_import_test(self, create, globals_, callback):
"""Abstract out boilerplace for setting up for an import test."""
uncache_names = []
for name in create:
if not name.endswith('.__init__'):
uncache_names.append(name)
else:
uncache_names.append(name[:-len('.__init__')])
with util.mock_spec(*create) as importer:
with util.import_state(meta_path=[importer]):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for global_ in globals_:
with util.uncache(*uncache_names):
callback(global_)
def test_module_from_module(self):
# [module from module]
create = 'pkg.__init__', 'pkg.mod2'
globals_ = {'__package__': 'pkg'}, {'__name__': 'pkg.mod1'}
def callback(global_):
self.__import__('pkg') # For __import__().
module = self.__import__('', global_, fromlist=['mod2'], level=1)
self.assertEqual(module.__name__, 'pkg')
self.assertTrue(hasattr(module, 'mod2'))
self.assertEqual(module.mod2.attr, 'pkg.mod2')
self.relative_import_test(create, globals_, callback)
def test_attr_from_module(self):
# [attr from module]
create = 'pkg.__init__', 'pkg.mod2'
globals_ = {'__package__': 'pkg'}, {'__name__': 'pkg.mod1'}
def callback(global_):
self.__import__('pkg') # For __import__().
module = self.__import__('mod2', global_, fromlist=['attr'],
level=1)
self.assertEqual(module.__name__, 'pkg.mod2')
self.assertEqual(module.attr, 'pkg.mod2')
self.relative_import_test(create, globals_, callback)
def test_package_to_module(self):
# [package to module]
create = 'pkg.__init__', 'pkg.module'
globals_ = ({'__package__': 'pkg'},
{'__name__': 'pkg', '__path__': ['blah']})
def callback(global_):
self.__import__('pkg') # For __import__().
module = self.__import__('', global_, fromlist=['module'],
level=1)
self.assertEqual(module.__name__, 'pkg')
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(module.module.attr, 'pkg.module')
self.relative_import_test(create, globals_, callback)
def test_module_to_package(self):
# [module to package]
create = 'pkg.__init__', 'pkg.module'
globals_ = {'__package__': 'pkg'}, {'__name__': 'pkg.module'}
def callback(global_):
self.__import__('pkg') # For __import__().
module = self.__import__('', global_, fromlist=['attr'], level=1)
self.assertEqual(module.__name__, 'pkg')
self.relative_import_test(create, globals_, callback)
def test_package_to_package(self):
# [package to package]
create = ('pkg.__init__', 'pkg.subpkg1.__init__',
'pkg.subpkg2.__init__')
globals_ = ({'__package__': 'pkg.subpkg1'},
{'__name__': 'pkg.subpkg1', '__path__': ['blah']})
def callback(global_):
module = self.__import__('', global_, fromlist=['subpkg2'],
level=2)
self.assertEqual(module.__name__, 'pkg')
self.assertTrue(hasattr(module, 'subpkg2'))
            self.assertEqual(module.subpkg2.attr, 'pkg.subpkg2.__init__')
        self.relative_import_test(create, globals_, callback)
def test_deep_import(self):
# [deep import]
create = ['pkg.__init__']
for count in range(1,6):
create.append('{0}.pkg{1}.__init__'.format(
create[-1][:-len('.__init__')], count))
globals_ = ({'__package__': 'pkg.pkg1.pkg2.pkg3.pkg4.pkg5'},
{'__name__': 'pkg.pkg1.pkg2.pkg3.pkg4.pkg5',
'__path__': ['blah']})
def callback(global_):
self.__import__(globals_[0]['__package__'])
module = self.__import__('', global_, fromlist=['attr'], level=6)
self.assertEqual(module.__name__, 'pkg')
self.relative_import_test(create, globals_, callback)
def test_too_high_from_package(self):
# [too high from package]
create = ['top_level', 'pkg.__init__']
globals_ = ({'__package__': 'pkg'},
{'__name__': 'pkg', '__path__': ['blah']})
def callback(global_):
self.__import__('pkg')
with self.assertRaises(ValueError):
self.__import__('', global_, fromlist=['top_level'],
level=2)
self.relative_import_test(create, globals_, callback)
def test_too_high_from_module(self):
# [too high from module]
create = ['top_level', 'pkg.__init__', 'pkg.module']
globals_ = {'__package__': 'pkg'}, {'__name__': 'pkg.module'}
def callback(global_):
self.__import__('pkg')
with self.assertRaises(ValueError):
self.__import__('', global_, fromlist=['top_level'],
level=2)
self.relative_import_test(create, globals_, callback)
def test_empty_name_w_level_0(self):
# [empty name]
with self.assertRaises(ValueError):
self.__import__('')
def test_import_from_different_package(self):
# Test importing from a different package than the caller.
# in pkg.subpkg1.mod
# from ..subpkg2 import mod
create = ['__runpy_pkg__.__init__',
'__runpy_pkg__.__runpy_pkg__.__init__',
'__runpy_pkg__.uncle.__init__',
'__runpy_pkg__.uncle.cousin.__init__',
'__runpy_pkg__.uncle.cousin.nephew']
globals_ = {'__package__': '__runpy_pkg__.__runpy_pkg__'}
def callback(global_):
self.__import__('__runpy_pkg__.__runpy_pkg__')
module = self.__import__('uncle.cousin', globals_, {},
fromlist=['nephew'],
level=2)
self.assertEqual(module.__name__, '__runpy_pkg__.uncle.cousin')
self.relative_import_test(create, globals_, callback)
def test_import_relative_import_no_fromlist(self):
# Import a relative module w/ no fromlist.
create = ['crash.__init__', 'crash.mod']
globals_ = [{'__package__': 'crash', '__name__': 'crash'}]
def callback(global_):
self.__import__('crash')
mod = self.__import__('mod', global_, {}, [], 1)
self.assertEqual(mod.__name__, 'crash.mod')
self.relative_import_test(create, globals_, callback)
def test_relative_import_no_globals(self):
# No globals for a relative import is an error.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with self.assertRaises(KeyError):
self.__import__('sys', level=1)
def test_relative_import_no_package(self):
with self.assertRaises(ImportError):
self.__import__('a', {'__package__': '', '__spec__': None},
level=1)
def test_relative_import_no_package_exists_absolute(self):
with self.assertRaises(ImportError):
self.__import__('sys', {'__package__': '', '__spec__': None},
level=1)
(Frozen_RelativeImports,
Source_RelativeImports
) = util.test_both(RelativeImports, __import__=util.__import__)
if __name__ == '__main__':
unittest.main()
| 9,401 | 233 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/source/__main__.py | from . import load_tests
import unittest
unittest.main()
| 58 | 5 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/source/test_path_hook.py | from .. import util
machinery = util.import_importlib('importlib.machinery')
import unittest
class PathHookTest:
"""Test the path hook for source."""
def path_hook(self):
return self.machinery.FileFinder.path_hook((self.machinery.SourceFileLoader,
self.machinery.SOURCE_SUFFIXES))
def test_success(self):
with util.create_modules('dummy') as mapping:
self.assertTrue(hasattr(self.path_hook()(mapping['.root']),
'find_spec'))
def test_success_legacy(self):
with util.create_modules('dummy') as mapping:
self.assertTrue(hasattr(self.path_hook()(mapping['.root']),
'find_module'))
def test_empty_string(self):
# The empty string represents the cwd.
self.assertTrue(hasattr(self.path_hook()(''), 'find_spec'))
def test_empty_string_legacy(self):
# The empty string represents the cwd.
self.assertTrue(hasattr(self.path_hook()(''), 'find_module'))
(Frozen_PathHookTest,
 Source_PathHookTest
) = util.test_both(PathHookTest, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 1,190 | 42 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/source/test_case_sensitivity.py | """Test case-sensitivity (PEP 235)."""
from .. import util
importlib = util.import_importlib('importlib')
machinery = util.import_importlib('importlib.machinery')
import os
from test import support as test_support
import unittest
@util.case_insensitive_tests
class CaseSensitivityTest(util.CASEOKTestBase):
"""PEP 235 dictates that on case-preserving, case-insensitive file systems
that imports are case-sensitive unless the PYTHONCASEOK environment
variable is set."""
name = 'MoDuLe'
assert name != name.lower()
def finder(self, path):
return self.machinery.FileFinder(path,
(self.machinery.SourceFileLoader,
self.machinery.SOURCE_SUFFIXES),
(self.machinery.SourcelessFileLoader,
self.machinery.BYTECODE_SUFFIXES))
def sensitivity_test(self):
"""Look for a module with matching and non-matching sensitivity."""
sensitive_pkg = 'sensitive.{0}'.format(self.name)
insensitive_pkg = 'insensitive.{0}'.format(self.name.lower())
context = util.create_modules(insensitive_pkg, sensitive_pkg)
with context as mapping:
sensitive_path = os.path.join(mapping['.root'], 'sensitive')
insensitive_path = os.path.join(mapping['.root'], 'insensitive')
sensitive_finder = self.finder(sensitive_path)
insensitive_finder = self.finder(insensitive_path)
return self.find(sensitive_finder), self.find(insensitive_finder)
def test_sensitive(self):
with test_support.EnvironmentVarGuard() as env:
env.unset('PYTHONCASEOK')
self.caseok_env_changed(should_exist=False)
sensitive, insensitive = self.sensitivity_test()
self.assertIsNotNone(sensitive)
self.assertIn(self.name, sensitive.get_filename(self.name))
self.assertIsNone(insensitive)
def test_insensitive(self):
with test_support.EnvironmentVarGuard() as env:
env.set('PYTHONCASEOK', '1')
self.caseok_env_changed(should_exist=True)
sensitive, insensitive = self.sensitivity_test()
self.assertIsNotNone(sensitive)
self.assertIn(self.name, sensitive.get_filename(self.name))
self.assertIsNotNone(insensitive)
self.assertIn(self.name, insensitive.get_filename(self.name))
class CaseSensitivityTestPEP302(CaseSensitivityTest):
def find(self, finder):
return finder.find_module(self.name)
(Frozen_CaseSensitivityTestPEP302,
Source_CaseSensitivityTestPEP302
) = util.test_both(CaseSensitivityTestPEP302, importlib=importlib,
machinery=machinery)
class CaseSensitivityTestPEP451(CaseSensitivityTest):
def find(self, finder):
found = finder.find_spec(self.name)
return found.loader if found is not None else found
(Frozen_CaseSensitivityTestPEP451,
Source_CaseSensitivityTestPEP451
) = util.test_both(CaseSensitivityTestPEP451, importlib=importlib,
machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 3,221 | 86 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/source/test_file_loader.py | from .. import abc
from .. import util
importlib = util.import_importlib('importlib')
importlib_abc = util.import_importlib('importlib.abc')
machinery = util.import_importlib('importlib.machinery')
importlib_util = util.import_importlib('importlib.util')
import errno
import marshal
import os
import py_compile
import shutil
import stat
import sys
import types
import unittest
import warnings
from test.support import make_legacy_pyc, unload
class SimpleTest(abc.LoaderTests):
"""Should have no issue importing a source module [basic]. And if there is
a syntax error, it should raise a SyntaxError [syntax error].
"""
def setUp(self):
self.name = 'spam'
self.filepath = os.path.join('ham', self.name + '.py')
self.loader = self.machinery.SourceFileLoader(self.name, self.filepath)
def test_load_module_API(self):
class Tester(self.abc.FileLoader):
def get_source(self, _): return 'attr = 42'
def is_package(self, _): return False
loader = Tester('blah', 'blah.py')
self.addCleanup(unload, 'blah')
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module() # Should not raise an exception.
def test_get_filename_API(self):
# If fullname is not set then assume self.path is desired.
class Tester(self.abc.FileLoader):
def get_code(self, _): pass
def get_source(self, _): pass
def is_package(self, _): pass
def module_repr(self, _): pass
path = 'some_path'
name = 'some_name'
loader = Tester(name, path)
self.assertEqual(path, loader.get_filename(name))
self.assertEqual(path, loader.get_filename())
self.assertEqual(path, loader.get_filename(None))
with self.assertRaises(ImportError):
loader.get_filename(name + 'XXX')
def test_equality(self):
other = self.machinery.SourceFileLoader(self.name, self.filepath)
self.assertEqual(self.loader, other)
def test_inequality(self):
other = self.machinery.SourceFileLoader('_' + self.name, self.filepath)
self.assertNotEqual(self.loader, other)
# [basic]
def test_module(self):
with util.create_modules('_temp') as mapping:
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_temp')
self.assertIn('_temp', sys.modules)
check = {'__name__': '_temp', '__file__': mapping['_temp'],
'__package__': ''}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def test_package(self):
with util.create_modules('_pkg.__init__') as mapping:
loader = self.machinery.SourceFileLoader('_pkg',
mapping['_pkg.__init__'])
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_pkg')
self.assertIn('_pkg', sys.modules)
check = {'__name__': '_pkg', '__file__': mapping['_pkg.__init__'],
'__path__': [os.path.dirname(mapping['_pkg.__init__'])],
'__package__': '_pkg'}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def test_lacking_parent(self):
        with util.create_modules('_pkg.__init__', '_pkg.mod') as mapping:
loader = self.machinery.SourceFileLoader('_pkg.mod',
mapping['_pkg.mod'])
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_pkg.mod')
self.assertIn('_pkg.mod', sys.modules)
check = {'__name__': '_pkg.mod', '__file__': mapping['_pkg.mod'],
'__package__': '_pkg'}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def fake_mtime(self, fxn):
"""Fake mtime to always be higher than expected."""
return lambda name: fxn(name) + 1
def test_module_reuse(self):
with util.create_modules('_temp') as mapping:
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_temp')
module_id = id(module)
module_dict_id = id(module.__dict__)
with open(mapping['_temp'], 'w') as file:
file.write("testing_var = 42\n")
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_temp')
self.assertIn('testing_var', module.__dict__,
"'testing_var' not in "
"{0}".format(list(module.__dict__.keys())))
self.assertEqual(module, sys.modules['_temp'])
self.assertEqual(id(module), module_id)
self.assertEqual(id(module.__dict__), module_dict_id)
def test_state_after_failure(self):
# A failed reload should leave the original module intact.
attributes = ('__file__', '__path__', '__package__')
value = '<test>'
name = '_temp'
with util.create_modules(name) as mapping:
orig_module = types.ModuleType(name)
for attr in attributes:
setattr(orig_module, attr, value)
with open(mapping[name], 'w') as file:
file.write('+++ bad syntax +++')
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
with self.assertRaises(SyntaxError):
loader.exec_module(orig_module)
for attr in attributes:
self.assertEqual(getattr(orig_module, attr), value)
with self.assertRaises(SyntaxError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
loader.load_module(name)
for attr in attributes:
self.assertEqual(getattr(orig_module, attr), value)
# [syntax error]
def test_bad_syntax(self):
with util.create_modules('_temp') as mapping:
with open(mapping['_temp'], 'w') as file:
file.write('=')
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
with self.assertRaises(SyntaxError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
loader.load_module('_temp')
self.assertNotIn('_temp', sys.modules)
def test_file_from_empty_string_dir(self):
# Loading a module found from an empty string entry on sys.path should
# not only work, but keep all attributes relative.
file_path = '_temp.py'
with open(file_path, 'w') as file:
file.write("# test file for importlib")
try:
with util.uncache('_temp'):
loader = self.machinery.SourceFileLoader('_temp', file_path)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
mod = loader.load_module('_temp')
self.assertEqual(file_path, mod.__file__)
self.assertEqual(self.util.cache_from_source(file_path),
mod.__cached__)
finally:
os.unlink(file_path)
pycache = os.path.dirname(self.util.cache_from_source(file_path))
if os.path.exists(pycache):
shutil.rmtree(pycache)
@util.writes_bytecode_files
def test_timestamp_overflow(self):
# When a modification timestamp is larger than 2**32, it should be
# truncated rather than raise an OverflowError.
with util.create_modules('_temp') as mapping:
source = mapping['_temp']
compiled = self.util.cache_from_source(source)
with open(source, 'w') as f:
f.write("x = 5")
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno != getattr(errno, 'EOVERFLOW', None):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
# PEP 451
module = types.ModuleType('_temp')
module.__spec__ = self.util.spec_from_loader('_temp', loader)
loader.exec_module(module)
self.assertEqual(module.x, 5)
self.assertTrue(os.path.exists(compiled))
os.unlink(compiled)
# PEP 302
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
mod = loader.load_module('_temp')
# Sanity checks.
self.assertEqual(mod.__cached__, compiled)
self.assertEqual(mod.x, 5)
# The pyc file was created.
self.assertTrue(os.path.exists(compiled))
def test_unloadable(self):
loader = self.machinery.SourceFileLoader('good name', {})
module = types.ModuleType('bad name')
module.__spec__ = self.machinery.ModuleSpec('bad name', loader)
with self.assertRaises(ImportError):
loader.exec_module(module)
with self.assertRaises(ImportError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
loader.load_module('bad name')
(Frozen_SimpleTest,
Source_SimpleTest
) = util.test_both(SimpleTest, importlib=importlib, machinery=machinery,
abc=importlib_abc, util=importlib_util)
class BadBytecodeTest:
def import_(self, file, module_name):
raise NotImplementedError
def manipulate_bytecode(self, name, mapping, manipulator, *,
del_source=False):
"""Manipulate the bytecode of a module by passing it into a callable
that returns what to use as the new bytecode."""
try:
del sys.modules['_temp']
except KeyError:
pass
py_compile.compile(mapping[name])
if not del_source:
bytecode_path = self.util.cache_from_source(mapping[name])
else:
os.unlink(mapping[name])
bytecode_path = make_legacy_pyc(mapping[name])
if manipulator:
with open(bytecode_path, 'rb') as file:
bc = file.read()
new_bc = manipulator(bc)
with open(bytecode_path, 'wb') as file:
if new_bc is not None:
file.write(new_bc)
return bytecode_path
def _test_empty_file(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: b'',
del_source=del_source)
test('_temp', mapping, bc_path)
@util.writes_bytecode_files
def _test_partial_magic(self, test, *, del_source=False):
        # When there are fewer than 4 bytes in a .pyc, regenerate it if
# possible, else raise ImportError.
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:3],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_magic_only(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:4],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_timestamp(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:7],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_size(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:11],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_no_marshal(self, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:12],
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bc_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_non_code_marshal(self, *, del_source=False):
with util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:12] + marshal.dumps(b'abcd'),
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(ImportError) as cm:
self.import_(file_path, '_temp')
self.assertEqual(cm.exception.name, '_temp')
self.assertEqual(cm.exception.path, bytecode_path)
def _test_bad_marshal(self, *, del_source=False):
with util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:12] + b'<test>',
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_bad_magic(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
            bc_path = self.manipulate_bytecode('_temp', mapping,
                    lambda bc: b'\x00\x00\x00\x00' + bc[4:],
                    del_source=del_source)
test('_temp', mapping, bc_path)
class BadBytecodeTestPEP451(BadBytecodeTest):
def import_(self, file, module_name):
loader = self.loader(module_name, file)
module = types.ModuleType(module_name)
module.__spec__ = self.util.spec_from_loader(module_name, loader)
loader.exec_module(module)
class BadBytecodeTestPEP302(BadBytecodeTest):
def import_(self, file, module_name):
loader = self.loader(module_name, file)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module(module_name)
self.assertIn(module_name, sys.modules)
class SourceLoaderBadBytecodeTest:
@classmethod
def setUpClass(cls):
cls.loader = cls.machinery.SourceFileLoader
@util.writes_bytecode_files
def test_empty_file(self):
# When a .pyc is empty, regenerate it if possible, else raise
# ImportError.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_empty_file(test)
def test_partial_magic(self):
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_partial_magic(test)
@util.writes_bytecode_files
def test_magic_only(self):
# When there is only the magic number, regenerate the .pyc if possible,
# else raise EOFError.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_magic_only(test)
@util.writes_bytecode_files
def test_bad_magic(self):
# When the magic number is different, the bytecode should be
# regenerated.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as bytecode_file:
self.assertEqual(bytecode_file.read(4),
self.util.MAGIC_NUMBER)
self._test_bad_magic(test)
@util.writes_bytecode_files
def test_partial_timestamp(self):
# When the timestamp is partial, regenerate the .pyc, else
# raise EOFError.
def test(name, mapping, bc_path):
self.import_(mapping[name], name)
with open(bc_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_partial_timestamp(test)
@util.writes_bytecode_files
def test_partial_size(self):
# When the size is partial, regenerate the .pyc, else
# raise EOFError.
def test(name, mapping, bc_path):
self.import_(mapping[name], name)
with open(bc_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_partial_size(test)
@util.writes_bytecode_files
def test_no_marshal(self):
# When there is only the magic number and timestamp, raise EOFError.
self._test_no_marshal()
@util.writes_bytecode_files
def test_non_code_marshal(self):
self._test_non_code_marshal()
# XXX ImportError when sourceless
# [bad marshal]
@util.writes_bytecode_files
def test_bad_marshal(self):
        # Bad marshal data should raise an EOFError.
self._test_bad_marshal()
# [bad timestamp]
@util.writes_bytecode_files
def test_old_timestamp(self):
# When the timestamp is older than the source, bytecode should be
# regenerated.
zeros = b'\x00\x00\x00\x00'
with util.create_modules('_temp') as mapping:
py_compile.compile(mapping['_temp'])
bytecode_path = self.util.cache_from_source(mapping['_temp'])
with open(bytecode_path, 'r+b') as bytecode_file:
bytecode_file.seek(4)
bytecode_file.write(zeros)
self.import_(mapping['_temp'], '_temp')
source_mtime = os.path.getmtime(mapping['_temp'])
source_timestamp = self.importlib._w_long(source_mtime)
with open(bytecode_path, 'rb') as bytecode_file:
bytecode_file.seek(4)
self.assertEqual(bytecode_file.read(4), source_timestamp)
# [bytecode read-only]
@util.writes_bytecode_files
def test_read_only_bytecode(self):
# When bytecode is read-only but should be rewritten, fail silently.
with util.create_modules('_temp') as mapping:
# Create bytecode that will need to be re-created.
py_compile.compile(mapping['_temp'])
bytecode_path = self.util.cache_from_source(mapping['_temp'])
with open(bytecode_path, 'r+b') as bytecode_file:
bytecode_file.seek(0)
bytecode_file.write(b'\x00\x00\x00\x00')
# Make the bytecode read-only.
os.chmod(bytecode_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
try:
# Should not raise OSError!
self.import_(mapping['_temp'], '_temp')
finally:
# Make writable for eventual clean-up.
os.chmod(bytecode_path, stat.S_IWUSR)
class SourceLoaderBadBytecodeTestPEP451(
SourceLoaderBadBytecodeTest, BadBytecodeTestPEP451):
pass
(Frozen_SourceBadBytecodePEP451,
Source_SourceBadBytecodePEP451
) = util.test_both(SourceLoaderBadBytecodeTestPEP451, importlib=importlib,
machinery=machinery, abc=importlib_abc,
util=importlib_util)
class SourceLoaderBadBytecodeTestPEP302(
SourceLoaderBadBytecodeTest, BadBytecodeTestPEP302):
pass
(Frozen_SourceBadBytecodePEP302,
Source_SourceBadBytecodePEP302
) = util.test_both(SourceLoaderBadBytecodeTestPEP302, importlib=importlib,
machinery=machinery, abc=importlib_abc,
util=importlib_util)
class SourcelessLoaderBadBytecodeTest:
@classmethod
def setUpClass(cls):
cls.loader = cls.machinery.SourcelessFileLoader
def test_empty_file(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError) as cm:
self.import_(bytecode_path, name)
self.assertEqual(cm.exception.name, name)
self.assertEqual(cm.exception.path, bytecode_path)
self._test_empty_file(test, del_source=True)
def test_partial_magic(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError) as cm:
self.import_(bytecode_path, name)
self.assertEqual(cm.exception.name, name)
self.assertEqual(cm.exception.path, bytecode_path)
self._test_partial_magic(test, del_source=True)
def test_magic_only(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_magic_only(test, del_source=True)
def test_bad_magic(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError) as cm:
self.import_(bytecode_path, name)
self.assertEqual(cm.exception.name, name)
self.assertEqual(cm.exception.path, bytecode_path)
self._test_bad_magic(test, del_source=True)
def test_partial_timestamp(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_partial_timestamp(test, del_source=True)
def test_partial_size(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_partial_size(test, del_source=True)
def test_no_marshal(self):
self._test_no_marshal(del_source=True)
def test_non_code_marshal(self):
self._test_non_code_marshal(del_source=True)
class SourcelessLoaderBadBytecodeTestPEP451(SourcelessLoaderBadBytecodeTest,
BadBytecodeTestPEP451):
pass
(Frozen_SourcelessBadBytecodePEP451,
Source_SourcelessBadBytecodePEP451
) = util.test_both(SourcelessLoaderBadBytecodeTestPEP451, importlib=importlib,
machinery=machinery, abc=importlib_abc,
util=importlib_util)
class SourcelessLoaderBadBytecodeTestPEP302(SourcelessLoaderBadBytecodeTest,
BadBytecodeTestPEP302):
pass
(Frozen_SourcelessBadBytecodePEP302,
Source_SourcelessBadBytecodePEP302
) = util.test_both(SourcelessLoaderBadBytecodeTestPEP302, importlib=importlib,
machinery=machinery, abc=importlib_abc,
util=importlib_util)
if __name__ == '__main__':
unittest.main()
| 24,376 | 603 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/source/test_source_encoding.py | from .. import util
machinery = util.import_importlib('importlib.machinery')
import codecs
import importlib.util
import re
import types
# Because sys.path gets essentially blanked, unicodedata needs to already be
# imported for the parser to use.
import unicodedata
import unittest
import warnings
CODING_RE = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
class EncodingTest:
"""PEP 3120 makes UTF-8 the default encoding for source code
[default encoding].
PEP 263 specifies how that can change on a per-file basis. Either the first
    or second line can contain the encoding line [encoding first line]
    [encoding second line]. If the file has the BOM marker it is considered UTF-8
    implicitly [BOM]; in that case any encoding that is specified must be UTF-8
    [BOM and utf-8], otherwise it is an error [BOM conflict].
"""
variable = '\u00fc'
character = '\u00c9'
source_line = "{0} = '{1}'\n".format(variable, character)
module_name = '_temp'
def run_test(self, source):
with util.create_modules(self.module_name) as mapping:
with open(mapping[self.module_name], 'wb') as file:
file.write(source)
loader = self.machinery.SourceFileLoader(self.module_name,
mapping[self.module_name])
return self.load(loader)
def create_source(self, encoding):
encoding_line = "# coding={0}".format(encoding)
assert CODING_RE.match(encoding_line)
source_lines = [encoding_line.encode('utf-8')]
source_lines.append(self.source_line.encode(encoding))
return b'\n'.join(source_lines)
def test_non_obvious_encoding(self):
# Make sure that an encoding that has never been a standard one for
# Python works.
encoding_line = "# coding=koi8-r"
assert CODING_RE.match(encoding_line)
source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
self.run_test(source)
# [default encoding]
def test_default_encoding(self):
self.run_test(self.source_line.encode('utf-8'))
# [encoding first line]
def test_encoding_on_first_line(self):
encoding = 'Latin-1'
source = self.create_source(encoding)
self.run_test(source)
# [encoding second line]
def test_encoding_on_second_line(self):
source = b"#/usr/bin/python\n" + self.create_source('Latin-1')
self.run_test(source)
# [BOM]
def test_bom(self):
self.run_test(codecs.BOM_UTF8 + self.source_line.encode('utf-8'))
# [BOM and utf-8]
def test_bom_and_utf_8(self):
source = codecs.BOM_UTF8 + self.create_source('utf-8')
self.run_test(source)
# [BOM conflict]
def test_bom_conflict(self):
source = codecs.BOM_UTF8 + self.create_source('latin-1')
with self.assertRaises(SyntaxError):
self.run_test(source)
class EncodingTestPEP451(EncodingTest):
def load(self, loader):
module = types.ModuleType(self.module_name)
module.__spec__ = importlib.util.spec_from_loader(self.module_name, loader)
loader.exec_module(module)
return module
(Frozen_EncodingTestPEP451,
Source_EncodingTestPEP451
) = util.test_both(EncodingTestPEP451, machinery=machinery)
class EncodingTestPEP302(EncodingTest):
def load(self, loader):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return loader.load_module(self.module_name)
(Frozen_EncodingTestPEP302,
Source_EncodingTestPEP302
) = util.test_both(EncodingTestPEP302, machinery=machinery)
class LineEndingTest:
r"""Source written with the three types of line endings (\n, \r\n, \r)
need to be readable [cr][crlf][lf]."""
def run_test(self, line_ending):
module_name = '_temp'
source_lines = [b"a = 42", b"b = -13", b'']
source = line_ending.join(source_lines)
with util.create_modules(module_name) as mapping:
with open(mapping[module_name], 'wb') as file:
file.write(source)
loader = self.machinery.SourceFileLoader(module_name,
mapping[module_name])
return self.load(loader, module_name)
# [cr]
def test_cr(self):
self.run_test(b'\r')
# [crlf]
def test_crlf(self):
self.run_test(b'\r\n')
# [lf]
def test_lf(self):
self.run_test(b'\n')
class LineEndingTestPEP451(LineEndingTest):
def load(self, loader, module_name):
module = types.ModuleType(module_name)
module.__spec__ = importlib.util.spec_from_loader(module_name, loader)
loader.exec_module(module)
return module
(Frozen_LineEndingTestPEP451,
Source_LineEndingTestPEP451
) = util.test_both(LineEndingTestPEP451, machinery=machinery)
class LineEndingTestPEP302(LineEndingTest):
def load(self, loader, module_name):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return loader.load_module(module_name)
(Frozen_LineEndingTestPEP302,
Source_LineEndingTestPEP302
) = util.test_both(LineEndingTestPEP302, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 5,330 | 176 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/source/__init__.py | import os
from test.support import load_package_tests
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
| 142 | 6 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/test/test_importlib/source/test_finder.py | from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
import errno
import os
import py_compile
import stat
import sys
import tempfile
from test.support import make_legacy_pyc
import unittest
import warnings
class FinderTests(abc.FinderTests):
"""For a top-level module, it should just be found directly in the
directory being searched. This is true for a directory with source
[top-level source], bytecode [top-level bc], or both [top-level both].
There is also the possibility that it is a package [top-level package], in
which case there will be a directory with the module name and an
__init__.py file. If there is a directory without an __init__.py an
    ImportWarning is raised [empty dir].
    For sub-modules and sub-packages, the same happens as above, but only the
    tail end of the name is used [sub module] [sub package] [sub empty].
    When a package and a module have the same name in the same directory, the
    package wins out [package over module]. This is so that imports of modules
    within the package can still succeed rather than trigger an import error.
"""
def get_finder(self, root):
loader_details = [(self.machinery.SourceFileLoader,
self.machinery.SOURCE_SUFFIXES),
(self.machinery.SourcelessFileLoader,
self.machinery.BYTECODE_SUFFIXES)]
return self.machinery.FileFinder(root, *loader_details)
def import_(self, root, module):
finder = self.get_finder(root)
return self._find(finder, module, loader_only=True)
def run_test(self, test, create=None, *, compile_=None, unlink=None):
"""Test the finding of 'test' with the creation of modules listed in
'create'.
Any names listed in 'compile_' are byte-compiled. Modules
listed in 'unlink' have their source files deleted.
"""
if create is None:
create = {test}
with util.create_modules(*create) as mapping:
if compile_:
for name in compile_:
py_compile.compile(mapping[name])
if unlink:
for name in unlink:
os.unlink(mapping[name])
try:
make_legacy_pyc(mapping[name])
except OSError as error:
# Some tests do not set compile_=True so the source
# module will not get compiled and there will be no
# PEP 3147 pyc file to rename.
if error.errno != errno.ENOENT:
raise
loader = self.import_(mapping['.root'], test)
self.assertTrue(hasattr(loader, 'load_module'))
return loader
def test_module(self):
# [top-level source]
self.run_test('top_level')
# [top-level bc]
self.run_test('top_level', compile_={'top_level'},
unlink={'top_level'})
# [top-level both]
self.run_test('top_level', compile_={'top_level'})
# [top-level package]
def test_package(self):
# Source.
self.run_test('pkg', {'pkg.__init__'})
# Bytecode.
self.run_test('pkg', {'pkg.__init__'}, compile_={'pkg.__init__'},
unlink={'pkg.__init__'})
# Both.
self.run_test('pkg', {'pkg.__init__'}, compile_={'pkg.__init__'})
# [sub module]
def test_module_in_package(self):
with util.create_modules('pkg.__init__', 'pkg.sub') as mapping:
pkg_dir = os.path.dirname(mapping['pkg.__init__'])
loader = self.import_(pkg_dir, 'pkg.sub')
self.assertTrue(hasattr(loader, 'load_module'))
# [sub package]
def test_package_in_package(self):
context = util.create_modules('pkg.__init__', 'pkg.sub.__init__')
with context as mapping:
pkg_dir = os.path.dirname(mapping['pkg.__init__'])
loader = self.import_(pkg_dir, 'pkg.sub')
self.assertTrue(hasattr(loader, 'load_module'))
# [package over modules]
def test_package_over_module(self):
name = '_temp'
loader = self.run_test(name, {'{0}.__init__'.format(name), name})
self.assertIn('__init__', loader.get_filename(name))
def test_failure(self):
with util.create_modules('blah') as mapping:
nothing = self.import_(mapping['.root'], 'sdfsadsadf')
self.assertIsNone(nothing)
def test_empty_string_for_dir(self):
# The empty string from sys.path means to search in the cwd.
finder = self.machinery.FileFinder('', (self.machinery.SourceFileLoader,
self.machinery.SOURCE_SUFFIXES))
with open('mod.py', 'w') as file:
file.write("# test file for importlib")
try:
loader = self._find(finder, 'mod', loader_only=True)
self.assertTrue(hasattr(loader, 'load_module'))
finally:
os.unlink('mod.py')
def test_invalidate_caches(self):
# invalidate_caches() should reset the mtime.
finder = self.machinery.FileFinder('', (self.machinery.SourceFileLoader,
self.machinery.SOURCE_SUFFIXES))
finder._path_mtime = 42
finder.invalidate_caches()
self.assertEqual(finder._path_mtime, -1)
# Regression test for http://bugs.python.org/issue14846
def test_dir_removal_handling(self):
mod = 'mod'
with util.create_modules(mod) as mapping:
finder = self.get_finder(mapping['.root'])
found = self._find(finder, 'mod', loader_only=True)
self.assertIsNotNone(found)
found = self._find(finder, 'mod', loader_only=True)
self.assertIsNone(found)
@unittest.skipUnless(sys.platform != 'win32',
'os.chmod() does not support the needed arguments under Windows')
def test_no_read_directory(self):
# Issue #16730
tempdir = tempfile.TemporaryDirectory()
original_mode = os.stat(tempdir.name).st_mode
def cleanup(tempdir):
"""Cleanup function for the temporary directory.
Since we muck with the permissions, we want to set them back to
their original values to make sure the directory can be properly
cleaned up.
"""
os.chmod(tempdir.name, original_mode)
            # If this is not called explicitly then the __del__ method is
            # used, but since we are already mucking around we might as well
            # clean up explicitly.
tempdir.__exit__(None, None, None)
self.addCleanup(cleanup, tempdir)
os.chmod(tempdir.name, stat.S_IWUSR | stat.S_IXUSR)
finder = self.get_finder(tempdir.name)
found = self._find(finder, 'doesnotexist')
self.assertEqual(found, self.NOT_FOUND)
def test_ignore_file(self):
# If a directory got changed to a file from underneath us, then don't
# worry about looking for submodules.
with tempfile.NamedTemporaryFile() as file_obj:
finder = self.get_finder(file_obj.name)
found = self._find(finder, 'doesnotexist')
self.assertEqual(found, self.NOT_FOUND)
class FinderTestsPEP451(FinderTests):
NOT_FOUND = None
def _find(self, finder, name, loader_only=False):
spec = finder.find_spec(name)
return spec.loader if spec is not None else spec
(Frozen_FinderTestsPEP451,
Source_FinderTestsPEP451
) = util.test_both(FinderTestsPEP451, machinery=machinery)
class FinderTestsPEP420(FinderTests):
NOT_FOUND = (None, [])
def _find(self, finder, name, loader_only=False):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
loader_portions = finder.find_loader(name)
return loader_portions[0] if loader_only else loader_portions
(Frozen_FinderTestsPEP420,
Source_FinderTestsPEP420
) = util.test_both(FinderTestsPEP420, machinery=machinery)
class FinderTestsPEP302(FinderTests):
NOT_FOUND = None
def _find(self, finder, name, loader_only=False):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
return finder.find_module(name)
(Frozen_FinderTestsPEP302,
Source_FinderTestsPEP302
) = util.test_both(FinderTestsPEP302, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| 8,775 | 237 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/asyncio/compat.py | """Compatibility helpers for the different Python versions."""
import sys
PY34 = sys.version_info >= (3, 4)
PY35 = sys.version_info >= (3, 5)
PY352 = sys.version_info >= (3, 5, 2)
def flatten_list_bytes(list_of_data):
"""Concatenate a sequence of bytes-like objects."""
if not PY34:
# On Python 3.3 and older, bytes.join() doesn't handle
# memoryview.
list_of_data = (
bytes(data) if isinstance(data, memoryview) else data
for data in list_of_data)
return b''.join(list_of_data)
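# For illustration:
#   flatten_list_bytes([b'ab', memoryview(b'cd')]) == b'abcd'
# On Python 3.4+, bytes.join() accepts memoryview directly, so the generator
# above only rewraps data on 3.3 and older.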
| 543 | 19 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/asyncio/base_subprocess.py | import collections
import subprocess
import warnings
from . import compat
from . import protocols
from . import transports
from .coroutines import coroutine
from .log import logger
class BaseSubprocessTransport(transports.SubprocessTransport):
def __init__(self, loop, protocol, args, shell,
stdin, stdout, stderr, bufsize,
waiter=None, extra=None, **kwargs):
super().__init__(extra)
self._closed = False
self._protocol = protocol
self._loop = loop
self._proc = None
self._pid = None
self._returncode = None
self._exit_waiters = []
self._pending_calls = collections.deque()
self._pipes = {}
self._finished = False
if stdin == subprocess.PIPE:
self._pipes[0] = None
if stdout == subprocess.PIPE:
self._pipes[1] = None
if stderr == subprocess.PIPE:
self._pipes[2] = None
# Create the child process: set the _proc attribute
try:
self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
stderr=stderr, bufsize=bufsize, **kwargs)
except:
self.close()
raise
self._pid = self._proc.pid
self._extra['subprocess'] = self._proc
if self._loop.get_debug():
if isinstance(args, (bytes, str)):
program = args
else:
program = args[0]
logger.debug('process %r created: pid %s',
program, self._pid)
self._loop.create_task(self._connect_pipes(waiter))
def __repr__(self):
info = [self.__class__.__name__]
if self._closed:
info.append('closed')
if self._pid is not None:
info.append('pid=%s' % self._pid)
if self._returncode is not None:
info.append('returncode=%s' % self._returncode)
elif self._pid is not None:
info.append('running')
else:
info.append('not started')
stdin = self._pipes.get(0)
if stdin is not None:
info.append('stdin=%s' % stdin.pipe)
stdout = self._pipes.get(1)
stderr = self._pipes.get(2)
if stdout is not None and stderr is stdout:
info.append('stdout=stderr=%s' % stdout.pipe)
else:
if stdout is not None:
info.append('stdout=%s' % stdout.pipe)
if stderr is not None:
info.append('stderr=%s' % stderr.pipe)
return '<%s>' % ' '.join(info)
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
raise NotImplementedError
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closed
def close(self):
if self._closed:
return
self._closed = True
for proto in self._pipes.values():
if proto is None:
continue
proto.pipe.close()
if (self._proc is not None
# the child process finished?
and self._returncode is None
# the child process finished but the transport was not notified yet?
and self._proc.poll() is None
):
if self._loop.get_debug():
logger.warning('Close running child process: kill %r', self)
try:
self._proc.kill()
except ProcessLookupError:
pass
# Don't clear the _proc reference yet: _post_init() may still run
    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That is no longer the case on
    # Python 3.4 and later, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if not self._closed:
warnings.warn("unclosed transport %r" % self, ResourceWarning,
source=self)
self.close()
def get_pid(self):
return self._pid
def get_returncode(self):
return self._returncode
def get_pipe_transport(self, fd):
if fd in self._pipes:
return self._pipes[fd].pipe
else:
return None
def _check_proc(self):
if self._proc is None:
raise ProcessLookupError()
def send_signal(self, signal):
self._check_proc()
self._proc.send_signal(signal)
def terminate(self):
self._check_proc()
self._proc.terminate()
def kill(self):
self._check_proc()
self._proc.kill()
@coroutine
def _connect_pipes(self, waiter):
try:
proc = self._proc
loop = self._loop
if proc.stdin is not None:
_, pipe = yield from loop.connect_write_pipe(
lambda: WriteSubprocessPipeProto(self, 0),
proc.stdin)
self._pipes[0] = pipe
if proc.stdout is not None:
_, pipe = yield from loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 1),
proc.stdout)
self._pipes[1] = pipe
if proc.stderr is not None:
_, pipe = yield from loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 2),
proc.stderr)
self._pipes[2] = pipe
assert self._pending_calls is not None
loop.call_soon(self._protocol.connection_made, self)
for callback, data in self._pending_calls:
loop.call_soon(callback, *data)
self._pending_calls = None
except Exception as exc:
if waiter is not None and not waiter.cancelled():
waiter.set_exception(exc)
else:
if waiter is not None and not waiter.cancelled():
waiter.set_result(None)
def _call(self, cb, *data):
if self._pending_calls is not None:
self._pending_calls.append((cb, data))
else:
self._loop.call_soon(cb, *data)
def _pipe_connection_lost(self, fd, exc):
self._call(self._protocol.pipe_connection_lost, fd, exc)
self._try_finish()
def _pipe_data_received(self, fd, data):
self._call(self._protocol.pipe_data_received, fd, data)
def _process_exited(self, returncode):
assert returncode is not None, returncode
assert self._returncode is None, self._returncode
if self._loop.get_debug():
logger.info('%r exited with return code %r',
self, returncode)
self._returncode = returncode
if self._proc.returncode is None:
# asyncio uses a child watcher: copy the status into the Popen
# object. On Python 3.6, it is required to avoid a ResourceWarning.
self._proc.returncode = returncode
self._call(self._protocol.process_exited)
self._try_finish()
# wake up futures waiting for wait()
for waiter in self._exit_waiters:
if not waiter.cancelled():
waiter.set_result(returncode)
self._exit_waiters = None
@coroutine
def _wait(self):
"""Wait until the process exit and return the process return code.
This method is a coroutine."""
if self._returncode is not None:
return self._returncode
waiter = self._loop.create_future()
self._exit_waiters.append(waiter)
return (yield from waiter)
def _try_finish(self):
assert not self._finished
if self._returncode is None:
return
if all(p is not None and p.disconnected
for p in self._pipes.values()):
self._finished = True
self._call(self._call_connection_lost, None)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._loop = None
self._proc = None
self._protocol = None
class WriteSubprocessPipeProto(protocols.BaseProtocol):
def __init__(self, proc, fd):
self.proc = proc
self.fd = fd
self.pipe = None
self.disconnected = False
def connection_made(self, transport):
self.pipe = transport
def __repr__(self):
return ('<%s fd=%s pipe=%r>'
% (self.__class__.__name__, self.fd, self.pipe))
def connection_lost(self, exc):
self.disconnected = True
self.proc._pipe_connection_lost(self.fd, exc)
self.proc = None
def pause_writing(self):
self.proc._protocol.pause_writing()
def resume_writing(self):
self.proc._protocol.resume_writing()
class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
protocols.Protocol):
def data_received(self, data):
self.proc._pipe_data_received(self.fd, data)
cosmopolitan/third_party/python/Lib/asyncio/base_events.py
"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import concurrent.futures
import heapq
import inspect
import itertools
import logging
import os
import socket
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
from . import compat
from . import coroutines
from . import events
from . import futures
from . import tasks
from .coroutines import coroutine
from .log import logger
__all__ = ['BaseEventLoop']
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
# Exceptions which must not call the exception handler in fatal error
# methods (_fatal_error())
_FATAL_ERROR_IGNORE = (BrokenPipeError,
ConnectionResetError, ConnectionAbortedError)
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _is_stream_socket(sock_type):
if hasattr(socket, 'SOCK_NONBLOCK'):
# Linux's socket.type is a bitmask that can include extra info
        # about the socket (like the SOCK_NONBLOCK bit), so we can't do a
        # simple `sock_type == socket.SOCK_STREAM` comparison; see
# https://github.com/torvalds/linux/blob/v4.13/include/linux/net.h#L77
# for more details.
return (sock_type & 0xF) == socket.SOCK_STREAM
else:
return sock_type == socket.SOCK_STREAM
def _is_dgram_socket(sock_type):
if hasattr(socket, 'SOCK_NONBLOCK'):
# See the comment in `_is_stream_socket`.
return (sock_type & 0xF) == socket.SOCK_DGRAM
else:
return sock_type == socket.SOCK_DGRAM
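# A minimal sketch (not part of the original module) of why the 0xF mask
# above is needed: on Linux a socket created with SOCK_NONBLOCK keeps that
# flag in its type, so only the low bits identify the socket kind.
#
#   >>> import socket
#   >>> sock_type = socket.SOCK_STREAM | socket.SOCK_NONBLOCK  # Linux-only flag
#   >>> sock_type == socket.SOCK_STREAM
#   False
#   >>> (sock_type & 0xF) == socket.SOCK_STREAM
#   True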
def _ipaddr_info(host, port, family, type, proto):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if _is_stream_socket(type):
proto = socket.IPPROTO_TCP
elif _is_dgram_socket(type):
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, 0, 0)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
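# Illustrative usage (not part of the original module): with a numeric host,
# _ipaddr_info() returns a complete addrinfo-style 5-tuple and the
# getaddrinfo() round-trip can be skipped; with a hostname it returns None.
#
#   >>> import socket
#   >>> _ipaddr_info('127.0.0.1', 8080, socket.AF_UNSPEC,
#   ...              socket.SOCK_STREAM, 0)[4]
#   ('127.0.0.1', 8080)
#   >>> _ipaddr_info('example.com', 8080, socket.AF_UNSPEC,
#   ...              socket.SOCK_STREAM, 0) is None
#   True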
def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0,
flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto)
if info is not None:
# "host" is already a resolved IP.
fut = loop.create_future()
fut.set_result([info])
return fut
else:
return loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
_is_stream_socket(sock.type) and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
def _run_until_complete_cb(fut):
exc = fut._exception
if (isinstance(exc, BaseException)
and not isinstance(exc, Exception)):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
fut._loop.stop()
class Server(events.AbstractServer):
def __init__(self, loop, sockets):
self._loop = loop
self.sockets = sockets
self._active_count = 0
self._waiters = []
def __repr__(self):
return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets)
def _attach(self):
assert self.sockets is not None
self._active_count += 1
def _detach(self):
assert self._active_count > 0
self._active_count -= 1
if self._active_count == 0 and self.sockets is None:
self._wakeup()
def close(self):
sockets = self.sockets
if sockets is None:
return
self.sockets = None
for sock in sockets:
self._loop._stop_serving(sock)
if self._active_count == 0:
self._wakeup()
def _wakeup(self):
waiters = self._waiters
self._waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
@coroutine
def wait_closed(self):
if self.sockets is None or self._waiters is None:
return
waiter = self._loop.create_future()
self._waiters.append(waiter)
yield from waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug((not sys.flags.ignore_environment
and bool(os.environ.get('PYTHONASYNCIODEBUG'))))
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_wrapper_set = False
if hasattr(sys, 'get_asyncgen_hooks'):
# Python >= 3.6
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
else:
self._asyncgens = None
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
def __repr__(self):
return ('<%s running=%s closed=%s debug=%s>'
% (self.__class__.__name__, self.is_running(),
self.is_closed(), self.get_debug()))
def create_future(self):
"""Create a Future object attached to the loop."""
return futures.Future(loop=self)
def create_task(self, coro):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
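    # Illustrative sketch (not part of the original module): a factory with
    # the documented '(loop, coro)' signature; `TaggedTask` is a hypothetical
    # Task subclass used only for this example.
    #
    #   class TaggedTask(tasks.Task):
    #       pass
    #
    #   loop.set_task_factory(lambda loop, coro: TaggedTask(coro, loop=loop))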
def get_task_factory(self):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
@coroutine
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
"asynchronous generator {!r} was scheduled after "
"loop.shutdown_asyncgens() call".format(agen),
ResourceWarning, source=self)
self._asyncgens.add(agen)
@coroutine
def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
self._asyncgens_shutdown_called = True
if self._asyncgens is None or not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
# generators alive.
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
shutdown_coro = tasks.gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True,
loop=self)
results = yield from shutdown_coro
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': 'an error occurred during closing of '
'asynchronous generator {!r}'.format(agen),
'exception': result,
'asyncgen': agen
})
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
self._set_coroutine_wrapper(self._debug)
self._thread_id = threading.get_ident()
if self._asyncgens is not None:
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_wrapper(False)
if self._asyncgens is not None:
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
# local task.
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
self._stopping = True
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. This is no longer the case on
    # Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if not self.is_closed():
warnings.warn("unclosed event loop %r" % self, ResourceWarning,
source=self)
if not self.is_running():
self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
timer = self.call_at(self.time() + delay, callback, *args)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
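    # Illustrative usage (not part of the original module): positional
    # arguments after the callback are forwarded when it fires, so no lambda
    # is needed, and the returned Handle can cancel the call.
    #
    #   handle = loop.call_later(5.0, print, 'timed out')
    #   handle.cancel()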
def call_at(self, when, callback, *args):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
timer._scheduled = True
return timer
def call_soon(self, callback, *args):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
"coroutines cannot be used with {}()".format(method))
if not callable(callback):
raise TypeError(
'a callable object was expected by {}(), got {!r}'.format(
method, callback))
def _call_soon(self, callback, args):
handle = events.Handle(callback, args, self)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args):
"""Like call_soon(), but thread-safe."""
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
def run_in_executor(self, executor, func, *args):
self._check_closed()
if self._debug:
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor()
self._default_executor = executor
return futures.wrap_future(executor.submit(func, *args), loop=self)
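    # Illustrative usage (not part of the original module): offload a
    # blocking function to the (lazily created) default thread pool and wait
    # for the wrapped future from a coroutine; time.sleep stands in for any
    # blocking call.
    #
    #   @coroutine
    #   def blocking_nap(loop):
    #       yield from loop.run_in_executor(None, time.sleep, 1.0)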
def set_default_executor(self, executor):
self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
msg = ["%s:%r" % (host, port)]
if family:
msg.append('family=%r' % family)
if type:
msg.append('type=%r' % type)
if proto:
msg.append('proto=%r' % proto)
if flags:
msg.append('flags=%r' % flags)
msg = ', '.join(msg)
logger.debug('Get address info %s', msg)
t0 = self.time()
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
dt = self.time() - t0
msg = ('Getting address info %s took %.3f ms: %r'
% (msg, dt * 1e3, addrinfo))
if dt >= self.slow_callback_duration:
logger.info(msg)
else:
logger.debug(msg)
return addrinfo
def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
return self.run_in_executor(None, self._getaddrinfo_debug,
host, port, family, type, proto, flags)
else:
return self.run_in_executor(None, socket.getaddrinfo,
host, port, family, type, proto, flags)
def getnameinfo(self, sockaddr, flags=0):
return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
@coroutine
def create_connection(self, protocol_factory, host=None, port=None, *,
ssl=None, family=0, proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None):
"""Connect to a TCP server.
Create a streaming transport connection to a given Internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
f1 = _ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM, proto=proto,
flags=flags, loop=self)
fs = [f1]
if local_addr is not None:
f2 = _ensure_resolved(local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto,
flags=flags, loop=self)
fs.append(f2)
else:
f2 = None
yield from tasks.wait(fs, loop=self)
infos = f1.result()
if not infos:
raise OSError('getaddrinfo() returned empty list')
if f2 is not None:
laddr_infos = f2.result()
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
exceptions = []
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
if f2 is not None:
for _, _, _, _, laddr in laddr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
exc = OSError(
exc.errno, 'error while '
'attempting to bind on address '
'{!r}: {}'.format(
laddr, exc.strerror.lower()))
exceptions.append(exc)
else:
sock.close()
sock = None
continue
if self._debug:
logger.debug("connect %r to %r", sock, address)
yield from self.sock_connect(sock, address)
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
else:
if sock is None:
raise ValueError(
                'host and port were not specified and no sock was specified')
if not _is_stream_socket(sock.type):
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
# are SOCK_STREAM.
# We support passing AF_UNIX sockets even though we have
# a dedicated API for that: create_unix_connection.
# Disallowing AF_UNIX in this method, breaks backwards
# compatibility.
raise ValueError(
'A Stream Socket was expected, got {!r}'.format(sock))
transport, protocol = yield from self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
return transport, protocol
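    # Illustrative usage (not part of the original module), assuming an
    # `EchoClient` protocol class defined by the caller:
    #
    #   @coroutine
    #   def main(loop):
    #       transport, protocol = yield from loop.create_connection(
    #           EchoClient, 'example.com', 80)
    #       transport.write(b'ping')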
@coroutine
def _create_connection_transport(self, sock, protocol_factory, ssl,
server_hostname, server_side=False):
sock.setblocking(False)
protocol = protocol_factory()
waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
try:
yield from waiter
except:
transport.close()
raise
return transport, protocol
@coroutine
def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0,
reuse_address=_unset, reuse_port=None,
allow_broadcast=None, sock=None):
"""Create datagram connection."""
if sock is not None:
if not _is_dgram_socket(sock.type):
raise ValueError(
'A UDP Socket was expected, got {!r}'.format(sock))
if (local_addr or remote_addr or
family or proto or flags or
reuse_port or allow_broadcast):
# show the problematic kwargs in exception msg
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
family=family, proto=proto, flags=flags,
reuse_address=reuse_address, reuse_port=reuse_port,
allow_broadcast=allow_broadcast)
problems = ', '.join(
'{}={}'.format(k, v) for k, v in opts.items() if v)
raise ValueError(
'socket modifier keyword arguments can not be used '
'when sock is specified. ({})'.format(problems))
sock.setblocking(False)
r_addr = None
else:
if not (local_addr or remote_addr):
if family == 0:
raise ValueError('unexpected address family')
addr_pairs_info = (((family, proto), (None, None)),)
else:
                # group addresses by (family, protocol) pair
addr_infos = collections.OrderedDict()
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
infos = yield from _ensure_resolved(
addr, family=family, type=socket.SOCK_DGRAM,
proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
if key not in addr_infos:
addr_infos[key] = [None, None]
addr_infos[key][idx] = address
# each addr has to have info for each (family, proto) pair
addr_pairs_info = [
(key, addr_pair) for key, addr_pair in addr_infos.items()
if not ((local_addr and addr_pair[0] is None) or
(remote_addr and addr_pair[1] is None))]
if not addr_pairs_info:
raise ValueError('can not get address information')
exceptions = []
# bpo-37228
if reuse_address is not _unset:
if reuse_address:
raise ValueError("Passing `reuse_address=True` is no "
"longer supported, as the usage of "
"SO_REUSEPORT in UDP poses a significant "
"security concern.")
else:
warnings.warn("The *reuse_address* parameter has been "
"deprecated as of 3.6.10 and is scheduled "
"for removal in 3.11.", DeprecationWarning,
stacklevel=2)
for ((family, proto),
(local_address, remote_address)) in addr_pairs_info:
sock = None
r_addr = None
try:
sock = socket.socket(
family=family, type=socket.SOCK_DGRAM, proto=proto)
if reuse_port:
_set_reuseport(sock)
if allow_broadcast:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setblocking(False)
if local_addr:
sock.bind(local_address)
if remote_addr:
yield from self.sock_connect(sock, remote_address)
r_addr = remote_address
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
raise exceptions[0]
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_datagram_transport(
sock, protocol, r_addr, waiter)
if self._debug:
if local_addr:
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
"created: (%r, %r)",
local_addr, remote_addr, transport, protocol)
else:
logger.debug("Datagram endpoint remote_addr=%r created: "
"(%r, %r)",
remote_addr, transport, protocol)
try:
yield from waiter
except:
transport.close()
raise
return transport, protocol
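    # Illustrative usage (not part of the original module), assuming a
    # `UdpEcho` DatagramProtocol subclass defined by the caller:
    #
    #   transport, protocol = yield from loop.create_datagram_endpoint(
    #       UdpEcho, local_addr=('127.0.0.1', 9999))
    #   transport.sendto(b'ping', ('127.0.0.1', 9999))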
@coroutine
def _create_server_getaddrinfo(self, host, port, family, flags):
infos = yield from _ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo({!r}) returned empty list'.format(host))
return infos
@coroutine
def create_server(self, protocol_factory, host=None, port=None,
*,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
backlog=100,
ssl=None,
reuse_address=None,
reuse_port=None):
"""Create a TCP server.
        The host parameter can be a string; in that case the TCP server is
        bound to host and port.
        The host parameter can also be a sequence of strings, in which case
        the TCP server is bound to all hosts of the sequence. If a host
        appears multiple times (possibly indirectly, e.g. when hostnames
        resolve to the same IP address), the server is only bound once to
        that host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
sockets = []
if host == '':
hosts = [None]
elif (isinstance(host, str) or
not isinstance(host, collections.Iterable)):
hosts = [host]
else:
hosts = host
fs = [self._create_server_getaddrinfo(host, port, family=family,
flags=flags)
for host in hosts]
infos = yield from tasks.gather(*fs, loop=self)
infos = set(itertools.chain.from_iterable(infos))
completed = False
try:
for res in infos:
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error:
# Assume it's a bad family/type/protocol combination.
if self._debug:
logger.warning('create_server() failed to create '
'socket.socket(%r, %r, %r)',
af, socktype, proto, exc_info=True)
continue
sockets.append(sock)
if reuse_address:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
if reuse_port:
_set_reuseport(sock)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
if (_HAS_IPv6 and
af == socket.AF_INET6 and
hasattr(socket, 'IPPROTO_IPV6')):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
try:
sock.bind(sa)
except OSError as err:
raise OSError(err.errno, 'error while attempting '
'to bind on address %r: %s'
% (sa, err.strerror.lower()))
completed = True
finally:
if not completed:
for sock in sockets:
sock.close()
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
if not _is_stream_socket(sock.type):
raise ValueError(
'A Stream Socket was expected, got {!r}'.format(sock))
sockets = [sock]
server = Server(self, sockets)
for sock in sockets:
sock.listen(backlog)
sock.setblocking(False)
self._start_serving(protocol_factory, sock, ssl, server, backlog)
if self._debug:
logger.info("%r is serving", server)
return server
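    # Illustrative usage (not part of the original module), assuming an
    # `EchoServer` protocol class defined by the caller. Hostnames that
    # resolve to the same address are deduplicated by the set() of infos
    # above, so each address is bound only once.
    #
    #   server = yield from loop.create_server(EchoServer, 'localhost', 8888)
    #   server.close()
    #   yield from server.wait_closed()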
@coroutine
def connect_accepted_socket(self, protocol_factory, sock, *, ssl=None):
"""Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio but that use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
if not _is_stream_socket(sock.type):
raise ValueError(
'A Stream Socket was expected, got {!r}'.format(sock))
transport, protocol = yield from self._create_connection_transport(
sock, protocol_factory, ssl, '', server_side=True)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
return transport, protocol
@coroutine
def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
yield from waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
@coroutine
def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
yield from waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append('stdin=%s' % _format_pipe(stdin))
if stdout is not None and stderr == subprocess.STDOUT:
info.append('stdout=stderr=%s' % _format_pipe(stdout))
else:
if stdout is not None:
info.append('stdout=%s' % _format_pipe(stdout))
if stderr is not None:
info.append('stderr=%s' % _format_pipe(stderr))
logger.debug(' '.join(info))
@coroutine
def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=False, shell=True, bufsize=0,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = yield from self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
@coroutine
def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0, **kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
popen_args = (program,) + args
for arg in popen_args:
if not isinstance(arg, (str, bytes)):
raise TypeError("program arguments must be "
"a bytes or text string, not %s"
% type(arg).__name__)
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'execute program %r' % program
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = yield from self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError('A callable object or None is expected, '
'got {!r}'.format(handler))
self._exception_handler = handler
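    # Illustrative sketch (not part of the original module): a handler with
    # the documented '(loop, context)' signature that adds logging and then
    # defers to the default behavior.
    #
    #   def on_error(loop, context):
    #       logger.error('caught: %s', context.get('message'))
    #       loop.default_exception_handler(context)
    #
    #   loop.set_exception_handler(on_error)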
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
This default handler logs the error message and other
context-dependent information. In debug mode, a truncated
stack trace is also appended showing where the given object
(e.g. a handle or future or task) was created, if any.
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
if ('source_traceback' not in context
and self._current_handle is not None
and self._current_handle._source_traceback):
context['handle_traceback'] = self._current_handle._source_traceback
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
value = context[key]
if key == 'source_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Object created at (most recent call last):\n'
value += tb.rstrip()
elif key == 'handle_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Handle created at (most recent call last):\n'
value += tb.rstrip()
else:
value = repr(value)
log_lines.append('{}: {}'.format(key, value))
logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
"""Call the current event loop's exception handler.
The context argument is a dict containing the following keys:
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance;
- 'asyncgen' (optional): Asynchronous generator that caused
the exception.
        New keys may be introduced in the future.
Note: do not overload this method in an event loop subclass.
For custom exception handling, use the
`set_exception_handler()` method.
"""
if self._exception_handler is None:
try:
self.default_exception_handler(context)
except Exception:
# Second protection layer for unexpected errors
# in the default implementation, as well as for subclassed
# event loops with overloaded "default_exception_handler".
logger.error('Exception in default exception handler',
exc_info=True)
else:
try:
self._exception_handler(self, context)
except Exception as exc:
# Exception in the user set custom exception handler.
try:
# Let's try default handler.
self.default_exception_handler({
'message': 'Unhandled error in exception handler',
'exception': exc,
'context': context,
})
except Exception:
# Guard 'default_exception_handler' in case it is
# overloaded.
logger.error('Exception in default exception handler '
'while handling an unexpected error '
'in custom exception handler',
exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
sched_count = len(self._scheduled)
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
self._timer_cancelled_count / sched_count >
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
# Remove delayed calls that were cancelled if their number
# is too high
new_scheduled = []
for handle in self._scheduled:
if handle._cancelled:
handle._scheduled = False
else:
new_scheduled.append(handle)
heapq.heapify(new_scheduled)
self._scheduled = new_scheduled
self._timer_cancelled_count = 0
else:
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
self._timer_cancelled_count -= 1
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
timeout = None
if self._ready or self._stopping:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
if self._debug and timeout != 0:
t0 = self.time()
event_list = self._selector.select(timeout)
dt = self.time() - t0
if dt >= 1.0:
level = logging.INFO
else:
level = logging.DEBUG
nevent = len(event_list)
if timeout is None:
logger.log(level, 'poll took %.3f ms: %s events',
dt * 1e3, nevent)
elif nevent:
logger.log(level,
'poll %.3f ms took %.3f ms: %s events',
timeout * 1e3, dt * 1e3, nevent)
elif dt >= 1.0:
logger.log(level,
'poll %.3f ms took %.3f ms: timeout',
timeout * 1e3, dt * 1e3)
else:
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is thread-safe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if handle._cancelled:
continue
if self._debug:
try:
self._current_handle = handle
t0 = self.time()
handle._run()
dt = self.time() - t0
if dt >= self.slow_callback_duration:
logger.warning('Executing %s took %.3f seconds',
_format_handle(handle), dt)
finally:
self._current_handle = None
else:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
def _set_coroutine_wrapper(self, enabled):
try:
set_wrapper = sys.set_coroutine_wrapper
get_wrapper = sys.get_coroutine_wrapper
except AttributeError:
return
enabled = bool(enabled)
if self._coroutine_wrapper_set == enabled:
return
wrapper = coroutines.debug_wrapper
current_wrapper = get_wrapper()
if enabled:
if current_wrapper not in (None, wrapper):
warnings.warn(
"loop.set_debug(True): cannot set debug coroutine "
"wrapper; another wrapper is already set %r" %
current_wrapper, RuntimeWarning)
else:
set_wrapper(wrapper)
self._coroutine_wrapper_set = True
else:
if current_wrapper not in (None, wrapper):
warnings.warn(
"loop.set_debug(False): cannot unset debug coroutine "
"wrapper; another wrapper was set %r" %
current_wrapper, RuntimeWarning)
else:
set_wrapper(None)
self._coroutine_wrapper_set = False
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self._set_coroutine_wrapper(enabled)
cosmopolitan/third_party/python/Lib/asyncio/base_futures.py
__all__ = []
import concurrent.futures._base
import reprlib
from . import events
Error = concurrent.futures._base.Error
CancelledError = concurrent.futures.CancelledError
TimeoutError = concurrent.futures.TimeoutError
class InvalidStateError(Error):
"""The operation is not allowed in this state."""
# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'
def isfuture(obj):
"""Check for a Future.
This returns True when obj is a Future instance or is advertising
itself as duck-type compatible by setting _asyncio_future_blocking.
See comment in Future for more details.
"""
return (hasattr(obj.__class__, '_asyncio_future_blocking') and
obj._asyncio_future_blocking is not None)
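# A minimal sketch (not part of the original module): any object whose class
# advertises the duck-type attribute passes isfuture(), no inheritance from
# Future required.
#
#   class CompatibleFuture:
#       _asyncio_future_blocking = False  # not None means "I am a future"
#
#   assert isfuture(CompatibleFuture())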
def _format_callbacks(cb):
"""helper function for Future.__repr__"""
size = len(cb)
if not size:
cb = ''
def format_cb(callback):
return events._format_callback_source(callback, ())
if size == 1:
cb = format_cb(cb[0])
elif size == 2:
cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1]))
elif size > 2:
cb = '{}, <{} more>, {}'.format(format_cb(cb[0]),
size - 2,
format_cb(cb[-1]))
return 'cb=[%s]' % cb
def _future_repr_info(future):
# (Future) -> str
"""helper function for Future.__repr__"""
info = [future._state.lower()]
if future._state == _FINISHED:
if future._exception is not None:
info.append('exception={!r}'.format(future._exception))
else:
# use reprlib to limit the length of the output, especially
# for very long strings
result = reprlib.repr(future._result)
info.append('result={}'.format(result))
if future._callbacks:
info.append(_format_callbacks(future._callbacks))
if future._source_traceback:
frame = future._source_traceback[-1]
info.append('created at %s:%s' % (frame[0], frame[1]))
return info
cosmopolitan/third_party/python/Lib/asyncio/windows_events.py
"""Selector and proactor event loops for Windows."""
import _winapi
import errno
import math
import socket
import struct
import weakref
from . import events
from . import base_subprocess
from . import futures
from . import proactor_events
from . import selector_events
from . import tasks
from . import windows_utils
from . import _overlapped
from .coroutines import coroutine
from .log import logger
__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
'DefaultEventLoopPolicy',
]
NULL = 0
INFINITE = 0xffffffff
ERROR_CONNECTION_REFUSED = 1225
ERROR_CONNECTION_ABORTED = 1236
# Initial delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_INIT_DELAY = 0.001
# Maximum delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_MAX_DELAY = 0.100
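# A minimal sketch (not part of the original module) of the retry schedule
# used by IocpProactor.connect_pipe() below: the delay is doubled before
# each sleep and capped at the maximum, so the waits run
# 2 ms, 4 ms, 8 ms, ..., 100 ms, 100 ms, ...
#
#   delay = CONNECT_PIPE_INIT_DELAY
#   delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)  # repeated on each retry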
class _OverlappedFuture(futures.Future):
"""Subclass of Future which represents an overlapped operation.
Cancelling it will immediately cancel the overlapped operation.
"""
def __init__(self, ov, *, loop=None):
super().__init__(loop=loop)
if self._source_traceback:
del self._source_traceback[-1]
self._ov = ov
def _repr_info(self):
info = super()._repr_info()
if self._ov is not None:
state = 'pending' if self._ov.pending else 'completed'
info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address))
return info
def _cancel_overlapped(self):
if self._ov is None:
return
try:
self._ov.cancel()
except OSError as exc:
context = {
'message': 'Cancelling an overlapped future failed',
'exception': exc,
'future': self,
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
self._ov = None
def cancel(self):
self._cancel_overlapped()
return super().cancel()
def set_exception(self, exception):
super().set_exception(exception)
self._cancel_overlapped()
def set_result(self, result):
super().set_result(result)
self._ov = None
class _BaseWaitHandleFuture(futures.Future):
"""Subclass of Future which represents a wait handle."""
def __init__(self, ov, handle, wait_handle, *, loop=None):
super().__init__(loop=loop)
if self._source_traceback:
del self._source_traceback[-1]
# Keep a reference to the Overlapped object to keep it alive until the
# wait is unregistered
self._ov = ov
self._handle = handle
self._wait_handle = wait_handle
# Should we call UnregisterWaitEx() if the wait completes
# or is cancelled?
self._registered = True
def _poll(self):
        # non-blocking wait: use a timeout of 0 milliseconds
return (_winapi.WaitForSingleObject(self._handle, 0) ==
_winapi.WAIT_OBJECT_0)
def _repr_info(self):
info = super()._repr_info()
info.append('handle=%#x' % self._handle)
if self._handle is not None:
state = 'signaled' if self._poll() else 'waiting'
info.append(state)
if self._wait_handle is not None:
info.append('wait_handle=%#x' % self._wait_handle)
return info
def _unregister_wait_cb(self, fut):
        # The wait was unregistered: it's now safe to destroy the Overlapped
        # object
self._ov = None
def _unregister_wait(self):
if not self._registered:
return
self._registered = False
wait_handle = self._wait_handle
self._wait_handle = None
try:
_overlapped.UnregisterWait(wait_handle)
except OSError as exc:
if exc.winerror != _overlapped.ERROR_IO_PENDING:
context = {
'message': 'Failed to unregister the wait handle',
'exception': exc,
'future': self,
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
return
# ERROR_IO_PENDING means that the unregister is pending
self._unregister_wait_cb(None)
def cancel(self):
self._unregister_wait()
return super().cancel()
def set_exception(self, exception):
self._unregister_wait()
super().set_exception(exception)
def set_result(self, result):
self._unregister_wait()
super().set_result(result)
class _WaitCancelFuture(_BaseWaitHandleFuture):
"""Subclass of Future which represents a wait for the cancellation of a
_WaitHandleFuture using an event.
"""
def __init__(self, ov, event, wait_handle, *, loop=None):
super().__init__(ov, event, wait_handle, loop=loop)
self._done_callback = None
def cancel(self):
raise RuntimeError("_WaitCancelFuture must not be cancelled")
def set_result(self, result):
super().set_result(result)
if self._done_callback is not None:
self._done_callback(self)
def set_exception(self, exception):
super().set_exception(exception)
if self._done_callback is not None:
self._done_callback(self)
class _WaitHandleFuture(_BaseWaitHandleFuture):
def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):
super().__init__(ov, handle, wait_handle, loop=loop)
self._proactor = proactor
self._unregister_proactor = True
self._event = _overlapped.CreateEvent(None, True, False, None)
self._event_fut = None
def _unregister_wait_cb(self, fut):
if self._event is not None:
_winapi.CloseHandle(self._event)
self._event = None
self._event_fut = None
# If the wait was cancelled, the wait may never be signalled, so
# it's required to unregister it. Otherwise, IocpProactor.close() will
# wait forever for an event which will never come.
#
# If the IocpProactor already received the event, it's safe to call
# _unregister() because we kept a reference to the Overlapped object
# which is used as a unique key.
self._proactor._unregister(self._ov)
self._proactor = None
super()._unregister_wait_cb(fut)
def _unregister_wait(self):
if not self._registered:
return
self._registered = False
wait_handle = self._wait_handle
self._wait_handle = None
try:
_overlapped.UnregisterWaitEx(wait_handle, self._event)
except OSError as exc:
if exc.winerror != _overlapped.ERROR_IO_PENDING:
context = {
'message': 'Failed to unregister the wait handle',
'exception': exc,
'future': self,
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
return
# ERROR_IO_PENDING is not an error, the wait was unregistered
self._event_fut = self._proactor._wait_cancel(self._event,
self._unregister_wait_cb)
class PipeServer(object):
"""Class representing a pipe server.
This is much like a bound, listening socket.
"""
def __init__(self, address):
self._address = address
self._free_instances = weakref.WeakSet()
# initialize the pipe attribute before calling _server_pipe_handle()
# because this function can raise an exception and the destructor calls
# the close() method
self._pipe = None
self._accept_pipe_future = None
self._pipe = self._server_pipe_handle(True)
def _get_unconnected_pipe(self):
        # Create a new instance and return the previous one. This ensures
        # that (until the server is closed) there is always at least one
        # pipe handle for the address, so a client attempting to connect
        # will not fail with FileNotFoundError.
tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
return tmp
def _server_pipe_handle(self, first):
# Return a wrapper for a new pipe handle.
if self.closed():
return None
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
if first:
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
h = _winapi.CreateNamedPipe(
self._address, flags,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
_winapi.PIPE_UNLIMITED_INSTANCES,
windows_utils.BUFSIZE, windows_utils.BUFSIZE,
_winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
pipe = windows_utils.PipeHandle(h)
self._free_instances.add(pipe)
return pipe
def closed(self):
return (self._address is None)
def close(self):
if self._accept_pipe_future is not None:
self._accept_pipe_future.cancel()
self._accept_pipe_future = None
# Close all instances which have not been connected to by a client.
if self._address is not None:
for pipe in self._free_instances:
pipe.close()
self._pipe = None
self._address = None
self._free_instances.clear()
__del__ = close
class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
"""Windows version of selector event loop."""
def _socketpair(self):
return windows_utils.socketpair()
class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
"""Windows version of proactor event loop using IOCP."""
def __init__(self, proactor=None):
if proactor is None:
proactor = IocpProactor()
super().__init__(proactor)
def _socketpair(self):
return windows_utils.socketpair()
@coroutine
def create_pipe_connection(self, protocol_factory, address):
f = self._proactor.connect_pipe(address)
pipe = yield from f
protocol = protocol_factory()
trans = self._make_duplex_pipe_transport(pipe, protocol,
extra={'addr': address})
return trans, protocol
@coroutine
def start_serving_pipe(self, protocol_factory, address):
server = PipeServer(address)
def loop_accept_pipe(f=None):
pipe = None
try:
if f:
pipe = f.result()
server._free_instances.discard(pipe)
if server.closed():
# A client connected before the server was closed:
# drop the client (close the pipe) and exit
pipe.close()
return
protocol = protocol_factory()
self._make_duplex_pipe_transport(
pipe, protocol, extra={'addr': address})
pipe = server._get_unconnected_pipe()
if pipe is None:
return
f = self._proactor.accept_pipe(pipe)
except OSError as exc:
if pipe and pipe.fileno() != -1:
self.call_exception_handler({
'message': 'Pipe accept failed',
'exception': exc,
'pipe': pipe,
})
pipe.close()
elif self._debug:
logger.warning("Accept pipe failed on pipe %r",
pipe, exc_info=True)
except futures.CancelledError:
if pipe:
pipe.close()
else:
server._accept_pipe_future = f
f.add_done_callback(loop_accept_pipe)
self.call_soon(loop_accept_pipe)
return [server]
@coroutine
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
waiter = self.create_future()
transp = _WindowsSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
waiter=waiter, extra=extra,
**kwargs)
try:
yield from waiter
except Exception as exc:
# Workaround CPython bug #23353: using yield/yield-from in an
            # except block of a generator doesn't properly clear sys.exc_info()
err = exc
else:
err = None
if err is not None:
transp.close()
yield from transp._wait()
raise err
return transp
class IocpProactor:
"""Proactor implementation using IOCP."""
def __init__(self, concurrency=0xffffffff):
self._loop = None
self._results = []
self._iocp = _overlapped.CreateIoCompletionPort(
_overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
self._cache = {}
self._registered = weakref.WeakSet()
self._unregistered = []
self._stopped_serving = weakref.WeakSet()
def __repr__(self):
return ('<%s overlapped#=%s result#=%s>'
% (self.__class__.__name__, len(self._cache),
len(self._results)))
def set_loop(self, loop):
self._loop = loop
def select(self, timeout=None):
if not self._results:
self._poll(timeout)
tmp = self._results
self._results = []
return tmp
def _result(self, value):
fut = self._loop.create_future()
fut.set_result(value)
return fut
def recv(self, conn, nbytes, flags=0):
self._register_with_iocp(conn)
ov = _overlapped.Overlapped(NULL)
try:
if isinstance(conn, socket.socket):
ov.WSARecv(conn.fileno(), nbytes, flags)
else:
ov.ReadFile(conn.fileno(), nbytes)
except BrokenPipeError:
return self._result(b'')
def finish_recv(trans, key, ov):
try:
return ov.getresult()
except OSError as exc:
if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
raise ConnectionResetError(*exc.args)
else:
raise
return self._register(ov, conn, finish_recv)
def send(self, conn, buf, flags=0):
self._register_with_iocp(conn)
ov = _overlapped.Overlapped(NULL)
if isinstance(conn, socket.socket):
ov.WSASend(conn.fileno(), buf, flags)
else:
ov.WriteFile(conn.fileno(), buf)
def finish_send(trans, key, ov):
try:
return ov.getresult()
except OSError as exc:
if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
raise ConnectionResetError(*exc.args)
else:
raise
return self._register(ov, conn, finish_send)
def accept(self, listener):
self._register_with_iocp(listener)
conn = self._get_accept_socket(listener.family)
ov = _overlapped.Overlapped(NULL)
ov.AcceptEx(listener.fileno(), conn.fileno())
def finish_accept(trans, key, ov):
ov.getresult()
# Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
buf = struct.pack('@P', listener.fileno())
conn.setsockopt(socket.SOL_SOCKET,
_overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
conn.settimeout(listener.gettimeout())
return conn, conn.getpeername()
@coroutine
def accept_coro(future, conn):
# Coroutine closing the accept socket if the future is cancelled
try:
yield from future
except futures.CancelledError:
conn.close()
raise
future = self._register(ov, listener, finish_accept)
coro = accept_coro(future, conn)
tasks.ensure_future(coro, loop=self._loop)
return future
def connect(self, conn, address):
self._register_with_iocp(conn)
# The socket needs to be locally bound before we call ConnectEx().
try:
_overlapped.BindLocal(conn.fileno(), conn.family)
except OSError as e:
if e.winerror != errno.WSAEINVAL:
raise
# Probably already locally bound; check using getsockname().
if conn.getsockname()[1] == 0:
raise
ov = _overlapped.Overlapped(NULL)
ov.ConnectEx(conn.fileno(), address)
def finish_connect(trans, key, ov):
ov.getresult()
# Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
conn.setsockopt(socket.SOL_SOCKET,
_overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
return conn
return self._register(ov, conn, finish_connect)
def accept_pipe(self, pipe):
self._register_with_iocp(pipe)
ov = _overlapped.Overlapped(NULL)
connected = ov.ConnectNamedPipe(pipe.fileno())
if connected:
            # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED, which means
# that the pipe is connected. There is no need to wait for the
# completion of the connection.
return self._result(pipe)
def finish_accept_pipe(trans, key, ov):
ov.getresult()
return pipe
return self._register(ov, pipe, finish_accept_pipe)
@coroutine
def connect_pipe(self, address):
delay = CONNECT_PIPE_INIT_DELAY
while True:
# Unfortunately there is no way to do an overlapped connect to a pipe.
# Call CreateFile() in a loop until it doesn't fail with
# ERROR_PIPE_BUSY
try:
handle = _overlapped.ConnectPipe(address)
break
except OSError as exc:
if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
raise
# ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
yield from tasks.sleep(delay, loop=self._loop)
return windows_utils.PipeHandle(handle)
def wait_for_handle(self, handle, timeout=None):
"""Wait for a handle.
Return a Future object. The result of the future is True if the wait
completed, or False if the wait did not complete (on timeout).
"""
return self._wait_for_handle(handle, timeout, False)
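    # Example (illustrative sketch; ``proc`` is a hypothetical
    # windows_utils.Popen instance, and this must run inside a coroutine):
    #
    #   fut = proactor.wait_for_handle(int(proc._handle), timeout=5.0)
    #   finished = yield from fut  # True if signalled, False on timeout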
def _wait_cancel(self, event, done_callback):
fut = self._wait_for_handle(event, None, True)
# add_done_callback() cannot be used because the wait may only complete
# in IocpProactor.close(), while the event loop is not running.
fut._done_callback = done_callback
return fut
def _wait_for_handle(self, handle, timeout, _is_cancel):
if timeout is None:
ms = _winapi.INFINITE
else:
# RegisterWaitForSingleObject() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
ms = math.ceil(timeout * 1e3)
# We only create ov so we can use ov.address as a key for the cache.
ov = _overlapped.Overlapped(NULL)
wait_handle = _overlapped.RegisterWaitWithQueue(
handle, self._iocp, ov.address, ms)
if _is_cancel:
f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
else:
f = _WaitHandleFuture(ov, handle, wait_handle, self,
loop=self._loop)
if f._source_traceback:
del f._source_traceback[-1]
def finish_wait_for_handle(trans, key, ov):
            # Note that this second wait means that we should only use
            # this with handle types where a successful wait has no
            # effect. So events or processes are all right, but locks
            # or semaphores are not. Also note that if the handle is
            # signalled and then quickly reset, we may return False
            # even though we have not timed out.
return f._poll()
self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
return f
def _register_with_iocp(self, obj):
        # To get notifications of finished ops on this object sent to the
        # completion port, we must register the handle.
if obj not in self._registered:
self._registered.add(obj)
_overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
# XXX We could also use SetFileCompletionNotificationModes()
# to avoid sending notifications to completion port of ops
# that succeed immediately.
def _register(self, ov, obj, callback):
# Return a future which will be set with the result of the
# operation when it completes. The future's value is actually
# the value returned by callback().
f = _OverlappedFuture(ov, loop=self._loop)
if f._source_traceback:
del f._source_traceback[-1]
if not ov.pending:
# The operation has completed, so no need to postpone the
# work. We cannot take this short cut if we need the
# NumberOfBytes, CompletionKey values returned by
# PostQueuedCompletionStatus().
try:
value = callback(None, None, ov)
except OSError as e:
f.set_exception(e)
else:
f.set_result(value)
# Even if GetOverlappedResult() was called, we have to wait for the
# notification of the completion in GetQueuedCompletionStatus().
# Register the overlapped operation to keep a reference to the
# OVERLAPPED object, otherwise the memory is freed and Windows may
# read uninitialized memory.
# Register the overlapped operation for later. Note that
# we only store obj to prevent it from being garbage
# collected too early.
self._cache[ov.address] = (f, ov, obj, callback)
return f
def _unregister(self, ov):
"""Unregister an overlapped object.
Call this method when its future has been cancelled. The event can
already be signalled (pending in the proactor event queue). It is also
safe if the event is never signalled (because it was cancelled).
"""
self._unregistered.append(ov)
def _get_accept_socket(self, family):
s = socket.socket(family)
s.settimeout(0)
return s
def _poll(self, timeout=None):
if timeout is None:
ms = INFINITE
elif timeout < 0:
raise ValueError("negative timeout")
else:
# GetQueuedCompletionStatus() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
ms = math.ceil(timeout * 1e3)
if ms >= INFINITE:
raise ValueError("timeout too big")
while True:
status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
if status is None:
break
ms = 0
err, transferred, key, address = status
try:
f, ov, obj, callback = self._cache.pop(address)
except KeyError:
if self._loop.get_debug():
self._loop.call_exception_handler({
'message': ('GetQueuedCompletionStatus() returned an '
'unexpected event'),
'status': ('err=%s transferred=%s key=%#x address=%#x'
% (err, transferred, key, address)),
})
# key is either zero, or it is used to return a pipe
# handle which should be closed to avoid a leak.
if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
_winapi.CloseHandle(key)
continue
if obj in self._stopped_serving:
f.cancel()
# Don't call the callback if _register() already read the result or
# if the overlapped has been cancelled
elif not f.done():
try:
value = callback(transferred, key, ov)
except OSError as e:
f.set_exception(e)
self._results.append(f)
else:
f.set_result(value)
self._results.append(f)
        # Remove unregistered futures.
for ov in self._unregistered:
self._cache.pop(ov.address, None)
self._unregistered.clear()
def _stop_serving(self, obj):
# obj is a socket or pipe handle. It will be closed in
# BaseProactorEventLoop._stop_serving() which will make any
# pending operations fail quickly.
self._stopped_serving.add(obj)
def close(self):
# Cancel remaining registered operations.
for address, (fut, ov, obj, callback) in list(self._cache.items()):
if fut.cancelled():
# Nothing to do with cancelled futures
pass
elif isinstance(fut, _WaitCancelFuture):
# _WaitCancelFuture must not be cancelled
pass
else:
try:
fut.cancel()
except OSError as exc:
if self._loop is not None:
context = {
'message': 'Cancelling a future failed',
'exception': exc,
'future': fut,
}
if fut._source_traceback:
context['source_traceback'] = fut._source_traceback
self._loop.call_exception_handler(context)
while self._cache:
if not self._poll(1):
logger.debug('taking long time to close proactor')
self._results = []
if self._iocp is not None:
_winapi.CloseHandle(self._iocp)
self._iocp = None
def __del__(self):
self.close()
class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
self._proc = windows_utils.Popen(
args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
bufsize=bufsize, **kwargs)
def callback(f):
returncode = self._proc.poll()
self._process_exited(returncode)
f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
f.add_done_callback(callback)
SelectorEventLoop = _WindowsSelectorEventLoop
class _WindowsDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
_loop_factory = SelectorEventLoop
DefaultEventLoopPolicy = _WindowsDefaultEventLoopPolicy
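# Usage sketch (illustrative, not part of this module): on Windows an
# application opts into the IOCP-based loop explicitly, since the selector
# loop is the default policy here:
#
#   import asyncio
#   loop = asyncio.ProactorEventLoop()
#   asyncio.set_event_loop(loop)
#   try:
#       loop.run_until_complete(asyncio.sleep(0.1))
#   finally:
#       loop.close()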
cosmopolitan/third_party/python/Lib/asyncio/streams.py
"""Stream-related things."""
__all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
'open_connection', 'start_server',
'IncompleteReadError',
'LimitOverrunError',
]
import socket
if hasattr(socket, 'AF_UNIX'):
__all__.extend(['open_unix_connection', 'start_unix_server'])
from . import coroutines
from . import compat
from . import events
from . import protocols
from .coroutines import coroutine
from .log import logger
_DEFAULT_LIMIT = 2 ** 16
class IncompleteReadError(EOFError):
"""
Incomplete read error. Attributes:
    - partial: bytes read before the end of stream was reached
- expected: total number of expected bytes (or None if unknown)
"""
def __init__(self, partial, expected):
super().__init__("%d bytes read on a total of %r expected bytes"
% (len(partial), expected))
self.partial = partial
self.expected = expected
def __reduce__(self):
return type(self), (self.partial, self.expected)
class LimitOverrunError(Exception):
"""Reached the buffer limit while looking for a separator.
Attributes:
    - consumed: total number of bytes to be consumed.
"""
def __init__(self, message, consumed):
super().__init__(message)
self.consumed = consumed
def __reduce__(self):
return type(self), (self.args[0], self.consumed)
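# Example (illustrative; ``reader`` is a hypothetical StreamReader and the
# code must run inside a coroutine): readexactly() reports truncated input
# through IncompleteReadError, whose .partial attribute holds the bytes that
# did arrive.
#
#   try:
#       data = yield from reader.readexactly(4)
#   except IncompleteReadError as exc:
#       data = exc.partial  # fewer than 4 bytes arrived before EOF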
@coroutine
def open_connection(host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""A wrapper for create_connection() returning a (reader, writer) pair.
The reader returned is a StreamReader instance; the writer is a
StreamWriter instance.
The arguments are all the usual arguments to create_connection()
except protocol_factory; most common are positional host and port,
with various optional keyword arguments following.
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
(If you want to customize the StreamReader and/or
StreamReaderProtocol classes, just copy the code -- there's
really nothing special here except some convenience.)
"""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
transport, _ = yield from loop.create_connection(
lambda: protocol, host, port, **kwds)
writer = StreamWriter(transport, protocol, reader, loop)
return reader, writer
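# Example (illustrative sketch; ``fetch_line`` is a hypothetical helper): a
# minimal client built on open_connection().
#
#   @coroutine
#   def fetch_line(host, port, loop=None):
#       reader, writer = yield from open_connection(host, port, loop=loop)
#       writer.write(b'ping\n')
#       yield from writer.drain()
#       line = yield from reader.readline()
#       writer.close()
#       return line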
@coroutine
def start_server(client_connected_cb, host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Start a socket server, call back for each client connected.
The first parameter, `client_connected_cb`, takes two parameters:
client_reader, client_writer. client_reader is a StreamReader
object, while client_writer is a StreamWriter object. This
parameter can either be a plain callback function or a coroutine;
if it is a coroutine, it will be automatically converted into a
Task.
The rest of the arguments are all the usual arguments to
loop.create_server() except protocol_factory; most common are
positional host and port, with various optional keyword arguments
following. The return value is the same as loop.create_server().
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
The return value is the same as loop.create_server(), i.e. a
Server object which can be used to stop the service.
"""
if loop is None:
loop = events.get_event_loop()
def factory():
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, client_connected_cb,
loop=loop)
return protocol
return (yield from loop.create_server(factory, host, port, **kwds))
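# Example (illustrative sketch; ``handle_echo`` is a hypothetical callback):
# an echo server. The callback may be a plain function or a coroutine; a
# coroutine is wrapped in a Task automatically.
#
#   @coroutine
#   def handle_echo(reader, writer):
#       data = yield from reader.readline()
#       writer.write(data)
#       yield from writer.drain()
#       writer.close()
#
#   server = loop.run_until_complete(
#       start_server(handle_echo, '127.0.0.1', 8888, loop=loop))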
if hasattr(socket, 'AF_UNIX'):
# UNIX Domain Sockets are supported on this platform
@coroutine
def open_unix_connection(path=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `open_connection` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
transport, _ = yield from loop.create_unix_connection(
lambda: protocol, path, **kwds)
writer = StreamWriter(transport, protocol, reader, loop)
return reader, writer
@coroutine
def start_unix_server(client_connected_cb, path=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `start_server` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
def factory():
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, client_connected_cb,
loop=loop)
return protocol
return (yield from loop.create_unix_server(factory, path, **kwds))
class FlowControlMixin(protocols.Protocol):
"""Reusable flow control logic for StreamWriter.drain().
This implements the protocol methods pause_writing(),
    resume_writing() and connection_lost(). If the subclass overrides
these it must call the super methods.
StreamWriter.drain() must wait for _drain_helper() coroutine.
"""
def __init__(self, loop=None):
if loop is None:
self._loop = events.get_event_loop()
else:
self._loop = loop
self._paused = False
self._drain_waiter = None
self._connection_lost = False
def pause_writing(self):
assert not self._paused
self._paused = True
if self._loop.get_debug():
logger.debug("%r pauses writing", self)
def resume_writing(self):
assert self._paused
self._paused = False
if self._loop.get_debug():
logger.debug("%r resumes writing", self)
waiter = self._drain_waiter
if waiter is not None:
self._drain_waiter = None
if not waiter.done():
waiter.set_result(None)
def connection_lost(self, exc):
self._connection_lost = True
# Wake up the writer if currently paused.
if not self._paused:
return
waiter = self._drain_waiter
if waiter is None:
return
self._drain_waiter = None
if waiter.done():
return
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
@coroutine
def _drain_helper(self):
if self._connection_lost:
raise ConnectionResetError('Connection lost')
if not self._paused:
return
waiter = self._drain_waiter
assert waiter is None or waiter.cancelled()
waiter = self._loop.create_future()
self._drain_waiter = waiter
yield from waiter
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
"""Helper class to adapt between Protocol and StreamReader.
(This is a helper class instead of making StreamReader itself a
Protocol subclass, because the StreamReader has other potential
    uses, and to prevent the user of the StreamReader from accidentally
    calling inappropriate methods of the protocol.)
"""
def __init__(self, stream_reader, client_connected_cb=None, loop=None):
super().__init__(loop=loop)
self._stream_reader = stream_reader
self._stream_writer = None
self._client_connected_cb = client_connected_cb
self._over_ssl = False
def connection_made(self, transport):
self._stream_reader.set_transport(transport)
self._over_ssl = transport.get_extra_info('sslcontext') is not None
if self._client_connected_cb is not None:
self._stream_writer = StreamWriter(transport, self,
self._stream_reader,
self._loop)
res = self._client_connected_cb(self._stream_reader,
self._stream_writer)
if coroutines.iscoroutine(res):
self._loop.create_task(res)
def connection_lost(self, exc):
if self._stream_reader is not None:
if exc is None:
self._stream_reader.feed_eof()
else:
self._stream_reader.set_exception(exc)
super().connection_lost(exc)
self._stream_reader = None
self._stream_writer = None
def data_received(self, data):
self._stream_reader.feed_data(data)
def eof_received(self):
self._stream_reader.feed_eof()
if self._over_ssl:
# Prevent a warning in SSLProtocol.eof_received:
# "returning true from eof_received()
# has no effect when using ssl"
return False
return True
class StreamWriter:
"""Wraps a Transport.
This exposes write(), writelines(), [can_]write_eof(),
get_extra_info() and close(). It adds drain() which returns an
optional Future on which you can wait for flow control. It also
adds a transport property which references the Transport
directly.
"""
def __init__(self, transport, protocol, reader, loop):
self._transport = transport
self._protocol = protocol
# drain() expects that the reader has an exception() method
assert reader is None or isinstance(reader, StreamReader)
self._reader = reader
self._loop = loop
def __repr__(self):
info = [self.__class__.__name__, 'transport=%r' % self._transport]
if self._reader is not None:
info.append('reader=%r' % self._reader)
return '<%s>' % ' '.join(info)
@property
def transport(self):
return self._transport
def write(self, data):
self._transport.write(data)
def writelines(self, data):
self._transport.writelines(data)
def write_eof(self):
return self._transport.write_eof()
def can_write_eof(self):
return self._transport.can_write_eof()
def close(self):
return self._transport.close()
def get_extra_info(self, name, default=None):
return self._transport.get_extra_info(name, default)
@coroutine
def drain(self):
"""Flush the write buffer.
The intended use is to write
w.write(data)
yield from w.drain()
"""
if self._reader is not None:
exc = self._reader.exception()
if exc is not None:
raise exc
if self._transport is not None:
if self._transport.is_closing():
# Yield to the event loop so connection_lost() may be
# called. Without this, _drain_helper() would return
# immediately, and code that calls
# write(...); yield from drain()
# in a loop would never call connection_lost(), so it
# would not see an error when the socket is closed.
yield
yield from self._protocol._drain_helper()
class StreamReader:
def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
# The line length limit is a security feature;
# it also doubles as half the buffer limit.
if limit <= 0:
raise ValueError('Limit cannot be <= 0')
self._limit = limit
if loop is None:
self._loop = events.get_event_loop()
else:
self._loop = loop
self._buffer = bytearray()
self._eof = False # Whether we're done.
self._waiter = None # A future used by _wait_for_data()
self._exception = None
self._transport = None
self._paused = False
def __repr__(self):
info = ['StreamReader']
if self._buffer:
info.append('%d bytes' % len(self._buffer))
if self._eof:
info.append('eof')
if self._limit != _DEFAULT_LIMIT:
info.append('l=%d' % self._limit)
if self._waiter:
info.append('w=%r' % self._waiter)
if self._exception:
info.append('e=%r' % self._exception)
if self._transport:
info.append('t=%r' % self._transport)
if self._paused:
info.append('paused')
return '<%s>' % ' '.join(info)
def exception(self):
return self._exception
def set_exception(self, exc):
self._exception = exc
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_exception(exc)
def _wakeup_waiter(self):
"""Wakeup read*() functions waiting for data or EOF."""
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_result(None)
def set_transport(self, transport):
assert self._transport is None, 'Transport already set'
self._transport = transport
def _maybe_resume_transport(self):
if self._paused and len(self._buffer) <= self._limit:
self._paused = False
self._transport.resume_reading()
def feed_eof(self):
self._eof = True
self._wakeup_waiter()
def at_eof(self):
"""Return True if the buffer is empty and 'feed_eof' was called."""
return self._eof and not self._buffer
def feed_data(self, data):
assert not self._eof, 'feed_data after feed_eof'
if not data:
return
self._buffer.extend(data)
self._wakeup_waiter()
if (self._transport is not None and
not self._paused and
len(self._buffer) > 2 * self._limit):
try:
self._transport.pause_reading()
except NotImplementedError:
# The transport can't be paused.
# We'll just have to buffer all data.
# Forget the transport so we don't keep trying.
self._transport = None
else:
self._paused = True
@coroutine
def _wait_for_data(self, func_name):
"""Wait until feed_data() or feed_eof() is called.
If stream was paused, automatically resume it.
"""
# StreamReader uses a future to link the protocol feed_data() method
# to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not be possible to know
        # which coroutine would get the next data.
if self._waiter is not None:
raise RuntimeError('%s() called while another coroutine is '
'already waiting for incoming data' % func_name)
assert not self._eof, '_wait_for_data after EOF'
        # Waiting for data while paused will deadlock, so prevent it.
# This is essential for readexactly(n) for case when n > self._limit.
if self._paused:
self._paused = False
self._transport.resume_reading()
self._waiter = self._loop.create_future()
try:
yield from self._waiter
finally:
self._waiter = None
@coroutine
def readline(self):
"""Read chunk of data from the stream until newline (b'\n') is found.
On success, return chunk that ends with newline. If only partial
line can be read due to EOF, return incomplete line without
terminating newline. When EOF was reached while no bytes read, empty
bytes object is returned.
If limit is reached, ValueError will be raised. In that case, if
newline was found, complete line including newline will be removed
from internal buffer. Else, internal buffer will be cleared. Limit is
compared against part of the line without newline.
If stream was paused, this function will automatically resume it if
needed.
"""
sep = b'\n'
seplen = len(sep)
try:
line = yield from self.readuntil(sep)
except IncompleteReadError as e:
return e.partial
except LimitOverrunError as e:
if self._buffer.startswith(sep, e.consumed):
del self._buffer[:e.consumed + seplen]
else:
self._buffer.clear()
self._maybe_resume_transport()
raise ValueError(e.args[0])
return line
@coroutine
def readuntil(self, separator=b'\n'):
"""Read data from the stream until ``separator`` is found.
On success, the data and separator will be removed from the
internal buffer (consumed). Returned data will include the
separator at the end.
Configured stream limit is used to check result. Limit sets the
maximal length of data that can be returned, not counting the
separator.
If an EOF occurs and the complete separator is still not found,
an IncompleteReadError exception will be raised, and the internal
buffer will be reset. The IncompleteReadError.partial attribute
may contain the separator partially.
If the data cannot be read because of over limit, a
LimitOverrunError exception will be raised, and the data
will be left in the internal buffer, so it can be read again.
"""
seplen = len(separator)
if seplen == 0:
raise ValueError('Separator should be at least one-byte string')
if self._exception is not None:
raise self._exception
        # Consume the whole buffer except for the last bytes, whose length
        # is one less than seplen. Let's check the corner cases with
        # separator='SEPARATOR':
        # * we have received an almost complete separator (without its last
        #   byte), i.e. buffer='some textSEPARATO'. In this case we
        #   can safely consume len(separator) - 1 bytes.
        # * the last byte of the buffer is the first byte of the separator,
        #   i.e. buffer='abcdefghijklmnopqrS'. We may safely consume
        #   everything except that last byte, but this requires analyzing
        #   the bytes of the buffer that match a partial separator.
        #   This is slow and/or requires an FSM. For this case our
        #   implementation is not optimal, since it requires rescanning
        #   data that is known not to belong to the separator. In the
        #   real world the separator will not be long enough for this to
        #   cause performance problems. Even when reading MIME-encoded
        #   messages :)
# `offset` is the number of bytes from the beginning of the buffer
# where there is no occurrence of `separator`.
offset = 0
# Loop until we find `separator` in the buffer, exceed the buffer size,
# or an EOF has happened.
while True:
buflen = len(self._buffer)
# Check if we now have enough data in the buffer for `separator` to
# fit.
if buflen - offset >= seplen:
isep = self._buffer.find(separator, offset)
if isep != -1:
# `separator` is in the buffer. `isep` will be used later
# to retrieve the data.
break
            # See the comment above for an explanation.
offset = buflen + 1 - seplen
if offset > self._limit:
raise LimitOverrunError(
'Separator is not found, and chunk exceed the limit',
offset)
            # A complete message (with the full separator) may be present in
            # the buffer even when the EOF flag is set. This may happen when
            # the last chunk adds the data that makes the separator appear.
            # That's why we check for EOF *after* inspecting the buffer.
if self._eof:
chunk = bytes(self._buffer)
self._buffer.clear()
raise IncompleteReadError(chunk, None)
# _wait_for_data() will resume reading if stream was paused.
yield from self._wait_for_data('readuntil')
if isep > self._limit:
raise LimitOverrunError(
'Separator is found, but chunk is longer than limit', isep)
chunk = self._buffer[:isep + seplen]
del self._buffer[:isep + seplen]
self._maybe_resume_transport()
return bytes(chunk)
@coroutine
def read(self, n=-1):
"""Read up to `n` bytes from the stream.
        If n is not provided, or set to -1, read until EOF and return all read
        bytes. If EOF was received and the internal buffer is empty, return
        an empty bytes object.
        If n is zero, return an empty bytes object immediately.
        If n is positive, this function tries to read up to `n` bytes. It may
        return fewer bytes than requested, but at least one byte. If EOF was
        received before any byte is read, this function returns an empty
        bytes object.
        The returned value is not limited by the limit configured at stream
        creation.
        If the stream was paused, this function will automatically resume it
        if needed.
"""
if self._exception is not None:
raise self._exception
if n == 0:
return b''
if n < 0:
# This used to just loop creating a new waiter hoping to
# collect everything in self._buffer, but that would
# deadlock if the subprocess sends more than self.limit
# bytes. So just call self.read(self._limit) until EOF.
blocks = []
while True:
block = yield from self.read(self._limit)
if not block:
break
blocks.append(block)
return b''.join(blocks)
if not self._buffer and not self._eof:
yield from self._wait_for_data('read')
# This will work right even if buffer is less than n bytes
data = bytes(self._buffer[:n])
del self._buffer[:n]
self._maybe_resume_transport()
return data
@coroutine
def readexactly(self, n):
"""Read exactly `n` bytes.
        Raise an IncompleteReadError if EOF is reached before `n` bytes can be
        read. The IncompleteReadError.partial attribute of the exception will
        contain the partially read bytes.
        If n is zero, return an empty bytes object.
        The returned value is not limited by the limit configured at stream
        creation.
        If the stream was paused, this function will automatically resume it
        if needed.
"""
if n < 0:
raise ValueError('readexactly size can not be less than zero')
if self._exception is not None:
raise self._exception
if n == 0:
return b''
while len(self._buffer) < n:
if self._eof:
incomplete = bytes(self._buffer)
self._buffer.clear()
raise IncompleteReadError(incomplete, n)
yield from self._wait_for_data('readexactly')
if len(self._buffer) == n:
data = bytes(self._buffer)
self._buffer.clear()
else:
data = bytes(self._buffer[:n])
del self._buffer[:n]
self._maybe_resume_transport()
return data
if compat.PY35:
@coroutine
def __aiter__(self):
return self
@coroutine
def __anext__(self):
val = yield from self.readline()
if val == b'':
raise StopAsyncIteration
return val
if compat.PY352:
# In Python 3.5.2 and greater, __aiter__ should return
# the asynchronous iterator directly.
def __aiter__(self):
return self
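# Example (illustrative, Python 3.5.2+; ``dump_lines`` is a hypothetical
# helper): a StreamReader can be consumed with ``async for``, which yields
# lines until readline() returns b'' at EOF.
#
#   async def dump_lines(reader):
#       async for line in reader:
#           print(line)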
cosmopolitan/third_party/python/Lib/asyncio/sslproto.py
import collections
import warnings
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import compat
from . import protocols
from . import transports
from .log import logger
def _create_transport_context(server_side, server_hostname):
if server_side:
raise ValueError('Server side SSL needs a valid SSLContext')
# Client side may pass ssl=True to use a default
# context; in that case the sslcontext passed is None.
# The default is secure for client connections.
if hasattr(ssl, 'create_default_context'):
# Python 3.4+: use up-to-date strong settings.
sslcontext = ssl.create_default_context()
if not server_hostname:
sslcontext.check_hostname = False
else:
# Fallback for Python 3.3.
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.options |= ssl.OP_NO_SSLv3
sslcontext.set_default_verify_paths()
sslcontext.verify_mode = ssl.CERT_REQUIRED
return sslcontext
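# Example (illustrative; MyProtocol is a hypothetical protocol factory):
# application code normally reaches this helper by passing ssl=True to
# create_connection(), in which case server_hostname defaults to the host
# argument and certificate/hostname checking stays enabled.
#
#   transport, protocol = yield from loop.create_connection(
#       MyProtocol, 'example.com', 443, ssl=True)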
def _is_sslproto_available():
return hasattr(ssl, "MemoryBIO")
# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"
_DO_HANDSHAKE = "DO_HANDSHAKE"
_WRAPPED = "WRAPPED"
_SHUTDOWN = "SHUTDOWN"
class _SSLPipe(object):
"""An SSL "Pipe".
An SSL pipe allows you to communicate with an SSL/TLS protocol instance
through memory buffers. It can be used to implement a security layer for an
existing connection where you don't have access to the connection's file
descriptor, or for some reason you don't want to use it.
An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
data is passed through untransformed. In wrapped mode, application level
data is encrypted to SSL record level data and vice versa. The SSL record
level is the lowest level in the SSL protocol suite and is what travels
as-is over the wire.
An SslPipe initially is in "unwrapped" mode. To start SSL, call
do_handshake(). To shutdown SSL again, call unwrap().
"""
max_size = 256 * 1024 # Buffer size passed to read()
def __init__(self, context, server_side, server_hostname=None):
"""
The *context* argument specifies the ssl.SSLContext to use.
The *server_side* argument indicates whether this is a server side or
client side transport.
The optional *server_hostname* argument can be used to specify the
hostname you are connecting to. You may only specify this parameter if
the _ssl module supports Server Name Indication (SNI).
"""
self._context = context
self._server_side = server_side
self._server_hostname = server_hostname
self._state = _UNWRAPPED
self._incoming = ssl.MemoryBIO()
self._outgoing = ssl.MemoryBIO()
self._sslobj = None
self._need_ssldata = False
self._handshake_cb = None
self._shutdown_cb = None
@property
def context(self):
"""The SSL context passed to the constructor."""
return self._context
@property
def ssl_object(self):
"""The internal ssl.SSLObject instance.
Return None if the pipe is not wrapped.
"""
return self._sslobj
@property
def need_ssldata(self):
"""Whether more record level data is needed to complete a handshake
that is currently in progress."""
return self._need_ssldata
@property
def wrapped(self):
"""
Whether a security layer is currently in effect.
Return False during handshake.
"""
return self._state == _WRAPPED
def do_handshake(self, callback=None):
"""Start the SSL handshake.
        Return a list of ssldata. A ssldata element is a list of buffers.
The optional *callback* argument can be used to install a callback that
will be called when the handshake is complete. The callback will be
called with None if successful, else an exception instance.
"""
if self._state != _UNWRAPPED:
raise RuntimeError('handshake in progress or completed')
self._sslobj = self._context.wrap_bio(
self._incoming, self._outgoing,
server_side=self._server_side,
server_hostname=self._server_hostname)
self._state = _DO_HANDSHAKE
self._handshake_cb = callback
ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
assert len(appdata) == 0
return ssldata
def shutdown(self, callback=None):
"""Start the SSL shutdown sequence.
        Return a list of ssldata. A ssldata element is a list of buffers.
The optional *callback* argument can be used to install a callback that
will be called when the shutdown is complete. The callback will be
called without arguments.
"""
if self._state == _UNWRAPPED:
raise RuntimeError('no security layer present')
if self._state == _SHUTDOWN:
raise RuntimeError('shutdown in progress')
assert self._state in (_WRAPPED, _DO_HANDSHAKE)
self._state = _SHUTDOWN
self._shutdown_cb = callback
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
return ssldata
def feed_eof(self):
"""Send a potentially "ragged" EOF.
This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected.
"""
self._incoming.write_eof()
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
def feed_ssldata(self, data, only_handshake=False):
"""Feed SSL record level data into the pipe.
The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.
Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.
The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling shutdown().
"""
if self._state == _UNWRAPPED:
# If unwrapped, pass plaintext data straight through.
if data:
appdata = [data]
else:
appdata = []
return ([], appdata)
self._need_ssldata = False
if data:
self._incoming.write(data)
ssldata = []
appdata = []
try:
if self._state == _DO_HANDSHAKE:
# Call do_handshake() until it doesn't raise anymore.
self._sslobj.do_handshake()
self._state = _WRAPPED
if self._handshake_cb:
self._handshake_cb(None)
if only_handshake:
return (ssldata, appdata)
# Handshake done: execute the wrapped block
if self._state == _WRAPPED:
# Main state: read data from SSL until close_notify
while True:
chunk = self._sslobj.read(self.max_size)
appdata.append(chunk)
if not chunk: # close_notify
break
elif self._state == _SHUTDOWN:
# Call shutdown() until it doesn't raise anymore.
self._sslobj.unwrap()
self._sslobj = None
self._state = _UNWRAPPED
if self._shutdown_cb:
self._shutdown_cb()
elif self._state == _UNWRAPPED:
# Drain possible plaintext data after close_notify.
appdata.append(self._incoming.read())
except (ssl.SSLError, ssl.CertificateError) as exc:
if getattr(exc, 'errno', None) not in (
ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
if self._state == _DO_HANDSHAKE and self._handshake_cb:
self._handshake_cb(exc)
raise
self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
# Check for record level data that needs to be sent back.
# Happens for the initial handshake and renegotiations.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
return (ssldata, appdata)
def feed_appdata(self, data, offset=0):
"""Feed plaintext data into the pipe.
Return an (ssldata, offset) tuple. The ssldata element is a list of
buffers containing record level data that needs to be sent to the
remote SSL instance. The offset is the number of plaintext bytes that
were processed, which may be less than the length of data.
NOTE: In case of short writes, this call MUST be retried with the SAME
buffer passed into the *data* argument (i.e. the id() must be the
same). This is an OpenSSL requirement. A further particularity is that
a short write will always have offset == 0, because the _ssl module
does not enable partial writes. And even though the offset is zero,
there will still be encrypted data in ssldata.
"""
assert 0 <= offset <= len(data)
if self._state == _UNWRAPPED:
# pass through data in unwrapped mode
if offset < len(data):
ssldata = [data[offset:]]
else:
ssldata = []
return (ssldata, len(data))
ssldata = []
view = memoryview(data)
while True:
self._need_ssldata = False
try:
if offset < len(view):
offset += self._sslobj.write(view[offset:])
except ssl.SSLError as exc:
# It is not allowed to call write() after unwrap() until the
# close_notify is acknowledged. We return the condition to the
# caller as a short write.
if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
exc.errno = ssl.SSL_ERROR_WANT_READ
if exc.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
raise
self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
# See if there's any record level data back for us.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
if offset == len(view) or self._need_ssldata:
break
return (ssldata, offset)
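# Example (illustrative sketch; ``sock`` is a hypothetical blocking socket):
# driving an _SSLPipe by hand. Buffers returned in ssldata go over the wire;
# bytes received from the peer are fed back through feed_ssldata().
#
#   pipe = _SSLPipe(ssl.create_default_context(), server_side=False,
#                   server_hostname='example.com')
#   for chunk in pipe.do_handshake():
#       sock.sendall(chunk)
#   ssldata, appdata = pipe.feed_ssldata(sock.recv(pipe.max_size))
#   for chunk in ssldata:
#       sock.sendall(chunk)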
class _SSLProtocolTransport(transports._FlowControlMixin,
transports.Transport):
def __init__(self, loop, ssl_protocol):
self._loop = loop
# SSLProtocol instance
self._ssl_protocol = ssl_protocol
self._closed = False
def get_extra_info(self, name, default=None):
"""Get optional transport information."""
return self._ssl_protocol._get_extra_info(name, default)
def set_protocol(self, protocol):
self._ssl_protocol._app_protocol = protocol
def get_protocol(self):
return self._ssl_protocol._app_protocol
def is_closing(self):
return self._closed
def close(self):
"""Close the transport.
Buffered data will be flushed asynchronously. No more data
will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
with None as its argument.
"""
self._closed = True
self._ssl_protocol._start_shutdown()
    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. This is no longer the case on
    # Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if not self._closed:
warnings.warn("unclosed transport %r" % self, ResourceWarning,
source=self)
self.close()
def pause_reading(self):
"""Pause the receiving end.
No data will be passed to the protocol's data_received()
method until resume_reading() is called.
"""
self._ssl_protocol._transport.pause_reading()
def resume_reading(self):
"""Resume the receiving end.
Data received will once again be passed to the protocol's
data_received() method.
"""
self._ssl_protocol._transport.resume_reading()
def set_write_buffer_limits(self, high=None, low=None):
"""Set the high- and low-water limits for write flow control.
These two values control when to call the protocol's
pause_writing() and resume_writing() methods. If specified,
the low-water limit must be less than or equal to the
high-water limit. Neither value can be negative.
The defaults are implementation-specific. If only the
high-water limit is given, the low-water limit defaults to an
implementation-specific value less than or equal to the
high-water limit. Setting high to zero forces low to zero as
well, and causes pause_writing() to be called whenever the
buffer becomes non-empty. Setting low to zero causes
resume_writing() to be called only once the buffer is empty.
Use of zero for either limit is generally sub-optimal as it
reduces opportunities for doing I/O and computation
concurrently.
"""
self._ssl_protocol._transport.set_write_buffer_limits(high, low)
def get_write_buffer_size(self):
"""Return the current size of the write buffer."""
return self._ssl_protocol._transport.get_write_buffer_size()
def write(self, data):
"""Write some data bytes to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
"""
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError("data: expecting a bytes-like instance, got {!r}"
.format(type(data).__name__))
if not data:
return
self._ssl_protocol._write_appdata(data)
def can_write_eof(self):
"""Return True if this transport supports write_eof(), False if not."""
return False
def abort(self):
"""Close the transport immediately.
Buffered data will be lost. No more data will be received.
The protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
self._ssl_protocol._abort()
class SSLProtocol(protocols.Protocol):
"""SSL protocol.
Implementation of SSL on top of a socket using incoming and outgoing
buffers which are ssl.MemoryBIO objects.
"""
def __init__(self, loop, app_protocol, sslcontext, waiter,
server_side=False, server_hostname=None,
call_connection_made=True):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')
if not sslcontext:
sslcontext = _create_transport_context(server_side, server_hostname)
self._server_side = server_side
if server_hostname and not server_side:
self._server_hostname = server_hostname
else:
self._server_hostname = None
self._sslcontext = sslcontext
        # SSL-specific extra info. More info is set when the handshake
# completes.
self._extra = dict(sslcontext=sslcontext)
# App data write buffering
self._write_backlog = collections.deque()
self._write_buffer_size = 0
self._waiter = waiter
self._loop = loop
self._app_protocol = app_protocol
self._app_transport = _SSLProtocolTransport(self._loop, self)
# _SSLPipe instance (None until the connection is made)
self._sslpipe = None
self._session_established = False
self._in_handshake = False
self._in_shutdown = False
# transport, ex: SelectorSocketTransport
self._transport = None
self._call_connection_made = call_connection_made
def _wakeup_waiter(self, exc=None):
if self._waiter is None:
return
if not self._waiter.cancelled():
if exc is not None:
self._waiter.set_exception(exc)
else:
self._waiter.set_result(None)
self._waiter = None
def connection_made(self, transport):
"""Called when the low-level connection is made.
Start the SSL handshake.
"""
self._transport = transport
self._sslpipe = _SSLPipe(self._sslcontext,
self._server_side,
self._server_hostname)
self._start_handshake()
def connection_lost(self, exc):
"""Called when the low-level connection is lost or closed.
The argument is an exception object or None (the latter
meaning a regular EOF is received or the connection was
aborted or closed).
"""
if self._session_established:
self._session_established = False
self._loop.call_soon(self._app_protocol.connection_lost, exc)
self._transport = None
self._app_transport = None
self._wakeup_waiter(exc)
def pause_writing(self):
"""Called when the low-level transport's buffer goes over
the high-water mark.
"""
self._app_protocol.pause_writing()
def resume_writing(self):
"""Called when the low-level transport's buffer drains below
the low-water mark.
"""
self._app_protocol.resume_writing()
def data_received(self, data):
"""Called when some SSL data is received.
The argument is a bytes object.
"""
if self._sslpipe is None:
# transport closing, sslpipe is destroyed
return
try:
ssldata, appdata = self._sslpipe.feed_ssldata(data)
except ssl.SSLError as e:
if self._loop.get_debug():
logger.warning('%r: SSL error %s (reason %s)',
self, e.errno, e.reason)
self._abort()
return
for chunk in ssldata:
self._transport.write(chunk)
for chunk in appdata:
if chunk:
self._app_protocol.data_received(chunk)
else:
self._start_shutdown()
break
def eof_received(self):
"""Called when the other end of the low-level stream
is half-closed.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
"""
try:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
self._wakeup_waiter(ConnectionResetError)
if not self._in_handshake:
keep_open = self._app_protocol.eof_received()
if keep_open:
logger.warning('returning true from eof_received() '
'has no effect when using ssl')
finally:
self._transport.close()
def _get_extra_info(self, name, default=None):
if name in self._extra:
return self._extra[name]
elif self._transport is not None:
return self._transport.get_extra_info(name, default)
else:
return default
def _start_shutdown(self):
if self._in_shutdown:
return
if self._in_handshake:
self._abort()
else:
self._in_shutdown = True
self._write_appdata(b'')
def _write_appdata(self, data):
self._write_backlog.append((data, 0))
self._write_buffer_size += len(data)
self._process_write_backlog()
def _start_handshake(self):
if self._loop.get_debug():
logger.debug("%r starts SSL handshake", self)
self._handshake_start_time = self._loop.time()
else:
self._handshake_start_time = None
self._in_handshake = True
# (b'', 1) is a special value in _process_write_backlog() to do
# the SSL handshake
self._write_backlog.append((b'', 1))
self._process_write_backlog()
def _on_handshake_complete(self, handshake_exc):
self._in_handshake = False
sslobj = self._sslpipe.ssl_object
try:
if handshake_exc is not None:
raise handshake_exc
peercert = sslobj.getpeercert()
if not hasattr(self._sslcontext, 'check_hostname'):
                # Verify hostname if requested; Python 3.4+ uses
                # check_hostname and checks the hostname in do_handshake().
if (self._server_hostname
and self._sslcontext.verify_mode != ssl.CERT_NONE):
ssl.match_hostname(peercert, self._server_hostname)
except BaseException as exc:
if self._loop.get_debug():
if isinstance(exc, ssl.CertificateError):
logger.warning("%r: SSL handshake failed "
"on verifying the certificate",
self, exc_info=True)
else:
logger.warning("%r: SSL handshake failed",
self, exc_info=True)
self._transport.close()
if isinstance(exc, Exception):
self._wakeup_waiter(exc)
return
else:
raise
if self._loop.get_debug():
dt = self._loop.time() - self._handshake_start_time
logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=sslobj.cipher(),
compression=sslobj.compression(),
ssl_object=sslobj,
)
if self._call_connection_made:
self._app_protocol.connection_made(self._app_transport)
self._wakeup_waiter()
self._session_established = True
# In case transport.write() was already called. Don't call
# immediately _process_write_backlog(), but schedule it:
# _on_handshake_complete() can be called indirectly from
# _process_write_backlog(), and _process_write_backlog() is not
# reentrant.
self._loop.call_soon(self._process_write_backlog)
def _process_write_backlog(self):
# Try to make progress on the write backlog.
if self._transport is None or self._sslpipe is None:
return
try:
for i in range(len(self._write_backlog)):
data, offset = self._write_backlog[0]
if data:
ssldata, offset = self._sslpipe.feed_appdata(data, offset)
elif offset:
ssldata = self._sslpipe.do_handshake(
self._on_handshake_complete)
offset = 1
else:
ssldata = self._sslpipe.shutdown(self._finalize)
offset = 1
for chunk in ssldata:
self._transport.write(chunk)
if offset < len(data):
self._write_backlog[0] = (data, offset)
# A short write means that a write is blocked on a read
# We need to enable reading if it is paused!
assert self._sslpipe.need_ssldata
if self._transport._paused:
self._transport.resume_reading()
break
# An entire chunk from the backlog was processed. We can
# delete it and reduce the outstanding buffer size.
del self._write_backlog[0]
self._write_buffer_size -= len(data)
except BaseException as exc:
if self._in_handshake:
# BaseExceptions will be re-raised in _on_handshake_complete.
self._on_handshake_complete(exc)
else:
self._fatal_error(exc, 'Fatal error on SSL transport')
if not isinstance(exc, Exception):
# BaseException
raise
def _fatal_error(self, exc, message='Fatal error on transport'):
# Should be called from exception handler only.
if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self._transport,
'protocol': self,
})
if self._transport:
self._transport._force_close(exc)
def _finalize(self):
self._sslpipe = None
if self._transport is not None:
self._transport.close()
def _abort(self):
try:
if self._transport is not None:
self._transport.abort()
finally:
self._finalize()
cosmopolitan/third_party/python/Lib/asyncio/events.py
"""Event loop and event loop policy."""
__all__ = ['AbstractEventLoopPolicy',
'AbstractEventLoop', 'AbstractServer',
'Handle', 'TimerHandle',
'get_event_loop_policy', 'set_event_loop_policy',
'get_event_loop', 'set_event_loop', 'new_event_loop',
'get_child_watcher', 'set_child_watcher',
'_set_running_loop', '_get_running_loop',
]
import functools
import inspect
import os
import reprlib
import socket
import subprocess
import sys
import threading
import traceback
from . import compat
from . import constants
def _get_function_source(func):
if compat.PY34:
func = inspect.unwrap(func)
elif hasattr(func, '__wrapped__'):
func = func.__wrapped__
if inspect.isfunction(func):
code = func.__code__
return (code.co_filename, code.co_firstlineno)
if isinstance(func, functools.partial):
return _get_function_source(func.func)
if compat.PY34 and isinstance(func, functools.partialmethod):
return _get_function_source(func.func)
return None
def _format_args_and_kwargs(args, kwargs):
"""Format function arguments and keyword arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello').
"""
# use reprlib to limit the length of the output
items = []
if args:
items.extend(reprlib.repr(arg) for arg in args)
if kwargs:
items.extend('{}={}'.format(k, reprlib.repr(v))
for k, v in kwargs.items())
return '(' + ', '.join(items) + ')'
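# Example (illustrative):
#
#   _format_args_and_kwargs(('hello',), {'n': 1})  # -> "('hello', n=1)"
#   _format_args_and_kwargs((), {})                # -> "()"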
def _format_callback(func, args, kwargs, suffix=''):
if isinstance(func, functools.partial):
suffix = _format_args_and_kwargs(args, kwargs) + suffix
return _format_callback(func.func, func.args, func.keywords, suffix)
if hasattr(func, '__qualname__') and func.__qualname__:
func_repr = func.__qualname__
elif hasattr(func, '__name__') and func.__name__:
func_repr = func.__name__
else:
func_repr = repr(func)
func_repr += _format_args_and_kwargs(args, kwargs)
if suffix:
func_repr += suffix
return func_repr
def _format_callback_source(func, args):
func_repr = _format_callback(func, args, None)
source = _get_function_source(func)
if source:
func_repr += ' at %s:%s' % source
return func_repr
def extract_stack(f=None, limit=None):
"""Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode.
"""
if f is None:
f = sys._getframe().f_back
if limit is None:
# Limit the amount of work to a reasonable amount, as extract_stack()
# can be called for each coroutine and future in debug mode.
limit = constants.DEBUG_STACK_DEPTH
stack = traceback.StackSummary.extract(traceback.walk_stack(f),
limit=limit,
lookup_lines=False)
stack.reverse()
return stack
class Handle:
"""Object returned by callback registration methods."""
__slots__ = ('_callback', '_args', '_cancelled', '_loop',
'_source_traceback', '_repr', '__weakref__')
def __init__(self, callback, args, loop):
self._loop = loop
self._callback = callback
self._args = args
self._cancelled = False
self._repr = None
if self._loop.get_debug():
self._source_traceback = extract_stack(sys._getframe(1))
else:
self._source_traceback = None
def _repr_info(self):
info = [self.__class__.__name__]
if self._cancelled:
info.append('cancelled')
if self._callback is not None:
info.append(_format_callback_source(self._callback, self._args))
if self._source_traceback:
frame = self._source_traceback[-1]
info.append('created at %s:%s' % (frame[0], frame[1]))
return info
def __repr__(self):
if self._repr is not None:
return self._repr
info = self._repr_info()
return '<%s>' % ' '.join(info)
def cancel(self):
if not self._cancelled:
self._cancelled = True
if self._loop.get_debug():
# Keep a representation in debug mode to keep callback and
# parameters. For example, to log the warning
# "Executing <Handle...> took 2.5 second"
self._repr = repr(self)
self._callback = None
self._args = None
def _run(self):
try:
self._callback(*self._args)
except Exception as exc:
cb = _format_callback_source(self._callback, self._args)
msg = 'Exception in callback {}'.format(cb)
context = {
'message': msg,
'exception': exc,
'handle': self,
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
self = None # Needed to break cycles when an exception occurs.
class TimerHandle(Handle):
"""Object returned by timed callback registration methods."""
__slots__ = ['_scheduled', '_when']
def __init__(self, when, callback, args, loop):
assert when is not None
super().__init__(callback, args, loop)
if self._source_traceback:
del self._source_traceback[-1]
self._when = when
self._scheduled = False
def _repr_info(self):
info = super()._repr_info()
pos = 2 if self._cancelled else 1
info.insert(pos, 'when=%s' % self._when)
return info
def __hash__(self):
return hash(self._when)
def __lt__(self, other):
return self._when < other._when
def __le__(self, other):
if self._when < other._when:
return True
return self.__eq__(other)
def __gt__(self, other):
return self._when > other._when
def __ge__(self, other):
if self._when > other._when:
return True
return self.__eq__(other)
def __eq__(self, other):
if isinstance(other, TimerHandle):
return (self._when == other._when and
self._callback == other._callback and
self._args == other._args and
self._cancelled == other._cancelled)
return NotImplemented
def __ne__(self, other):
equal = self.__eq__(other)
return NotImplemented if equal is NotImplemented else not equal
def cancel(self):
if not self._cancelled:
self._loop._timer_handle_cancelled(self)
super().cancel()
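# Example (illustrative): the ordering methods above are what let the base
# event loop keep its scheduled callbacks in a heap keyed by ``_when``:
#
#   import heapq
#   heapq.heappush(scheduled, timer_handle)  # ordered by the deadline
#   earliest = scheduled[0]                  # smallest _when comes first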
class AbstractServer:
"""Abstract server returned by create_server()."""
def close(self):
"""Stop serving. This leaves existing connections open."""
return NotImplemented
def wait_closed(self):
"""Coroutine to wait until service is closed."""
return NotImplemented
class AbstractEventLoop:
"""Abstract event loop."""
# Running and stopping the event loop.
def run_forever(self):
"""Run the event loop until stop() is called."""
raise NotImplementedError
def run_until_complete(self, future):
"""Run the event loop until a Future is done.
Return the Future's result, or raise its exception.
"""
raise NotImplementedError
def stop(self):
"""Stop the event loop as soon as reasonable.
Exactly how soon that is may depend on the implementation, but
no more I/O callbacks should be scheduled.
"""
raise NotImplementedError
def is_running(self):
"""Return whether the event loop is currently running."""
raise NotImplementedError
def is_closed(self):
"""Returns True if the event loop was closed."""
raise NotImplementedError
def close(self):
"""Close the loop.
The loop should not be running.
This is idempotent and irreversible.
No other methods should be called after this one.
"""
raise NotImplementedError
def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
raise NotImplementedError
# Methods scheduling callbacks. All these return Handles.
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
raise NotImplementedError
def call_soon(self, callback, *args):
return self.call_later(0, callback, *args)
def call_later(self, delay, callback, *args):
raise NotImplementedError
def call_at(self, when, callback, *args):
raise NotImplementedError
def time(self):
raise NotImplementedError
def create_future(self):
raise NotImplementedError
# Method scheduling a coroutine object: create a task.
def create_task(self, coro):
raise NotImplementedError
# Methods for interacting with threads.
def call_soon_threadsafe(self, callback, *args):
raise NotImplementedError
def run_in_executor(self, executor, func, *args):
raise NotImplementedError
def set_default_executor(self, executor):
raise NotImplementedError
# Network I/O methods returning Futures.
def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
raise NotImplementedError
def getnameinfo(self, sockaddr, flags=0):
raise NotImplementedError
def create_connection(self, protocol_factory, host=None, port=None, *,
ssl=None, family=0, proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None):
raise NotImplementedError
def create_server(self, protocol_factory, host=None, port=None, *,
family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
sock=None, backlog=100, ssl=None, reuse_address=None,
reuse_port=None):
"""A coroutine which creates a TCP server bound to host and port.
The return value is a Server object which can be used to stop
the service.
If host is an empty string or None all interfaces are assumed
and a list of multiple sockets will be returned (most likely
one for IPv4 and another one for IPv6). The host parameter can also be a
sequence (e.g. list) of hosts to bind to.
family can be set to either AF_INET or AF_INET6 to force the
socket to use IPv4 or IPv6. If not set it will be determined
from host (defaults to AF_UNSPEC).
flags is a bitmask for getaddrinfo().
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
reuse_address tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to
expire. If not specified, it will automatically be set to True on
UNIX.
reuse_port tells the kernel to allow this endpoint to be bound to
the same port as other existing endpoints are bound to, so long as
they all set this flag when being created. This option is not
supported on Windows.
"""
raise NotImplementedError
def create_unix_connection(self, protocol_factory, path, *,
ssl=None, sock=None,
server_hostname=None):
raise NotImplementedError
def create_unix_server(self, protocol_factory, path, *,
sock=None, backlog=100, ssl=None):
"""A coroutine which creates a UNIX Domain Socket server.
The return value is a Server object, which can be used to stop
the service.
path is a str, representing a file system path to bind the
server socket to.
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
"""
raise NotImplementedError
def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0,
reuse_address=None, reuse_port=None,
allow_broadcast=None, sock=None):
"""A coroutine which creates a datagram endpoint.
This method will try to establish the endpoint in the background.
When successful, the coroutine returns a (transport, protocol) pair.
protocol_factory must be a callable returning a protocol instance.
The socket family will be AF_INET or AF_INET6, depending on host
(or on family, if specified); the socket type will be SOCK_DGRAM.
reuse_address tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to
expire. If not specified, it will automatically be set to True on
UNIX.
reuse_port tells the kernel to allow this endpoint to be bound to
the same port as other existing endpoints are bound to, so long as
they all set this flag when being created. This option is not
supported on Windows and some UNIXes. If the
:py:data:`~socket.SO_REUSEPORT` constant is not defined then this
capability is unsupported.
allow_broadcast tells the kernel to allow this endpoint to send
messages to the broadcast address.
sock can optionally be specified in order to use a preexisting
socket object.
"""
raise NotImplementedError
# Pipes and subprocesses.
def connect_read_pipe(self, protocol_factory, pipe):
"""Register read pipe in event loop. Set the pipe to non-blocking mode.
protocol_factory should instantiate object with Protocol interface.
pipe is a file-like object.
Return pair (transport, protocol), where transport supports the
ReadTransport interface."""
# The reason to accept a file-like object instead of just a file
# descriptor is that we need to own the pipe and close it when the
# transport is finished. We can get complicated errors if we pass
# f.fileno(), close the fd in the pipe transport, and then close f
# (or vice versa).
raise NotImplementedError
def connect_write_pipe(self, protocol_factory, pipe):
"""Register write pipe in event loop.
protocol_factory should instantiate object with BaseProtocol interface.
pipe is a file-like object already switched to non-blocking mode.
Return pair (transport, protocol), where transport supports the
WriteTransport interface."""
# The reason to accept a file-like object instead of just a file
# descriptor is that we need to own the pipe and close it when the
# transport is finished. We can get complicated errors if we pass
# f.fileno(), close the fd in the pipe transport, and then close f
# (or vice versa).
raise NotImplementedError
def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs):
raise NotImplementedError
def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs):
raise NotImplementedError
# Ready-based callback registration methods.
# The add_*() methods return None.
# The remove_*() methods return True if something was removed,
# False if there was nothing to delete.
def add_reader(self, fd, callback, *args):
raise NotImplementedError
def remove_reader(self, fd):
raise NotImplementedError
def add_writer(self, fd, callback, *args):
raise NotImplementedError
def remove_writer(self, fd):
raise NotImplementedError
# Completion based I/O methods returning Futures.
def sock_recv(self, sock, nbytes):
raise NotImplementedError
def sock_sendall(self, sock, data):
raise NotImplementedError
def sock_connect(self, sock, address):
raise NotImplementedError
def sock_accept(self, sock):
raise NotImplementedError
# Signal handling.
def add_signal_handler(self, sig, callback, *args):
raise NotImplementedError
def remove_signal_handler(self, sig):
raise NotImplementedError
# Task factory.
def set_task_factory(self, factory):
raise NotImplementedError
def get_task_factory(self):
raise NotImplementedError
# Error handlers.
def get_exception_handler(self):
raise NotImplementedError
def set_exception_handler(self, handler):
raise NotImplementedError
def default_exception_handler(self, context):
raise NotImplementedError
def call_exception_handler(self, context):
raise NotImplementedError
# Debug flag management.
def get_debug(self):
raise NotImplementedError
def set_debug(self, enabled):
raise NotImplementedError
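# Hedged usage sketch, not part of the original module: driving the
# coroutine-style connection methods documented above from synchronous
# code. `protocol_factory` is assumed to be a callable returning an
# asyncio protocol instance; host and port are placeholders.
def _example_connect(loop, protocol_factory):
    coro = loop.create_connection(protocol_factory, '127.0.0.1', 8888)
    transport, protocol = loop.run_until_complete(coro)
    return transport, protocol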
class AbstractEventLoopPolicy:
"""Abstract policy for accessing the event loop."""
def get_event_loop(self):
"""Get the event loop for the current context.
Returns an event loop object implementing the BaseEventLoop interface,
or raises an exception in case no event loop has been set for the
current context and the current policy does not specify to create one.
It should never return None."""
raise NotImplementedError
def set_event_loop(self, loop):
"""Set the event loop for the current context to loop."""
raise NotImplementedError
def new_event_loop(self):
"""Create and return a new event loop object according to this
policy's rules. If there's need to set this loop as the event loop for
the current context, set_event_loop must be called explicitly."""
raise NotImplementedError
# Child processes handling (Unix only).
def get_child_watcher(self):
"Get the watcher for child processes."
raise NotImplementedError
def set_child_watcher(self, watcher):
"""Set the watcher for child processes."""
raise NotImplementedError
class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
"""Default policy implementation for accessing the event loop.
In this policy, each thread has its own event loop. However, we
only automatically create an event loop by default for the main
thread; other threads by default have no event loop.
Other policies may have different rules (e.g. a single global
event loop, or automatically creating an event loop per thread, or
using some other notion of context to which an event loop is
associated).
"""
_loop_factory = None
class _Local(threading.local):
_loop = None
_set_called = False
def __init__(self):
self._local = self._Local()
def get_event_loop(self):
"""Get the event loop.
This may be None or an instance of EventLoop.
"""
if (self._local._loop is None and
not self._local._set_called and
isinstance(threading.current_thread(), threading._MainThread)):
self.set_event_loop(self.new_event_loop())
if self._local._loop is None:
raise RuntimeError('There is no current event loop in thread %r.'
% threading.current_thread().name)
return self._local._loop
def set_event_loop(self, loop):
"""Set the event loop."""
self._local._set_called = True
assert loop is None or isinstance(loop, AbstractEventLoop)
self._local._loop = loop
def new_event_loop(self):
"""Create a new event loop.
You must call set_event_loop() to make this the current event
loop.
"""
return self._loop_factory()
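# Hedged sketch, not part of the original module: demonstrating the
# per-thread rule described in the class docstring above. `policy` is
# assumed to be a concrete subclass with a working _loop_factory.
def _example_per_thread_loops(policy):
    import threading
    loops = {}
    def worker():
        policy.set_event_loop(policy.new_event_loop())
        loops['worker'] = policy.get_event_loop()
    t = threading.Thread(target=worker)
    t.start()
    t.join()
    loops['main'] = policy.get_event_loop()
    assert loops['worker'] is not loops['main']  # one loop per thread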
# Event loop policy. The policy itself is always global, even if the
# policy's rules say that there is an event loop per thread (or other
# notion of context). The default policy is installed by the first
# call to get_event_loop_policy().
_event_loop_policy = None
# Lock for protecting the on-the-fly creation of the event loop policy.
_lock = threading.Lock()
# A TLS for the running event loop, used by _get_running_loop.
class _RunningLoop(threading.local):
loop_pid = (None, None)
_running_loop = _RunningLoop()
def _get_running_loop():
"""Return the running event loop or None.
This is a low-level function intended to be used by event loops.
This function is thread-specific.
"""
running_loop, pid = _running_loop.loop_pid
if running_loop is not None and pid == os.getpid():
return running_loop
def _set_running_loop(loop):
"""Set the running event loop.
This is a low-level function intended to be used by event loops.
This function is thread-specific.
"""
_running_loop.loop_pid = (loop, os.getpid())
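# Hedged sketch, not part of the original module: how a concrete event
# loop is expected to bracket its run loop with the two helpers above,
# so that _get_running_loop() answers correctly from inside callbacks.
def _example_run_bracket(loop):
    _set_running_loop(loop)
    try:
        pass  # ... dispatch ready callbacks until stop() is called ...
    finally:
        _set_running_loop(None)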
def _init_event_loop_policy():
global _event_loop_policy
with _lock:
if _event_loop_policy is None: # pragma: no branch
from .unix_events import DefaultEventLoopPolicy
_event_loop_policy = DefaultEventLoopPolicy()
def get_event_loop_policy():
"""Get the current event loop policy."""
if _event_loop_policy is None:
_init_event_loop_policy()
return _event_loop_policy
def set_event_loop_policy(policy):
"""Set the current event loop policy.
If policy is None, the default policy is restored."""
global _event_loop_policy
assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
_event_loop_policy = policy
def get_event_loop():
"""Return an asyncio event loop.
When called from a coroutine or a callback (e.g. scheduled with call_soon
or similar API), this function will always return the running event loop.
If there is no running event loop set, the function will return
the result of `get_event_loop_policy().get_event_loop()` call.
"""
current_loop = _get_running_loop()
if current_loop is not None:
return current_loop
return get_event_loop_policy().get_event_loop()
def set_event_loop(loop):
"""Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
get_event_loop_policy().set_event_loop(loop)
def new_event_loop():
"""Equivalent to calling get_event_loop_policy().new_event_loop()."""
return get_event_loop_policy().new_event_loop()
def get_child_watcher():
"""Equivalent to calling get_event_loop_policy().get_child_watcher()."""
return get_event_loop_policy().get_child_watcher()
def set_child_watcher(watcher):
"""Equivalent to calling
get_event_loop_policy().set_child_watcher(watcher)."""
return get_event_loop_policy().set_child_watcher(watcher)
cosmopolitan/third_party/python/Lib/asyncio/test_utils.py
"""Utilities shared by tests."""
import collections
import contextlib
import io
import logging
import os
import re
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import compat
from . import events
from . import futures
from . import selectors
from . import tasks
from .coroutines import coroutine
from .log import logger
from test import support
if sys.platform == 'win32': # pragma: no cover
from .windows_utils import socketpair
else:
from socket import socketpair # pragma: no cover
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(os.__file__), 'test', filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
def dummy_ssl_context():
if ssl is None:
return None
else:
return ssl.SSLContext(ssl.PROTOCOL_SSLv23)
def run_briefly(loop):
@coroutine
def once():
pass
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=30):
deadline = time.time() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.time()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001, loop=loop))
def run_once(loop):
"""Legacy API to run once through the event loop.
This is the recommended pattern for test code. It will poll the
selector once and run all callbacks scheduled in response to I/O
events.
"""
loop.call_soon(loop.stop)
loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer):
request_timeout = 2
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
keyfile = ONLYKEY
certfile = ONLYCERT
context = ssl.SSLContext()
context.load_cert_chain(certfile, keyfile)
ssock = context.wrap_socket(request, server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(
target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer):
request_timeout = 2
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
with unix_socket_path() as path:
yield from _run_test_server(address=path, use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer)
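# Hedged usage sketch, not part of the original module: exercising the
# helper above with a plain urllib request; real tests normally drive an
# asyncio client against the server instead.
def _example_http_roundtrip():
    import urllib.request
    with run_test_server() as httpd:
        url = 'http://{}:{}/'.format(*httpd.address)
        body = urllib.request.urlopen(url).read()
    assert body == b'Test message'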
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
It manages its own time directly.
If something is scheduled to be executed later, then on the next
loop iteration, after all ready handlers are done, the generator
passed to __init__ is called.
The generator should look like this:
def gen():
...
when = yield ...
... = yield time_advance
The value returned by yield is the absolute time of the next
scheduled handler. The value passed to yield is the time advance
used to move the loop's time forward.
(A concrete example generator appears right after this class.)
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
self._transports = weakref.WeakValueDictionary()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super().close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def _add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self)
def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
if fd not in self.readers:
raise AssertionError(f'fd {fd} is not registered')
handle = self.readers[fd]
if handle._callback != callback:
raise AssertionError(
f'unexpected callback: {handle._callback} != {callback}')
if handle._args != args:
raise AssertionError(
f'unexpected callback args: {handle._args} != {args}')
def assert_no_reader(self, fd):
if fd in self.readers:
raise AssertionError(f'fd {fd} is registered')
def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self)
def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _ensure_fd_no_transport(self, fd):
try:
transport = self._transports[fd]
except KeyError:
pass
else:
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(
fd, transport))
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super()._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args):
self._timers.append(when)
return super().call_at(when, callback, *args)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
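# Hedged sketch, not part of the original module: a time generator of
# the shape described in the TestLoop docstring, mirroring the pattern
# used by asyncio's own tests. The 0.1/0.2 deadlines are assumptions
# standing in for whatever the test actually schedules.
def _example_time_gen():
    when = yield                    # primed by next() in __init__
    assert round(when, 9) == 0.1    # first timer was set for t=0.1
    when = yield 0.1                # advance the clock so it fires
    assert round(when, 9) == 0.2    # second timer at t=0.2
    yield 0.1                       # the final send(0) in close() ends us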
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
def get_function_source(func):
source = events._get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
class TestCase(unittest.TestCase):
@staticmethod
def close_loop(loop):
executor = loop._default_executor
if executor is not None:
executor.shutdown(wait=True)
loop.close()
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(self.close_loop, loop)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
def unpatch_get_running_loop(self):
events._get_running_loop = self._get_running_loop
def setUp(self):
self._get_running_loop = events._get_running_loop
events._get_running_loop = lambda: None
self._thread_cleanup = support.threading_setup()
def tearDown(self):
self.unpatch_get_running_loop()
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
self.doCleanups()
support.threading_cleanup(*self._thread_cleanup)
support.reap_children()
if not compat.PY34:
# Python 3.3 compatibility
def subTest(self, *args, **kwargs):
class EmptyCM:
def __enter__(self):
pass
def __exit__(self, *exc):
pass
return EmptyCM()
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL+1)
yield
finally:
logger.setLevel(old_level)
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
family=socket.AF_INET):
"""Create a mock of a non-blocking socket."""
sock = mock.MagicMock(socket.socket)
sock.proto = proto
sock.type = type
sock.family = family
sock.gettimeout.return_value = 0.0
return sock
def force_legacy_ssl_support():
return mock.patch('asyncio.sslproto._is_sslproto_available',
return_value=False)
cosmopolitan/third_party/python/Lib/asyncio/proactor_events.py
"""Event loop using a proactor and related classes.
A proactor is a "notify-on-completion" multiplexer. Currently a
proactor is only implemented on Windows with IOCP.
"""
__all__ = ['BaseProactorEventLoop']
import socket
import warnings
from . import base_events
from . import compat
from . import constants
from . import futures
from . import sslproto
from . import transports
from .log import logger
class _ProactorBasePipeTransport(transports._FlowControlMixin,
transports.BaseTransport):
"""Base class for pipe and socket transports."""
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(extra, loop)
self._set_extra(sock)
self._sock = sock
self._protocol = protocol
self._server = server
self._buffer = None # None or bytearray.
self._read_fut = None
self._write_fut = None
self._pending_write = 0
self._conn_lost = 0
self._closing = False # Set when close() called.
self._eof_written = False
if self._server is not None:
self._server._attach()
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
if self._sock is not None:
info.append('fd=%s' % self._sock.fileno())
if self._read_fut is not None:
info.append('read=%s' % self._read_fut)
if self._write_fut is not None:
info.append("write=%r" % self._write_fut)
if self._buffer:
bufsize = len(self._buffer)
info.append('write_bufsize=%s' % bufsize)
if self._eof_written:
info.append('EOF written')
return '<%s>' % ' '.join(info)
def _set_extra(self, sock):
self._extra['pipe'] = sock
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closing
def close(self):
if self._closing:
return
self._closing = True
self._conn_lost += 1
if not self._buffer and self._write_fut is None:
self._loop.call_soon(self._call_connection_lost, None)
if self._read_fut is not None:
self._read_fut.cancel()
self._read_fut = None
# On Python 3.3 and older, objects with a destructor that are part of a
# reference cycle are never destroyed. That's no longer the case on
# Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if self._sock is not None:
warnings.warn("unclosed transport %r" % self, ResourceWarning,
source=self)
self.close()
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._force_close(exc)
def _force_close(self, exc):
if self._closing:
return
self._closing = True
self._conn_lost += 1
if self._write_fut:
self._write_fut.cancel()
self._write_fut = None
if self._read_fut:
self._read_fut.cancel()
self._read_fut = None
self._pending_write = 0
self._buffer = None
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
# XXX If there is a pending overlapped read on the other
# end then it may fail with ERROR_NETNAME_DELETED if we
# just close our end. First calling shutdown() seems to
# cure it, but maybe using DisconnectEx() would be better.
if hasattr(self._sock, 'shutdown'):
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
self._sock = None
server = self._server
if server is not None:
server._detach()
self._server = None
def get_write_buffer_size(self):
size = self._pending_write
if self._buffer is not None:
size += len(self._buffer)
return size
class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
transports.ReadTransport):
"""Transport for read pipes."""
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(loop, sock, protocol, waiter, extra, server)
self._paused = False
self._reschedule_on_resume = False
self._loop.call_soon(self._loop_reading)
def pause_reading(self):
if self._closing or self._paused:
return
self._paused = True
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if self._closing or not self._paused:
return
self._paused = False
if self._reschedule_on_resume:
self._loop.call_soon(self._loop_reading, self._read_fut)
self._reschedule_on_resume = False
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _loop_reading(self, fut=None):
if self._paused:
self._reschedule_on_resume = True
return
data = None
try:
if fut is not None:
assert self._read_fut is fut or (self._read_fut is None and
self._closing)
self._read_fut = None
data = fut.result() # deliver data later in "finally" clause
if self._closing:
# since close() has been called we ignore any read data
data = None
return
if data == b'':
# we got end-of-file so no need to reschedule a new read
return
# reschedule a new read
self._read_fut = self._loop._proactor.recv(self._sock, 4096)
except ConnectionAbortedError as exc:
if not self._closing:
self._fatal_error(exc, 'Fatal read error on pipe transport')
elif self._loop.get_debug():
logger.debug("Read error on pipe transport while closing",
exc_info=True)
except ConnectionResetError as exc:
self._force_close(exc)
except OSError as exc:
self._fatal_error(exc, 'Fatal read error on pipe transport')
except futures.CancelledError:
if not self._closing:
raise
else:
self._read_fut.add_done_callback(self._loop_reading)
finally:
if data:
self._protocol.data_received(data)
elif data is not None:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
keep_open = self._protocol.eof_received()
if not keep_open:
self.close()
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
transports.WriteTransport):
"""Transport for write pipes."""
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
msg = ("data argument must be a bytes-like object, not '%s'" %
type(data).__name__)
raise TypeError(msg)
if self._eof_written:
raise RuntimeError('write_eof() already called')
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
# Observable states:
# 1. IDLE: _write_fut and _buffer both None
# 2. WRITING: _write_fut set; _buffer None
# 3. BACKED UP: _write_fut set; _buffer a bytearray
# We always copy the data, so the caller can't modify it
# while we're still waiting for the I/O to happen.
if self._write_fut is None: # IDLE -> WRITING
assert self._buffer is None
# Pass a copy, except if it's already immutable.
self._loop_writing(data=bytes(data))
elif not self._buffer: # WRITING -> BACKED UP
# Make a mutable copy which we can extend.
self._buffer = bytearray(data)
self._maybe_pause_protocol()
else: # BACKED UP
# Append to buffer (also copies).
self._buffer.extend(data)
self._maybe_pause_protocol()
def _loop_writing(self, f=None, data=None):
try:
assert f is self._write_fut
self._write_fut = None
self._pending_write = 0
if f:
f.result()
if data is None:
data = self._buffer
self._buffer = None
if not data:
if self._closing:
self._loop.call_soon(self._call_connection_lost, None)
if self._eof_written:
self._sock.shutdown(socket.SHUT_WR)
# Now that we've reduced the buffer size, tell the
# protocol to resume writing if it was paused. Note that
# we do this last since the callback is called immediately
# and it may add more data to the buffer (even causing the
# protocol to be paused again).
self._maybe_resume_protocol()
else:
self._write_fut = self._loop._proactor.send(self._sock, data)
if not self._write_fut.done():
assert self._pending_write == 0
self._pending_write = len(data)
self._write_fut.add_done_callback(self._loop_writing)
self._maybe_pause_protocol()
else:
self._write_fut.add_done_callback(self._loop_writing)
except ConnectionResetError as exc:
self._force_close(exc)
except OSError as exc:
self._fatal_error(exc, 'Fatal write error on pipe transport')
def can_write_eof(self):
return True
def write_eof(self):
self.close()
def abort(self):
self._force_close(None)
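# Hedged sketch, not part of the original module: the observable state
# machine documented in write() above. `transport` is assumed to be an
# idle instance whose overlapped send does not complete synchronously.
def _example_write_states(transport):
    assert transport._write_fut is None and transport._buffer is None  # IDLE
    transport.write(b'a')   # IDLE -> WRITING: an overlapped send starts
    transport.write(b'b')   # WRITING -> BACKED UP: copied into a bytearray
    transport.write(b'c')   # BACKED UP: appended to the same buffer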
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._read_fut = self._loop._proactor.recv(self._sock, 16)
self._read_fut.add_done_callback(self._pipe_closed)
def _pipe_closed(self, fut):
if fut.cancelled():
# the transport has been closed
return
assert fut.result() == b''
if self._closing:
assert self._read_fut is None
return
assert fut is self._read_fut, (fut, self._read_fut)
self._read_fut = None
if self._write_fut is not None:
self._force_close(BrokenPipeError())
else:
self.close()
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
_ProactorBaseWritePipeTransport,
transports.Transport):
"""Transport for duplex pipes."""
def can_write_eof(self):
return False
def write_eof(self):
raise NotImplementedError
class _ProactorSocketTransport(_ProactorReadPipeTransport,
_ProactorBaseWritePipeTransport,
transports.Transport):
"""Transport for connected sockets."""
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(loop, sock, protocol, waiter, extra, server)
base_events._set_nodelay(sock)
def _set_extra(self, sock):
self._extra['socket'] = sock
try:
self._extra['sockname'] = sock.getsockname()
except (socket.error, AttributeError):
if self._loop.get_debug():
logger.warning("getsockname() failed on %r",
sock, exc_info=True)
if 'peername' not in self._extra:
try:
self._extra['peername'] = sock.getpeername()
except (socket.error, AttributeError):
if self._loop.get_debug():
logger.warning("getpeername() failed on %r",
sock, exc_info=True)
def can_write_eof(self):
return True
def write_eof(self):
if self._closing or self._eof_written:
return
self._eof_written = True
if self._write_fut is None:
self._sock.shutdown(socket.SHUT_WR)
class BaseProactorEventLoop(base_events.BaseEventLoop):
def __init__(self, proactor):
super().__init__()
logger.debug('Using proactor: %s', proactor.__class__.__name__)
self._proactor = proactor
self._selector = proactor # convenient alias
self._self_reading_future = None
self._accept_futures = {} # socket file descriptor => Future
proactor.set_loop(self)
self._make_self_pipe()
def _make_socket_transport(self, sock, protocol, waiter=None,
extra=None, server=None):
return _ProactorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None):
if not sslproto._is_sslproto_available():
raise NotImplementedError("Proactor event loop requires Python 3.5"
" or newer (ssl.MemoryBIO) to support "
"SSL")
ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
server_side, server_hostname)
_ProactorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport
def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
return _ProactorDuplexPipeTransport(self,
sock, protocol, waiter, extra)
def _make_read_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
def _make_write_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
# We want connection_lost() to be called when the other end closes
return _ProactorWritePipeTransport(self,
sock, protocol, waiter, extra)
def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
# Call these methods before closing the event loop (before calling
# BaseEventLoop.close), because they can schedule callbacks with
# call_soon(), which is forbidden when the event loop is closed.
self._stop_accept_futures()
self._close_self_pipe()
self._proactor.close()
self._proactor = None
self._selector = None
# Close the event loop
super().close()
def sock_recv(self, sock, n):
return self._proactor.recv(sock, n)
def sock_sendall(self, sock, data):
return self._proactor.send(sock, data)
def sock_connect(self, sock, address):
return self._proactor.connect(sock, address)
def sock_accept(self, sock):
return self._proactor.accept(sock)
def _socketpair(self):
raise NotImplementedError
def _close_self_pipe(self):
if self._self_reading_future is not None:
self._self_reading_future.cancel()
self._self_reading_future = None
self._ssock.close()
self._ssock = None
self._csock.close()
self._csock = None
self._internal_fds -= 1
def _make_self_pipe(self):
# A self-socket, really. :-)
self._ssock, self._csock = self._socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
self.call_soon(self._loop_self_reading)
def _loop_self_reading(self, f=None):
try:
if f is not None:
f.result() # may raise
f = self._proactor.recv(self._ssock, 4096)
except futures.CancelledError:
# _close_self_pipe() has been called, stop waiting for data
return
except Exception as exc:
self.call_exception_handler({
'message': 'Error on reading from the event loop self pipe',
'exception': exc,
'loop': self,
})
else:
self._self_reading_future = f
f.add_done_callback(self._loop_self_reading)
def _write_to_self(self):
self._csock.send(b'\0')
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None, backlog=100):
def loop(f=None):
try:
if f is not None:
conn, addr = f.result()
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
protocol = protocol_factory()
if sslcontext is not None:
self._make_ssl_transport(
conn, protocol, sslcontext, server_side=True,
extra={'peername': addr}, server=server)
else:
self._make_socket_transport(
conn, protocol,
extra={'peername': addr}, server=server)
if self.is_closed():
return
f = self._proactor.accept(sock)
except OSError as exc:
if sock.fileno() != -1:
self.call_exception_handler({
'message': 'Accept failed on a socket',
'exception': exc,
'socket': sock,
})
sock.close()
elif self._debug:
logger.debug("Accept failed on socket %r",
sock, exc_info=True)
except futures.CancelledError:
sock.close()
else:
self._accept_futures[sock.fileno()] = f
f.add_done_callback(loop)
self.call_soon(loop)
def _process_events(self, event_list):
# Events are processed in the IocpProactor._poll() method
pass
def _stop_accept_futures(self):
for future in self._accept_futures.values():
future.cancel()
self._accept_futures.clear()
def _stop_serving(self, sock):
self._stop_accept_futures()
self._proactor._stop_serving(sock)
sock.close()
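# Hedged usage sketch, not part of the original module: the
# completion-based sock_* methods above return futures that resolve once
# the operation has finished, not merely when the socket is ready.
def _example_completion_recv(loop, sock, nbytes):
    fut = loop.sock_recv(sock, nbytes)    # overlapped receive starts now
    return loop.run_until_complete(fut)   # block until the data arrived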
cosmopolitan/third_party/python/Lib/asyncio/futures.py
"""A Future class similar to the one in PEP 3148."""
__all__ = ['CancelledError', 'TimeoutError', 'InvalidStateError',
'Future', 'wrap_future', 'isfuture']
import concurrent.futures
import logging
import sys
import traceback
from . import base_futures
from . import compat
from . import events
CancelledError = base_futures.CancelledError
InvalidStateError = base_futures.InvalidStateError
TimeoutError = base_futures.TimeoutError
isfuture = base_futures.isfuture
_PENDING = base_futures._PENDING
_CANCELLED = base_futures._CANCELLED
_FINISHED = base_futures._FINISHED
STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
class _TracebackLogger:
"""Helper to log a traceback upon destruction if not cleared.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
called (and a helper object is present), it removes the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _TracebackLogger, and
then the _TracebackLogger would be included in a cycle, which is
what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, and that call is delayed until after all the
Future's callbacks have run. Since a Future usually has at least
one callback (typically set by 'yield from'), and that callback
usually extracts the exception, the need to format it rarely
arises in practice.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ('loop', 'source_traceback', 'exc', 'tb')
def __init__(self, future, exc):
self.loop = future._loop
self.source_traceback = future._source_traceback
self.exc = exc
self.tb = None
def activate(self):
exc = self.exc
if exc is not None:
self.exc = None
self.tb = traceback.format_exception(exc.__class__, exc,
exc.__traceback__)
def clear(self):
self.exc = None
self.tb = None
def __del__(self):
if self.tb:
msg = 'Future/Task exception was never retrieved\n'
if self.source_traceback:
src = ''.join(traceback.format_list(self.source_traceback))
msg += 'Future/Task created at (most recent call last):\n'
msg += '%s\n' % src.rstrip()
msg += ''.join(self.tb).rstrip()
self.loop.call_exception_handler({'message': msg})
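# Hedged sketch, not part of the original module: the lifecycle of the
# helper described above on the pre-3.4 code path (on Python 3.4+ the
# Future uses _log_traceback and __del__ instead, see below).
def _example_tb_logger_lifecycle(fut, exc):
    helper = _TracebackLogger(fut, exc)  # attached by set_exception()
    helper.activate()   # format the traceback after callbacks have run
    helper.clear()      # result()/exception() was called: never log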
class Future:
"""This class is *almost* compatible with concurrent.futures.Future.
Differences:
- This class is not thread-safe.
- result() and exception() do not take a timeout argument and
raise an exception when the future isn't done yet.
- Callbacks registered with add_done_callback() are always called
via the event loop's call_soon().
- This class is not compatible with the wait() and as_completed()
methods in the concurrent.futures package.
(In Python 3.4 or later we may be able to unify the implementations.)
"""
# Class variables serving as defaults for instance variables.
_state = _PENDING
_result = None
_exception = None
_loop = None
_source_traceback = None
# This field is used for a dual purpose:
# - Its presence is a marker to declare that a class implements
# the Future protocol (i.e. is intended to be duck-type compatible).
# The value must also be not-None, to enable a subclass to declare
# that it is not compatible by setting this to None.
# - It is set by __iter__() below so that Task._step() can tell
# the difference between `yield from Future()` (correct) vs.
# `yield Future()` (incorrect).
_asyncio_future_blocking = False
_log_traceback = False
def __init__(self, *, loop=None):
"""Initialize the future.
The optional event_loop argument allows explicitly setting the event
loop object used by the future. If it's not provided, the future uses
the default event loop.
"""
if loop is None:
self._loop = events.get_event_loop()
else:
self._loop = loop
self._callbacks = []
if self._loop.get_debug():
self._source_traceback = events.extract_stack(sys._getframe(1))
_repr_info = base_futures._future_repr_info
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, ' '.join(self._repr_info()))
# On Python 3.3 and older, objects with a destructor that are part of a
# reference cycle are never destroyed. That's no longer the case on
# Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if not self._log_traceback:
# set_exception() was not called, or result() or exception()
# has consumed the exception
return
exc = self._exception
context = {
'message': ('%s exception was never retrieved'
% self.__class__.__name__),
'exception': exc,
'future': self,
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def cancel(self):
"""Cancel the future and schedule callbacks.
If the future is already done or cancelled, return False. Otherwise,
change the future's state to cancelled, schedule the callbacks and
return True.
"""
self._log_traceback = False
if self._state != _PENDING:
return False
self._state = _CANCELLED
self._schedule_callbacks()
return True
def _schedule_callbacks(self):
"""Internal: Ask the event loop to call all callbacks.
The callbacks are scheduled to be called as soon as possible. Also
clears the callback list.
"""
callbacks = self._callbacks[:]
if not callbacks:
return
self._callbacks[:] = []
for callback in callbacks:
self._loop.call_soon(callback, self)
def cancelled(self):
"""Return True if the future was cancelled."""
return self._state == _CANCELLED
# Don't implement running(); see http://bugs.python.org/issue18699
def done(self):
"""Return True if the future is done.
Done means either that a result / exception is available, or that the
future was cancelled.
"""
return self._state != _PENDING
def result(self):
"""Return the result this future represents.
If the future has been cancelled, raises CancelledError. If the
future's result isn't yet available, raises InvalidStateError. If
the future is done and has an exception set, this exception is raised.
"""
if self._state == _CANCELLED:
raise CancelledError
if self._state != _FINISHED:
raise InvalidStateError('Result is not ready.')
self._log_traceback = False
if self._exception is not None:
raise self._exception
return self._result
def exception(self):
"""Return the exception that was set on this future.
The exception (or None if no exception was set) is returned only if
the future is done. If the future has been cancelled, raises
CancelledError. If the future isn't done yet, raises
InvalidStateError.
"""
if self._state == _CANCELLED:
raise CancelledError
if self._state != _FINISHED:
raise InvalidStateError('Exception is not set.')
self._log_traceback = False
return self._exception
def add_done_callback(self, fn):
"""Add a callback to be run when the future becomes done.
The callback is called with a single argument - the future object. If
the future is already done when this is called, the callback is
scheduled with call_soon.
"""
if self._state != _PENDING:
self._loop.call_soon(fn, self)
else:
self._callbacks.append(fn)
# New method not in PEP 3148.
def remove_done_callback(self, fn):
"""Remove all instances of a callback from the "call when done" list.
Returns the number of callbacks removed.
"""
filtered_callbacks = [f for f in self._callbacks if f != fn]
removed_count = len(self._callbacks) - len(filtered_callbacks)
if removed_count:
self._callbacks[:] = filtered_callbacks
return removed_count
# So-called internal methods (note: no set_running_or_notify_cancel()).
def set_result(self, result):
"""Mark the future done and set its result.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
raise InvalidStateError('{}: {!r}'.format(self._state, self))
self._result = result
self._state = _FINISHED
self._schedule_callbacks()
def set_exception(self, exception):
"""Mark the future done and set an exception.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
raise InvalidStateError('{}: {!r}'.format(self._state, self))
if isinstance(exception, type):
exception = exception()
if type(exception) is StopIteration:
raise TypeError("StopIteration interacts badly with generators "
"and cannot be raised into a Future")
self._exception = exception
self._state = _FINISHED
self._schedule_callbacks()
if compat.PY34:
self._log_traceback = True
else:
self._tb_logger = _TracebackLogger(self, exception)
# Arrange for the logger to be activated after all callbacks
# have had a chance to call result() or exception().
self._loop.call_soon(self._tb_logger.activate)
def __iter__(self):
if not self.done():
self._asyncio_future_blocking = True
yield self # This tells Task to wait for completion.
assert self.done(), "yield from wasn't used with future"
return self.result() # May raise too.
if compat.PY35:
__await__ = __iter__ # make compatible with 'await' expression
# Needed for testing purposes.
_PyFuture = Future
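# Hedged sketch, not part of the original module: what __iter__ above
# enables. A Task driving this generator sees the future yielded once
# (with _asyncio_future_blocking set to True), suspends on it, and
# resumes us when it completes; the final value comes from result().
def _example_yield_from(fut):
    value = yield from fut  # may raise the future's exception instead
    return value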
def _set_result_unless_cancelled(fut, result):
"""Helper setting the result only if the future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _set_concurrent_future_state(concurrent, source):
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurrent.cancel()
if not concurrent.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurrent.set_exception(exception)
else:
result = source.result()
concurrent.set_result(result)
def _copy_future_state(source, dest):
"""Internal helper to copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(exception)
else:
result = source.result()
dest.set_result(result)
def _chain_future(source, destination):
"""Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
"""
if not isfuture(source) and not isinstance(source,
concurrent.futures.Future):
raise TypeError('A future is required for source argument')
if not isfuture(destination) and not isinstance(destination,
concurrent.futures.Future):
raise TypeError('A future is required for destination argument')
source_loop = source._loop if isfuture(source) else None
dest_loop = destination._loop if isfuture(destination) else None
def _set_state(future, other):
if isfuture(future):
_copy_future_state(other, future)
else:
_set_concurrent_future_state(future, other)
def _call_check_cancel(destination):
if destination.cancelled():
if source_loop is None or source_loop is dest_loop:
source.cancel()
else:
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(source):
if (destination.cancelled() and
dest_loop is not None and dest_loop.is_closed()):
return
if dest_loop is None or dest_loop is source_loop:
_set_state(destination, source)
else:
dest_loop.call_soon_threadsafe(_set_state, destination, source)
destination.add_done_callback(_call_check_cancel)
source.add_done_callback(_call_set_state)
def wrap_future(future, *, loop=None):
"""Wrap concurrent.futures.Future object."""
if isfuture(future):
return future
assert isinstance(future, concurrent.futures.Future), \
'concurrent.futures.Future is expected, got {!r}'.format(future)
if loop is None:
loop = events.get_event_loop()
new_future = loop.create_future()
_chain_future(future, new_future)
return new_future
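# Hedged usage sketch, not part of the original module: bridging a
# thread-pool future into asyncio with wrap_future(). `executor` is
# assumed to be a concurrent.futures.ThreadPoolExecutor.
def _example_wrap(loop, executor):
    cf = executor.submit(pow, 2, 10)    # runs in a worker thread
    af = wrap_future(cf, loop=loop)     # asyncio Future chained to it
    return loop.run_until_complete(af)  # -> 1024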
try:
import _asyncio
except ImportError:
pass
else:
# _CFuture is needed for tests.
Future = _CFuture = _asyncio.Future
cosmopolitan/third_party/python/Lib/asyncio/constants.py
"""Constants."""
# After the connection is lost, log warnings after this many write()s.
LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5
# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1
# Number of stack entries to capture in debug mode.
# The larger the number, the slower the operation in debug mode
# (see extract_stack() in events.py)
DEBUG_STACK_DEPTH = 10
cosmopolitan/third_party/python/Lib/asyncio/tasks.py
"""Support for tasks, coroutines and the scheduler."""
__all__ = ['Task',
'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
'wait', 'wait_for', 'as_completed', 'sleep', 'async',
'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
]
import concurrent.futures
import functools
import inspect
import warnings
import weakref
from . import base_tasks
from . import compat
from . import coroutines
from . import events
from . import futures
from .coroutines import coroutine
class Task(futures.Future):
"""A coroutine wrapped in a Future."""
# An important invariant maintained while a Task is not done:
#
# - Either _fut_waiter is None, and _step() is scheduled;
# - or _fut_waiter is some Future, and _step() is *not* scheduled.
#
# The only transition from the latter to the former is through
# _wakeup(). When _fut_waiter is not None, one of its callbacks
# must be _wakeup().
# Weak set containing all tasks alive.
_all_tasks = weakref.WeakSet()
# Dictionary containing tasks that are currently active in
# all running event loops. {EventLoop: Task}
_current_tasks = {}
# If False, don't log a message if the task is destroyed while its
# status is still pending.
_log_destroy_pending = True
@classmethod
def current_task(cls, loop=None):
"""Return the currently running task in an event loop or None.
By default the current task for the current event loop is returned.
None is returned when called not in the context of a Task.
"""
if loop is None:
loop = events.get_event_loop()
return cls._current_tasks.get(loop)
@classmethod
def all_tasks(cls, loop=None):
"""Return a set of all tasks for an event loop.
By default all tasks for the current event loop are returned.
"""
if loop is None:
loop = events.get_event_loop()
return {t for t in cls._all_tasks if t._loop is loop}
def __init__(self, coro, *, loop=None):
assert coroutines.iscoroutine(coro), repr(coro)
super().__init__(loop=loop)
if self._source_traceback:
del self._source_traceback[-1]
self._coro = coro
self._fut_waiter = None
self._must_cancel = False
self._loop.call_soon(self._step)
self.__class__._all_tasks.add(self)
# On Python 3.3 or older, objects with a destructor that are part of a
# reference cycle are never destroyed. That's no longer the case on
# Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if self._state == futures._PENDING and self._log_destroy_pending:
context = {
'task': self,
'message': 'Task was destroyed but it is pending!',
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
futures.Future.__del__(self)
def _repr_info(self):
return base_tasks._task_repr_info(self)
def get_stack(self, *, limit=None):
"""Return the list of stack frames for this task's coroutine.
If the coroutine is not done, this returns the stack where it is
suspended. If the coroutine has completed successfully or was
cancelled, this returns an empty list. If the coroutine was
terminated by an exception, this returns the list of traceback
frames.
The frames are always ordered from oldest to newest.
The optional limit gives the maximum number of frames to
return; by default all available frames are returned. Its
meaning differs depending on whether a stack or a traceback is
returned: the newest frames of a stack are returned, but the
oldest frames of a traceback are returned. (This matches the
behavior of the traceback module.)
For reasons beyond our control, only one stack frame is
returned for a suspended coroutine.
"""
return base_tasks._task_get_stack(self, limit)
def print_stack(self, *, limit=None, file=None):
"""Print the stack or traceback for this task's coroutine.
This produces output similar to that of the traceback module,
for the frames retrieved by get_stack(). The limit argument
is passed to get_stack(). The file argument is an I/O stream
to which the output is written; by default output is written
to sys.stderr.
"""
return base_tasks._task_print_stack(self, limit, file)
def cancel(self):
"""Request that this task cancel itself.
This arranges for a CancelledError to be thrown into the
wrapped coroutine on the next cycle through the event loop.
The coroutine then has a chance to clean up or even deny
the request using try/except/finally.
Unlike Future.cancel, this does not guarantee that the
task will be cancelled: the exception might be caught and
acted upon, delaying cancellation of the task or preventing
cancellation completely. The task may also return a value or
raise a different exception.
Immediately after this method is called, Task.cancelled() will
not return True (unless the task was already cancelled). A
task will be marked as cancelled when the wrapped coroutine
terminates with a CancelledError exception (even if cancel()
was not called).
"""
self._log_traceback = False
if self.done():
return False
if self._fut_waiter is not None:
if self._fut_waiter.cancel():
# Leave self._fut_waiter; it may be a Task that
# catches and ignores the cancellation so we may have
# to cancel it again later.
return True
# It must be the case that self._step is already scheduled.
self._must_cancel = True
return True
def _step(self, exc=None):
assert not self.done(), \
'_step(): already done: {!r}, {!r}'.format(self, exc)
if self._must_cancel:
if not isinstance(exc, futures.CancelledError):
exc = futures.CancelledError()
self._must_cancel = False
coro = self._coro
self._fut_waiter = None
self.__class__._current_tasks[self._loop] = self
# Call either coro.throw(exc) or coro.send(None).
try:
if exc is None:
# We use the `send` method directly, because coroutines
# don't have `__iter__` and `__next__` methods.
result = coro.send(None)
else:
result = coro.throw(exc)
except StopIteration as exc:
if self._must_cancel:
# Task is cancelled right before coro stops.
self._must_cancel = False
self.set_exception(futures.CancelledError())
else:
self.set_result(exc.value)
except futures.CancelledError:
super().cancel() # I.e., Future.cancel(self).
except Exception as exc:
self.set_exception(exc)
except BaseException as exc:
self.set_exception(exc)
raise
else:
blocking = getattr(result, '_asyncio_future_blocking', None)
if blocking is not None:
# Yielded Future must come from Future.__iter__().
if result._loop is not self._loop:
self._loop.call_soon(
self._step,
RuntimeError(
'Task {!r} got Future {!r} attached to a '
'different loop'.format(self, result)))
elif blocking:
if result is self:
self._loop.call_soon(
self._step,
RuntimeError(
'Task cannot await on itself: {!r}'.format(
self)))
else:
result._asyncio_future_blocking = False
result.add_done_callback(self._wakeup)
self._fut_waiter = result
if self._must_cancel:
if self._fut_waiter.cancel():
self._must_cancel = False
else:
self._loop.call_soon(
self._step,
RuntimeError(
'yield was used instead of yield from '
'in task {!r} with {!r}'.format(self, result)))
elif result is None:
# Bare yield relinquishes control for one event loop iteration.
self._loop.call_soon(self._step)
elif inspect.isgenerator(result):
# Yielding a generator is just wrong.
self._loop.call_soon(
self._step,
RuntimeError(
'yield was used instead of yield from for '
'generator in task {!r} with {!r}'.format(
self, result)))
else:
# Yielding something else is an error.
self._loop.call_soon(
self._step,
RuntimeError(
'Task got bad yield: {!r}'.format(result)))
finally:
self.__class__._current_tasks.pop(self._loop)
self = None # Needed to break cycles when an exception occurs.
def _wakeup(self, future):
try:
future.result()
except Exception as exc:
# This may also be a cancellation.
self._step(exc)
else:
# Don't pass the value of `future.result()` explicitly,
# as `Future.__iter__` and `Future.__await__` don't need it.
# If we call `_step(value, None)` instead of `_step()`,
# Python eval loop would use `.send(value)` method call,
# instead of `__next__()`, which is slower for futures
# that return non-generator iterators from their `__iter__`.
self._step()
self = None # Needed to break cycles when an exception occurs.
_PyTask = Task
try:
import _asyncio
except ImportError:
pass
else:
# _CTask is needed for tests.
Task = _CTask = _asyncio.Task
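# Illustrative usage sketch (added; not part of the original module): the
# cancellation semantics described in Task.cancel(), assuming the caller
# owns a fresh event loop. `_example_task_cancel` is a hypothetical helper
# that is never called by this module; `sleep` is defined later in the file.
def _example_task_cancel(loop):
    @coroutine
    def worker():
        try:
            yield from sleep(10)
        except futures.CancelledError:
            # A coroutine may clean up here; re-raising marks the task
            # as cancelled rather than merely finished.
            raise
    task = loop.create_task(worker())
    loop.call_soon(task.cancel)
    try:
        loop.run_until_complete(task)
    except futures.CancelledError:
        pass
    return task.cancelled()  # True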
# wait() and as_completed() are similar to those in PEP 3148.
FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
@coroutine
def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the Futures and coroutines given by fs to complete.
    The fs sequence of futures must not be empty.
Coroutines will be wrapped in Tasks.
Returns two sets of Future: (done, pending).
Usage:
done, pending = yield from asyncio.wait(fs)
Note: This does not raise TimeoutError! Futures that aren't done
when the timeout occurs are returned in the second set.
"""
if futures.isfuture(fs) or coroutines.iscoroutine(fs):
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
if not fs:
raise ValueError('Set of coroutines/Futures is empty.')
if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
raise ValueError('Invalid return_when value: {}'.format(return_when))
if loop is None:
loop = events.get_event_loop()
fs = {ensure_future(f, loop=loop) for f in set(fs)}
return (yield from _wait(fs, timeout, return_when, loop))
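# Illustrative usage sketch (added; not part of the original module):
# driving wait() with FIRST_COMPLETED and cancelling the stragglers by
# hand, since wait() never cancels for you. The helper and its inner
# coroutines are hypothetical stand-ins.
def _example_wait(loop):
    @coroutine
    def job(delay, value):
        yield from sleep(delay)
        return value
    @coroutine
    def main():
        done, pending = yield from wait(
            [job(0.01, 'fast'), job(2, 'slow')],
            loop=loop, return_when=FIRST_COMPLETED)
        for t in pending:
            t.cancel()  # wait() leaves stragglers running
        if pending:
            yield from wait(pending, loop=loop)  # let cancellations land
        return {t.result() for t in done}
    return loop.run_until_complete(main())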
def _release_waiter(waiter, *args):
if not waiter.done():
waiter.set_result(None)
@coroutine
def wait_for(fut, timeout, *, loop=None):
"""Wait for the single Future or coroutine to complete, with timeout.
Coroutine will be wrapped in Task.
Returns result of the Future or coroutine. When a timeout occurs,
it cancels the task and raises TimeoutError. To avoid the task
cancellation, wrap it in shield().
If the wait is cancelled, the task is also cancelled.
This function is a coroutine.
"""
if loop is None:
loop = events.get_event_loop()
if timeout is None:
return (yield from fut)
waiter = loop.create_future()
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
cb = functools.partial(_release_waiter, waiter)
fut = ensure_future(fut, loop=loop)
fut.add_done_callback(cb)
try:
# wait until the future completes or the timeout
try:
yield from waiter
except futures.CancelledError:
fut.remove_done_callback(cb)
fut.cancel()
raise
if fut.done():
return fut.result()
else:
fut.remove_done_callback(cb)
fut.cancel()
raise futures.TimeoutError()
finally:
timeout_handle.cancel()
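# Illustrative usage sketch (added; not part of the original module): a
# timeout shorter than the work cancels the inner task and surfaces
# TimeoutError to the caller. The helper name is hypothetical.
def _example_wait_for(loop):
    @coroutine
    def main():
        try:
            yield from wait_for(sleep(10), timeout=0.01, loop=loop)
        except futures.TimeoutError:
            return 'timed out'  # the inner task has been cancelled
    return loop.run_until_complete(main())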
@coroutine
def _wait(fs, timeout, return_when, loop):
"""Internal helper for wait() and wait_for().
The fs argument must be a collection of Futures.
"""
assert fs, 'Set of Futures is empty.'
waiter = loop.create_future()
timeout_handle = None
if timeout is not None:
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
counter = len(fs)
def _on_completion(f):
nonlocal counter
counter -= 1
if (counter <= 0 or
return_when == FIRST_COMPLETED or
return_when == FIRST_EXCEPTION and (not f.cancelled() and
f.exception() is not None)):
if timeout_handle is not None:
timeout_handle.cancel()
if not waiter.done():
waiter.set_result(None)
for f in fs:
f.add_done_callback(_on_completion)
try:
yield from waiter
finally:
if timeout_handle is not None:
timeout_handle.cancel()
done, pending = set(), set()
for f in fs:
f.remove_done_callback(_on_completion)
if f.done():
done.add(f)
else:
pending.add(f)
return done, pending
# This is *not* a @coroutine! It is just an iterator (yielding Futures).
def as_completed(fs, *, loop=None, timeout=None):
"""Return an iterator whose values are coroutines.
When waiting for the yielded coroutines you'll get the results (or
exceptions!) of the original Futures (or coroutines), in the order
in which and as soon as they complete.
This differs from PEP 3148; the proper way to use this is:
for f in as_completed(fs):
result = yield from f # The 'yield from' may raise.
# Use result.
If a timeout is specified, the 'yield from' will raise
TimeoutError when the timeout occurs before all Futures are done.
Note: The futures 'f' are not necessarily members of fs.
"""
if futures.isfuture(fs) or coroutines.iscoroutine(fs):
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
loop = loop if loop is not None else events.get_event_loop()
todo = {ensure_future(f, loop=loop) for f in set(fs)}
from .queues import Queue # Import here to avoid circular import problem.
done = Queue(loop=loop)
timeout_handle = None
def _on_timeout():
for f in todo:
f.remove_done_callback(_on_completion)
done.put_nowait(None) # Queue a dummy value for _wait_for_one().
todo.clear() # Can't do todo.remove(f) in the loop.
def _on_completion(f):
if not todo:
return # _on_timeout() was here first.
todo.remove(f)
done.put_nowait(f)
if not todo and timeout_handle is not None:
timeout_handle.cancel()
@coroutine
def _wait_for_one():
f = yield from done.get()
if f is None:
# Dummy value from _on_timeout().
raise futures.TimeoutError
return f.result() # May raise f.exception().
for f in todo:
f.add_done_callback(_on_completion)
if todo and timeout is not None:
timeout_handle = loop.call_later(timeout, _on_timeout)
for _ in range(len(todo)):
yield _wait_for_one()
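# Illustrative usage sketch (added; not part of the original module):
# consuming results in completion order rather than submission order,
# using hypothetical stand-in coroutines.
def _example_as_completed(loop):
    @coroutine
    def job(delay, value):
        yield from sleep(delay)
        return value
    @coroutine
    def main():
        results = []
        for f in as_completed([job(0.02, 'b'), job(0.01, 'a')], loop=loop):
            results.append((yield from f))  # may raise the job's exception
        return results  # ['a', 'b']: completion order, not argument order
    return loop.run_until_complete(main())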
@coroutine
def sleep(delay, result=None, *, loop=None):
"""Coroutine that completes after a given time (in seconds)."""
if delay == 0:
yield
return result
if loop is None:
loop = events.get_event_loop()
future = loop.create_future()
h = future._loop.call_later(delay,
futures._set_result_unless_cancelled,
future, result)
try:
return (yield from future)
finally:
h.cancel()
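# Illustrative usage sketch (added; not part of the original module):
# sleep() doubles as a cancellable delay that can carry a value back to
# the awaiter. The helper name is hypothetical.
def _example_sleep(loop):
    @coroutine
    def main():
        return (yield from sleep(0.01, result='done', loop=loop))
    return loop.run_until_complete(main())  # 'done'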
def async_(coro_or_future, *, loop=None):
"""Wrap a coroutine in a future.
If the argument is a Future, it is returned directly.
This function is deprecated in 3.5. Use asyncio.ensure_future() instead.
"""
warnings.warn("asyncio.async() function is deprecated, use ensure_future()",
DeprecationWarning,
stacklevel=2)
return ensure_future(coro_or_future, loop=loop)
# Silence DeprecationWarning:
globals()['async'] = async_
async_.__name__ = 'async'
del async_
def ensure_future(coro_or_future, *, loop=None):
"""Wrap a coroutine or an awaitable in a future.
If the argument is a Future, it is returned directly.
"""
if futures.isfuture(coro_or_future):
if loop is not None and loop is not coro_or_future._loop:
raise ValueError('loop argument must agree with Future')
return coro_or_future
elif coroutines.iscoroutine(coro_or_future):
if loop is None:
loop = events.get_event_loop()
task = loop.create_task(coro_or_future)
if task._source_traceback:
del task._source_traceback[-1]
return task
elif compat.PY35 and inspect.isawaitable(coro_or_future):
return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
else:
raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
'required')
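# Illustrative usage sketch (added; not part of the original module):
# ensure_future() passes Futures through unchanged and wraps bare
# coroutines in Tasks on the given loop. The helper name is hypothetical.
def _example_ensure_future(loop):
    fut = loop.create_future()
    assert ensure_future(fut) is fut                  # a Future passes through
    task = ensure_future(sleep(0, loop=loop), loop=loop)
    assert futures.isfuture(task)                     # a coroutine became a Task
    loop.run_until_complete(task)                     # drive it to completion
    return task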
@coroutine
def _wrap_awaitable(awaitable):
"""Helper for asyncio.ensure_future().
Wraps awaitable (an object with __await__) into a coroutine
that will later be wrapped in a Task by ensure_future().
"""
return (yield from awaitable.__await__())
class _GatheringFuture(futures.Future):
"""Helper for gather().
This overrides cancel() to cancel all the children and act more
like Task.cancel(), which doesn't immediately mark itself as
cancelled.
"""
def __init__(self, children, *, loop=None):
super().__init__(loop=loop)
self._children = children
self._cancel_requested = False
def cancel(self):
if self.done():
return False
ret = False
for child in self._children:
if child.cancel():
ret = True
if ret:
# If any child tasks were actually cancelled, we should
# propagate the cancellation request regardless of
# *return_exceptions* argument. See issue 32684.
self._cancel_requested = True
return ret
def gather(*coros_or_futures, loop=None, return_exceptions=False):
"""Return a future aggregating results from the given coroutines
or futures.
Coroutines will be wrapped in a future and scheduled in the event
loop. They will not necessarily be scheduled in the same order as
passed in.
All futures must share the same event loop. If all the tasks are
done successfully, the returned future's result is the list of
results (in the order of the original sequence, not necessarily
the order of results arrival). If *return_exceptions* is True,
exceptions in the tasks are treated the same as successful
results, and gathered in the result list; otherwise, the first
raised exception will be immediately propagated to the returned
future.
Cancellation: if the outer Future is cancelled, all children (that
have not completed yet) are also cancelled. If any child is
cancelled, this is treated as if it raised CancelledError --
the outer Future is *not* cancelled in this case. (This is to
    prevent the cancellation of one child from causing other children
    to be cancelled.)
"""
if not coros_or_futures:
if loop is None:
loop = events.get_event_loop()
outer = loop.create_future()
outer.set_result([])
return outer
arg_to_fut = {}
for arg in set(coros_or_futures):
if not futures.isfuture(arg):
fut = ensure_future(arg, loop=loop)
if loop is None:
loop = fut._loop
            # The caller cannot control this future, so the "destroy
            # pending task" warning should not be emitted.
fut._log_destroy_pending = False
else:
fut = arg
if loop is None:
loop = fut._loop
elif fut._loop is not loop:
raise ValueError("futures are tied to different event loops")
arg_to_fut[arg] = fut
children = [arg_to_fut[arg] for arg in coros_or_futures]
nchildren = len(children)
outer = _GatheringFuture(children, loop=loop)
nfinished = 0
results = [None] * nchildren
def _done_callback(i, fut):
nonlocal nfinished
if outer.done():
if not fut.cancelled():
# Mark exception retrieved.
fut.exception()
return
if fut.cancelled():
res = futures.CancelledError()
if not return_exceptions:
outer.set_exception(res)
return
elif fut._exception is not None:
res = fut.exception() # Mark exception retrieved.
if not return_exceptions:
outer.set_exception(res)
return
else:
res = fut._result
results[i] = res
nfinished += 1
if nfinished == nchildren:
if outer._cancel_requested:
outer.set_exception(futures.CancelledError())
else:
outer.set_result(results)
for i, fut in enumerate(children):
fut.add_done_callback(functools.partial(_done_callback, i))
return outer
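# Illustrative usage sketch (added; not part of the original module):
# gather() keeps result order matching the argument order, and
# return_exceptions=True delivers failures in-band instead of raising the
# first one. The helper and inner coroutines are hypothetical.
def _example_gather(loop):
    @coroutine
    def ok(v):
        yield from sleep(0)
        return v
    @coroutine
    def boom():
        yield from sleep(0)
        raise RuntimeError('boom')
    @coroutine
    def main():
        return (yield from gather(ok(1), boom(), ok(3),
                                  loop=loop, return_exceptions=True))
    return loop.run_until_complete(main())  # [1, RuntimeError('boom'), 3]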
def shield(arg, *, loop=None):
"""Wait for a future, shielding it from cancellation.
The statement
res = yield from shield(something())
is exactly equivalent to the statement
res = yield from something()
*except* that if the coroutine containing it is cancelled, the
task running in something() is not cancelled. From the POV of
something(), the cancellation did not happen. But its caller is
still cancelled, so the yield-from expression still raises
CancelledError. Note: If something() is cancelled by other means
this will still cancel shield().
If you want to completely ignore cancellation (not recommended)
you can combine shield() with a try/except clause, as follows:
try:
res = yield from shield(something())
except CancelledError:
res = None
"""
inner = ensure_future(arg, loop=loop)
if inner.done():
# Shortcut.
return inner
loop = inner._loop
outer = loop.create_future()
def _done_callback(inner):
if outer.cancelled():
if not inner.cancelled():
# Mark inner's result as retrieved.
inner.exception()
return
if inner.cancelled():
outer.cancel()
else:
exc = inner.exception()
if exc is not None:
outer.set_exception(exc)
else:
outer.set_result(inner.result())
inner.add_done_callback(_done_callback)
return outer
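# Illustrative usage sketch (added; not part of the original module):
# cancelling the outer await of shield() raises CancelledError in the
# caller while the shielded task keeps running. `work` is a hypothetical
# coroutine function supplied by the caller.
def _example_shield(loop, work):
    @coroutine
    def main():
        try:
            return (yield from shield(work(), loop=loop))
        except futures.CancelledError:
            # The outer wait was cancelled; the inner task created by
            # shield() continues to completion on its own.
            return None
    return loop.run_until_complete(main())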
def run_coroutine_threadsafe(coro, loop):
"""Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required')
future = concurrent.futures.Future()
def callback():
try:
futures._chain_future(ensure_future(coro, loop=loop), future)
except Exception as exc:
if future.set_running_or_notify_cancel():
future.set_exception(exc)
raise
loop.call_soon_threadsafe(callback)
return future
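# Illustrative usage sketch (added; not part of the original module):
# submitting a coroutine from another thread; the returned
# concurrent.futures.Future supports a plain blocking result(). The helper
# name is hypothetical.
def _example_run_coroutine_threadsafe(loop):
    # `loop` is assumed to be running in a different thread already.
    @coroutine
    def compute():
        yield from sleep(0)
        return 42
    cf = run_coroutine_threadsafe(compute(), loop)
    return cf.result(timeout=5)  # blocks this thread, not the loop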
cosmopolitan/third_party/python/Lib/asyncio/protocols.py
"""Abstract Protocol class."""
__all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol',
'SubprocessProtocol']
class BaseProtocol:
"""Common base class for protocol interfaces.
    Usually a user implements protocols derived from BaseProtocol,
    such as Protocol or SubprocessProtocol.
    The only case in which BaseProtocol should be implemented directly
    is a write-only transport, such as a write pipe.
"""
def connection_made(self, transport):
"""Called when a connection is made.
The argument is the transport representing the pipe connection.
To receive data, wait for data_received() calls.
When the connection is closed, connection_lost() is called.
"""
def connection_lost(self, exc):
"""Called when the connection is lost or closed.
The argument is an exception object or None (the latter
meaning a regular EOF is received or the connection was
aborted or closed).
"""
def pause_writing(self):
"""Called when the transport's buffer goes over the high-water mark.
Pause and resume calls are paired -- pause_writing() is called
once when the buffer goes strictly over the high-water mark
        (even if subsequent writes increase the buffer size even
more), and eventually resume_writing() is called once when the
buffer size reaches the low-water mark.
Note that if the buffer size equals the high-water mark,
pause_writing() is not called -- it must go strictly over.
Conversely, resume_writing() is called when the buffer size is
equal or lower than the low-water mark. These end conditions
are important to ensure that things go as expected when either
mark is zero.
NOTE: This is the only Protocol callback that is not called
through EventLoop.call_soon() -- if it were, it would have no
effect when it's most needed (when the app keeps writing
without yielding until pause_writing() is called).
"""
def resume_writing(self):
"""Called when the transport's buffer drains below the low-water mark.
See pause_writing() for details.
"""
class Protocol(BaseProtocol):
"""Interface for stream protocol.
The user should implement this interface. They can inherit from
this class but don't need to. The implementations here do
nothing (they don't raise exceptions).
    When the user wants to request a transport, they pass a protocol
factory to a utility function (e.g., EventLoop.create_connection()).
When the connection is made successfully, connection_made() is
called with a suitable transport object. Then data_received()
will be called 0 or more times with data (bytes) received from the
transport; finally, connection_lost() will be called exactly once
with either an exception object or None as an argument.
State machine of calls:
start -> CM [-> DR*] [-> ER?] -> CL -> end
* CM: connection_made()
* DR: data_received()
* ER: eof_received()
* CL: connection_lost()
"""
def data_received(self, data):
"""Called when some data is received.
The argument is a bytes object.
"""
def eof_received(self):
"""Called when the other end calls write_eof() or equivalent.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
"""
class DatagramProtocol(BaseProtocol):
"""Interface for datagram protocol."""
def datagram_received(self, data, addr):
"""Called when some datagram is received."""
def error_received(self, exc):
"""Called when a send or receive operation raises an OSError.
(Other than BlockingIOError or InterruptedError.)
"""
class SubprocessProtocol(BaseProtocol):
"""Interface for protocol for subprocess calls."""
def pipe_data_received(self, fd, data):
"""Called when the subprocess writes data into stdout/stderr pipe.
fd is int file descriptor.
data is bytes object.
"""
def pipe_connection_lost(self, fd, exc):
"""Called when a file descriptor associated with the child process is
closed.
fd is the int file descriptor that was closed.
"""
def process_exited(self):
"""Called when subprocess has exited."""
cosmopolitan/third_party/python/Lib/asyncio/base_tasks.py
import linecache
import traceback
from . import base_futures
from . import coroutines
def _task_repr_info(task):
info = base_futures._future_repr_info(task)
if task._must_cancel:
# replace status
info[0] = 'cancelling'
coro = coroutines._format_coroutine(task._coro)
info.insert(1, 'coro=<%s>' % coro)
if task._fut_waiter is not None:
info.insert(2, 'wait_for=%r' % task._fut_waiter)
return info
def _task_get_stack(task, limit):
frames = []
try:
# 'async def' coroutines
f = task._coro.cr_frame
except AttributeError:
f = task._coro.gi_frame
if f is not None:
while f is not None:
if limit is not None:
if limit <= 0:
break
limit -= 1
frames.append(f)
f = f.f_back
frames.reverse()
elif task._exception is not None:
tb = task._exception.__traceback__
while tb is not None:
if limit is not None:
if limit <= 0:
break
limit -= 1
frames.append(tb.tb_frame)
tb = tb.tb_next
return frames
def _task_print_stack(task, limit, file):
extracted_list = []
checked = set()
for f in task.get_stack(limit=limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
if filename not in checked:
checked.add(filename)
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
extracted_list.append((filename, lineno, name, line))
exc = task._exception
if not extracted_list:
print('No stack for %r' % task, file=file)
elif exc is not None:
print('Traceback for %r (most recent call last):' % task,
file=file)
else:
print('Stack for %r (most recent call last):' % task,
file=file)
traceback.print_list(extracted_list, file=file)
if exc is not None:
for line in traceback.format_exception_only(exc.__class__, exc):
print(line, file=file, end='')
cosmopolitan/third_party/python/Lib/asyncio/locks.py
"""Synchronization primitives."""
__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore']
import collections
from . import compat
from . import events
from . import futures
from .coroutines import coroutine
class _ContextManager:
"""Context manager.
This enables the following idiom for acquiring and releasing a
lock around a block:
with (yield from lock):
<block>
while failing loudly when accidentally using:
with lock:
<block>
"""
def __init__(self, lock):
self._lock = lock
def __enter__(self):
# We have no use for the "as ..." clause in the with
# statement for locks.
return None
def __exit__(self, *args):
try:
self._lock.release()
finally:
self._lock = None # Crudely prevent reuse.
class _ContextManagerMixin:
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
# This must exist because __enter__ exists, even though that
# always raises; that's how the with-statement works.
pass
@coroutine
def __iter__(self):
# This is not a coroutine. It is meant to enable the idiom:
#
# with (yield from lock):
# <block>
#
# as an alternative to:
#
# yield from lock.acquire()
# try:
# <block>
# finally:
# lock.release()
yield from self.acquire()
return _ContextManager(self)
if compat.PY35:
def __await__(self):
# To make "with await lock" work.
yield from self.acquire()
return _ContextManager(self)
@coroutine
def __aenter__(self):
yield from self.acquire()
# We have no use for the "as ..." clause in the with
# statement for locks.
return None
@coroutine
def __aexit__(self, exc_type, exc, tb):
self.release()
class Lock(_ContextManagerMixin):
"""Primitive lock objects.
A primitive lock is a synchronization primitive that is not owned
by a particular coroutine when locked. A primitive lock is in one
of two states, 'locked' or 'unlocked'.
It is created in the unlocked state. It has two basic methods,
acquire() and release(). When the state is unlocked, acquire()
changes the state to locked and returns immediately. When the
state is locked, acquire() blocks until a call to release() in
another coroutine changes it to unlocked, then the acquire() call
resets it to locked and returns. The release() method should only
be called in the locked state; it changes the state to unlocked
and returns immediately. If an attempt is made to release an
unlocked lock, a RuntimeError will be raised.
When more than one coroutine is blocked in acquire() waiting for
the state to turn to unlocked, only one coroutine proceeds when a
    release() call resets the state to unlocked; the first coroutine
    blocked in acquire() is the one that proceeds.
acquire() is a coroutine and should be called with 'yield from'.
Locks also support the context management protocol. '(yield from lock)'
should be used as the context manager expression.
Usage:
lock = Lock()
...
yield from lock
try:
...
finally:
lock.release()
Context manager usage:
lock = Lock()
...
with (yield from lock):
...
Lock objects can be tested for locking state:
if not lock.locked():
yield from lock
else:
# lock is acquired
...
"""
def __init__(self, *, loop=None):
self._waiters = collections.deque()
self._locked = False
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self._locked else 'unlocked'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def locked(self):
"""Return True if lock is acquired."""
return self._locked
@coroutine
def acquire(self):
"""Acquire a lock.
This method blocks until the lock is unlocked, then sets it to
locked and returns True.
"""
if not self._locked and all(w.cancelled() for w in self._waiters):
self._locked = True
return True
fut = self._loop.create_future()
self._waiters.append(fut)
# Finally block should be called before the CancelledError
# handling as we don't want CancelledError to call
# _wake_up_first() and attempt to wake up itself.
try:
try:
yield from fut
finally:
self._waiters.remove(fut)
except futures.CancelledError:
if not self._locked:
self._wake_up_first()
raise
self._locked = True
return True
def release(self):
"""Release a lock.
When the lock is locked, reset it to unlocked, and return.
If any other coroutines are blocked waiting for the lock to become
unlocked, allow exactly one of them to proceed.
When invoked on an unlocked lock, a RuntimeError is raised.
There is no return value.
"""
if self._locked:
self._locked = False
self._wake_up_first()
else:
raise RuntimeError('Lock is not acquired.')
def _wake_up_first(self):
"""Wake up the first waiter if it isn't done."""
try:
fut = next(iter(self._waiters))
except StopIteration:
return
# .done() necessarily means that a waiter will wake up later on and
# either take the lock, or, if it was cancelled and lock wasn't
# taken already, will hit this again and wake up a new waiter.
if not fut.done():
fut.set_result(True)
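# Illustrative usage sketch (added; not part of the original module): the
# '(yield from lock)' context-manager idiom documented above, assuming a
# caller-owned event loop. The helper name is hypothetical.
def _example_lock(loop):
    lock = Lock(loop=loop)
    @coroutine
    def critical_section():
        with (yield from lock):   # acquires; releases on block exit
            return lock.locked()  # True while the lock is held
    return loop.run_until_complete(critical_section())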
class Event:
"""Asynchronous equivalent to threading.Event.
Class implementing event objects. An event manages a flag that can be set
to true with the set() method and reset to false with the clear() method.
The wait() method blocks until the flag is true. The flag is initially
false.
"""
def __init__(self, *, loop=None):
self._waiters = collections.deque()
self._value = False
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'set' if self._value else 'unset'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def is_set(self):
"""Return True if and only if the internal flag is true."""
return self._value
def set(self):
"""Set the internal flag to true. All coroutines waiting for it to
        become true are awakened. Coroutines that call wait() once the flag
        is true will not block at all.
"""
if not self._value:
self._value = True
for fut in self._waiters:
if not fut.done():
fut.set_result(True)
def clear(self):
"""Reset the internal flag to false. Subsequently, coroutines calling
wait() will block until set() is called to set the internal flag
to true again."""
self._value = False
@coroutine
def wait(self):
"""Block until the internal flag is true.
If the internal flag is true on entry, return True
immediately. Otherwise, block until another coroutine calls
set() to set the flag to true, then return True.
"""
if self._value:
return True
fut = self._loop.create_future()
self._waiters.append(fut)
try:
yield from fut
return True
finally:
self._waiters.remove(fut)
class Condition(_ContextManagerMixin):
"""Asynchronous equivalent to threading.Condition.
This class implements condition variable objects. A condition variable
allows one or more coroutines to wait until they are notified by another
coroutine.
A new Lock object is created and used as the underlying lock.
"""
def __init__(self, lock=None, *, loop=None):
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
if lock is None:
lock = Lock(loop=self._loop)
elif lock._loop is not self._loop:
raise ValueError("loop argument must agree with lock")
self._lock = lock
# Export the lock's locked(), acquire() and release() methods.
self.locked = lock.locked
self.acquire = lock.acquire
self.release = lock.release
self._waiters = collections.deque()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self.locked() else 'unlocked'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
@coroutine
def wait(self):
"""Wait until notified.
If the calling coroutine has not acquired the lock when this
method is called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks
until it is awakened by a notify() or notify_all() call for
the same condition variable in another coroutine. Once
awakened, it re-acquires the lock and returns True.
"""
if not self.locked():
raise RuntimeError('cannot wait on un-acquired lock')
self.release()
try:
fut = self._loop.create_future()
self._waiters.append(fut)
try:
yield from fut
return True
finally:
self._waiters.remove(fut)
finally:
# Must reacquire lock even if wait is cancelled
cancelled = False
while True:
try:
yield from self.acquire()
break
except futures.CancelledError:
cancelled = True
if cancelled:
raise futures.CancelledError
@coroutine
def wait_for(self, predicate):
"""Wait until a predicate becomes true.
        The predicate should be a callable whose result will be
interpreted as a boolean value. The final predicate value is
the return value.
"""
result = predicate()
while not result:
yield from self.wait()
result = predicate()
return result
def notify(self, n=1):
"""By default, wake up one coroutine waiting on this condition, if any.
If the calling coroutine has not acquired the lock when this method
is called, a RuntimeError is raised.
This method wakes up at most n of the coroutines waiting for the
condition variable; it is a no-op if no coroutines are waiting.
Note: an awakened coroutine does not actually return from its
wait() call until it can reacquire the lock. Since notify() does
not release the lock, its caller should.
"""
if not self.locked():
raise RuntimeError('cannot notify on un-acquired lock')
idx = 0
for fut in self._waiters:
if idx >= n:
break
if not fut.done():
idx += 1
fut.set_result(False)
def notify_all(self):
"""Wake up all threads waiting on this condition. This method acts
like notify(), but wakes up all waiting threads instead of one. If the
calling thread has not acquired the lock when this method is called,
a RuntimeError is raised.
"""
self.notify(len(self._waiters))
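# Illustrative usage sketch (added; not part of the original module): one
# coroutine waits on a Condition until another, holding the same lock,
# publishes state and notifies. The helper name is hypothetical.
def _example_condition(loop):
    cond = Condition(loop=loop)
    state = []
    @coroutine
    def consumer():
        with (yield from cond):
            yield from cond.wait_for(lambda: bool(state))
            return state[0]
    @coroutine
    def producer():
        with (yield from cond):
            state.append('ready')
            cond.notify()
    loop.create_task(producer())
    return loop.run_until_complete(consumer())  # 'ready'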
class Semaphore(_ContextManagerMixin):
"""A Semaphore implementation.
A semaphore manages an internal counter which is decremented by each
acquire() call and incremented by each release() call. The counter
can never go below zero; when acquire() finds that it is zero, it blocks,
    waiting until some other coroutine calls release().
Semaphores also support the context management protocol.
The optional argument gives the initial value for the internal
counter; it defaults to 1. If the value given is less than 0,
ValueError is raised.
"""
def __init__(self, value=1, *, loop=None):
if value < 0:
raise ValueError("Semaphore initial value must be >= 0")
self._value = value
self._waiters = collections.deque()
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
self._value)
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def _wake_up_next(self):
while self._waiters:
waiter = self._waiters.popleft()
if not waiter.done():
waiter.set_result(None)
return
def locked(self):
"""Returns True if semaphore can not be acquired immediately."""
return self._value == 0
@coroutine
def acquire(self):
"""Acquire a semaphore.
If the internal counter is larger than zero on entry,
decrement it by one and return True immediately. If it is
zero on entry, block, waiting until some other coroutine has
called release() to make it larger than 0, and then return
True.
"""
while self._value <= 0:
fut = self._loop.create_future()
self._waiters.append(fut)
try:
yield from fut
except:
# See the similar code in Queue.get.
fut.cancel()
if self._value > 0 and not fut.cancelled():
self._wake_up_next()
raise
self._value -= 1
return True
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When it was zero on entry and another coroutine is waiting for it to
become larger than zero again, wake up that coroutine.
"""
self._value += 1
self._wake_up_next()
class BoundedSemaphore(Semaphore):
"""A bounded semaphore implementation.
This raises ValueError in release() if it would increase the value
above the initial value.
"""
def __init__(self, value=1, *, loop=None):
self._bound_value = value
super().__init__(value, loop=loop)
def release(self):
if self._value >= self._bound_value:
raise ValueError('BoundedSemaphore released too many times')
super().release()
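# Illustrative usage sketch (added; not part of the original module): a
# Semaphore admitting at most two concurrent holders. The helper name is
# hypothetical.
def _example_semaphore(loop):
    sem = Semaphore(2, loop=loop)
    @coroutine
    def main():
        yield from sem.acquire()
        yield from sem.acquire()
        exhausted = sem.locked()  # True: both slots are taken
        sem.release()
        sem.release()
        return exhausted
    return loop.run_until_complete(main())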
cosmopolitan/third_party/python/Lib/asyncio/queues.py
"""Queues"""
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
import collections
import heapq
from . import compat
from . import events
from . import locks
from .coroutines import coroutine
class QueueEmpty(Exception):
"""Exception raised when Queue.get_nowait() is called on a Queue object
which is empty.
"""
pass
class QueueFull(Exception):
"""Exception raised when the Queue.put_nowait() method is called on a Queue
object which is full.
"""
pass
class Queue:
"""A queue, useful for coordinating producer and consumer coroutines.
If maxsize is less than or equal to zero, the queue size is infinite. If it
is an integer greater than 0, then "yield from put()" will block when the
queue reaches maxsize, until an item is removed by get().
Unlike the standard library Queue, you can reliably know this Queue's size
with qsize(), since your single-threaded asyncio application won't be
interrupted between calling qsize() and doing an operation on the Queue.
"""
def __init__(self, maxsize=0, *, loop=None):
if loop is None:
self._loop = events.get_event_loop()
else:
self._loop = loop
self._maxsize = maxsize
# Futures.
self._getters = collections.deque()
# Futures.
self._putters = collections.deque()
self._unfinished_tasks = 0
self._finished = locks.Event(loop=self._loop)
self._finished.set()
self._init(maxsize)
# These three are overridable in subclasses.
def _init(self, maxsize):
self._queue = collections.deque()
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
# End of the overridable methods.
def _wakeup_next(self, waiters):
# Wake up the next waiter (if any) that isn't cancelled.
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
break
def __repr__(self):
return '<{} at {:#x} {}>'.format(
type(self).__name__, id(self), self._format())
def __str__(self):
return '<{} {}>'.format(type(self).__name__, self._format())
def _format(self):
result = 'maxsize={!r}'.format(self._maxsize)
if getattr(self, '_queue', None):
result += ' _queue={!r}'.format(list(self._queue))
if self._getters:
result += ' _getters[{}]'.format(len(self._getters))
if self._putters:
result += ' _putters[{}]'.format(len(self._putters))
if self._unfinished_tasks:
result += ' tasks={}'.format(self._unfinished_tasks)
return result
def qsize(self):
"""Number of items in the queue."""
return len(self._queue)
@property
def maxsize(self):
"""Number of items allowed in the queue."""
return self._maxsize
def empty(self):
"""Return True if the queue is empty, False otherwise."""
return not self._queue
def full(self):
"""Return True if there are maxsize items in the queue.
Note: if the Queue was initialized with maxsize=0 (the default),
then full() is never True.
"""
if self._maxsize <= 0:
return False
else:
return self.qsize() >= self._maxsize
@coroutine
def put(self, item):
"""Put an item into the queue.
Put an item into the queue. If the queue is full, wait until a free
slot is available before adding item.
This method is a coroutine.
"""
while self.full():
putter = self._loop.create_future()
self._putters.append(putter)
try:
yield from putter
except:
putter.cancel() # Just in case putter is not done yet.
if not self.full() and not putter.cancelled():
# We were woken up by get_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._putters)
raise
return self.put_nowait(item)
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise QueueFull.
"""
if self.full():
raise QueueFull
self._put(item)
self._unfinished_tasks += 1
self._finished.clear()
self._wakeup_next(self._getters)
@coroutine
def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
This method is a coroutine.
"""
while self.empty():
getter = self._loop.create_future()
self._getters.append(getter)
try:
yield from getter
except:
getter.cancel() # Just in case getter is not done yet.
try:
self._getters.remove(getter)
except ValueError:
pass
if not self.empty() and not getter.cancelled():
# We were woken up by put_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._getters)
raise
return self.get_nowait()
def get_nowait(self):
"""Remove and return an item from the queue.
Return an item if one is immediately available, else raise QueueEmpty.
"""
if self.empty():
raise QueueEmpty
item = self._get()
self._wakeup_next(self._putters)
return item
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
Raises ValueError if called more times than there were items placed in
the queue.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()
@coroutine
def join(self):
"""Block until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer calls task_done() to
indicate that the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
if self._unfinished_tasks > 0:
yield from self._finished.wait()
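# Illustrative usage sketch (added; not part of the original module): a
# bounded producer/consumer pair coordinated through join()/task_done(),
# assuming a caller-owned loop. The helper name is hypothetical.
def _example_queue(loop):
    from . import futures  # local import; only this sketch needs it
    q = Queue(maxsize=2, loop=loop)
    consumed = []
    @coroutine
    def producer():
        for i in range(4):
            yield from q.put(i)  # blocks while the queue is full
    @coroutine
    def consumer():
        while True:
            item = yield from q.get()
            consumed.append(item)
            q.task_done()
    @coroutine
    def main():
        worker = loop.create_task(consumer())
        yield from producer()  # returns once everything is enqueued
        yield from q.join()    # waits for task_done() on every item
        worker.cancel()
        try:
            yield from worker
        except futures.CancelledError:
            pass
        return consumed        # [0, 1, 2, 3]
    return loop.run_until_complete(main())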
class PriorityQueue(Queue):
"""A subclass of Queue; retrieves entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
"""
def _init(self, maxsize):
self._queue = []
def _put(self, item, heappush=heapq.heappush):
heappush(self._queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self._queue)
class LifoQueue(Queue):
"""A subclass of Queue that retrieves most recently added entries first."""
def _init(self, maxsize):
self._queue = []
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.pop()
if not compat.PY35:
JoinableQueue = Queue
"""Deprecated alias for Queue."""
__all__.append('JoinableQueue')
cosmopolitan/third_party/python/Lib/asyncio/selector_events.py
"""Event loop using a selector and related classes.
A selector is a "notify-when-ready" multiplexer. For a subclass which
also includes support for signal handling, see the unix_events sub-module.
"""
__all__ = ['BaseSelectorEventLoop']
import collections
import errno
import functools
import socket
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import compat
from . import constants
from . import events
from . import futures
import selectors
from . import transports
from . import sslproto
from .coroutines import coroutine
from .log import logger
def _test_selector_event(selector, fd, event):
# Test if the selector is monitoring 'event' events
# for the file descriptor 'fd'.
try:
key = selector.get_key(fd)
except KeyError:
return False
else:
return bool(key.events & event)
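# Illustrative usage sketch (added; not part of the original module): how
# the helper above answers "is this fd being polled for reads?" against
# any selectors.BaseSelector. `sock` is a hypothetical socket already
# registered with `selector`; the helper name is hypothetical too.
def _example_test_selector_event(selector, sock):
    return _test_selector_event(selector, sock.fileno(),
                                selectors.EVENT_READ)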
class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""Selector event loop.
See events.EventLoop for API specification.
"""
def __init__(self, selector=None):
super().__init__()
if selector is None:
selector = selectors.DefaultSelector()
logger.debug('Using selector: %s', selector.__class__.__name__)
self._selector = selector
self._make_self_pipe()
self._transports = weakref.WeakValueDictionary()
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
return _SelectorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None):
if not sslproto._is_sslproto_available():
return self._make_legacy_ssl_transport(
rawsock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
extra=extra, server=server)
ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
server_side, server_hostname)
_SelectorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport
def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext,
waiter, *,
server_side=False, server_hostname=None,
extra=None, server=None):
# Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used
# on Python 3.4 and older, when ssl.MemoryBIO is not available.
return _SelectorSslTransport(
self, rawsock, protocol, sslcontext, waiter,
server_side, server_hostname, extra, server)
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
return _SelectorDatagramTransport(self, sock, protocol,
address, waiter, extra)
def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
self._close_self_pipe()
super().close()
if self._selector is not None:
self._selector.close()
self._selector = None
def _socketpair(self):
raise NotImplementedError
def _close_self_pipe(self):
self._remove_reader(self._ssock.fileno())
self._ssock.close()
self._ssock = None
self._csock.close()
self._csock = None
self._internal_fds -= 1
def _make_self_pipe(self):
# A self-socket, really. :-)
self._ssock, self._csock = self._socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
self._add_reader(self._ssock.fileno(), self._read_from_self)
def _process_self_data(self, data):
pass
def _read_from_self(self):
while True:
try:
data = self._ssock.recv(4096)
if not data:
break
self._process_self_data(data)
except InterruptedError:
continue
except BlockingIOError:
break
def _write_to_self(self):
# This may be called from a different thread, possibly after
# _close_self_pipe() has been called or even while it is
# running. Guard for self._csock being None or closed. When
# a socket is closed, send() raises OSError (with errno set to
# EBADF, but let's not rely on the exact error code).
csock = self._csock
if csock is not None:
try:
csock.send(b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
"self-pipe socket",
exc_info=True)
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None, backlog=100):
self._add_reader(sock.fileno(), self._accept_connection,
protocol_factory, sock, sslcontext, server, backlog)
def _accept_connection(self, protocol_factory, sock,
sslcontext=None, server=None, backlog=100):
# This method is only called once for each event loop tick where the
# listening socket has triggered an EVENT_READ. There may be multiple
# connections waiting for an .accept() so it is called in a loop.
# See https://bugs.python.org/issue27906 for more details.
for _ in range(backlog):
try:
conn, addr = sock.accept()
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
conn.setblocking(False)
except (BlockingIOError, InterruptedError, ConnectionAbortedError):
# Early exit because the socket accept buffer is empty.
return None
except OSError as exc:
# There's nowhere to send the error, so just log it.
if exc.errno in (errno.EMFILE, errno.ENFILE,
errno.ENOBUFS, errno.ENOMEM):
                    # Some platforms (e.g. Linux) keep reporting the FD as
# ready, so we remove the read handler temporarily.
# We'll try again in a while.
self.call_exception_handler({
'message': 'socket.accept() out of system resource',
'exception': exc,
'socket': sock,
})
self._remove_reader(sock.fileno())
self.call_later(constants.ACCEPT_RETRY_DELAY,
self._start_serving,
protocol_factory, sock, sslcontext, server,
backlog)
else:
raise # The event loop will catch, log and ignore it.
else:
extra = {'peername': addr}
accept = self._accept_connection2(protocol_factory, conn, extra,
sslcontext, server)
self.create_task(accept)
@coroutine
def _accept_connection2(self, protocol_factory, conn, extra,
sslcontext=None, server=None):
protocol = None
transport = None
try:
protocol = protocol_factory()
waiter = self.create_future()
if sslcontext:
transport = self._make_ssl_transport(
conn, protocol, sslcontext, waiter=waiter,
server_side=True, extra=extra, server=server)
else:
transport = self._make_socket_transport(
conn, protocol, waiter=waiter, extra=extra,
server=server)
try:
yield from waiter
except:
transport.close()
raise
# It's now up to the protocol to handle the connection.
except Exception as exc:
if self._debug:
context = {
'message': ('Error on transport creation '
'for incoming connection'),
'exception': exc,
}
if protocol is not None:
context['protocol'] = protocol
if transport is not None:
context['transport'] = transport
self.call_exception_handler(context)
def _ensure_fd_no_transport(self, fd):
try:
transport = self._transports[fd]
except KeyError:
pass
else:
if not transport.is_closing():
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(
fd, transport))
def _add_reader(self, fd, callback, *args):
self._check_closed()
handle = events.Handle(callback, args, self)
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, selectors.EVENT_READ,
(handle, None))
else:
mask, (reader, writer) = key.events, key.data
self._selector.modify(fd, mask | selectors.EVENT_READ,
(handle, writer))
if reader is not None:
reader.cancel()
def _remove_reader(self, fd):
if self.is_closed():
return False
try:
key = self._selector.get_key(fd)
except KeyError:
return False
else:
mask, (reader, writer) = key.events, key.data
mask &= ~selectors.EVENT_READ
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (None, writer))
if reader is not None:
reader.cancel()
return True
else:
return False
def _add_writer(self, fd, callback, *args):
self._check_closed()
handle = events.Handle(callback, args, self)
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, selectors.EVENT_WRITE,
(None, handle))
else:
mask, (reader, writer) = key.events, key.data
self._selector.modify(fd, mask | selectors.EVENT_WRITE,
(reader, handle))
if writer is not None:
writer.cancel()
def _remove_writer(self, fd):
"""Remove a writer callback."""
if self.is_closed():
return False
try:
key = self._selector.get_key(fd)
except KeyError:
return False
else:
mask, (reader, writer) = key.events, key.data
# Remove both writer and connector.
mask &= ~selectors.EVENT_WRITE
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (reader, None))
if writer is not None:
writer.cancel()
return True
else:
return False
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def sock_recv(self, sock, n):
"""Receive data from the socket.
The return value is a bytes object representing the data received.
        The maximum amount of data to be received at once is specified
        by n.
This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
self._sock_recv(fut, None, sock, n)
return fut
def _sock_recv(self, fut, registered_fd, sock, n):
# _sock_recv() can add itself as an I/O callback if the operation can't
# be done immediately. Don't use it directly, call sock_recv().
if registered_fd is not None:
# Remove the callback early. It should be rare that the
# selector says the fd is ready but the call still returns
# EAGAIN, and I am willing to take a hit in that case in
# order to simplify the common case.
self.remove_reader(registered_fd)
if fut.cancelled():
return
try:
data = sock.recv(n)
except (BlockingIOError, InterruptedError):
fd = sock.fileno()
self.add_reader(fd, self._sock_recv, fut, fd, sock, n)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(data)
def sock_sendall(self, sock, data):
"""Send data to the socket.
The socket must be connected to a remote socket. This method continues
to send data from data until either all data has been sent or an
error occurs. None is returned on success. On error, an exception is
raised, and there is no way to determine how much data, if any, was
successfully processed by the receiving end of the connection.
This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
if data:
self._sock_sendall(fut, None, sock, data)
else:
fut.set_result(None)
return fut
def _sock_sendall(self, fut, registered_fd, sock, data):
if registered_fd is not None:
self.remove_writer(registered_fd)
if fut.cancelled():
return
try:
n = sock.send(data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
fut.set_exception(exc)
return
if n == len(data):
fut.set_result(None)
else:
if n:
data = data[n:]
fd = sock.fileno()
self.add_writer(fd, self._sock_sendall, fut, fd, sock, data)
@coroutine
def sock_connect(self, sock, address):
"""Connect to a remote socket at address.
This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX:
resolved = base_events._ensure_resolved(
address, family=sock.family, proto=sock.proto, loop=self)
if not resolved.done():
yield from resolved
_, _, _, _, address = resolved.result()[0]
fut = self.create_future()
self._sock_connect(fut, sock, address)
return (yield from fut)
def _sock_connect(self, fut, sock, address):
fd = sock.fileno()
try:
sock.connect(address)
except (BlockingIOError, InterruptedError):
# Issue #23618: When the C function connect() fails with EINTR, the
# connection runs in background. We have to wait until the socket
# becomes writable to be notified when the connection succeed or
# fails.
fut.add_done_callback(
functools.partial(self._sock_connect_done, fd))
self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(None)
def _sock_connect_done(self, fd, fut):
self.remove_writer(fd)
def _sock_connect_cb(self, fut, sock, address):
if fut.cancelled():
return
try:
err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# Jump to any except clause below.
raise OSError(err, 'Connect call failed %s' % (address,))
except (BlockingIOError, InterruptedError):
# socket is still registered, the callback will be retried later
pass
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(None)
def sock_accept(self, sock):
"""Accept a connection.
The socket must be bound to an address and listening for connections.
The return value is a pair (conn, address) where conn is a new socket
object usable to send and receive data on the connection, and address
is the address bound to the socket on the other end of the connection.
This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
self._sock_accept(fut, False, sock)
return fut
def _sock_accept(self, fut, registered, sock):
fd = sock.fileno()
if registered:
self.remove_reader(fd)
if fut.cancelled():
return
try:
conn, address = sock.accept()
conn.setblocking(False)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_accept, fut, True, sock)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result((conn, address))
def _process_events(self, event_list):
for key, mask in event_list:
fileobj, (reader, writer) = key.fileobj, key.data
if mask & selectors.EVENT_READ and reader is not None:
if reader._cancelled:
self._remove_reader(fileobj)
else:
self._add_callback(reader)
if mask & selectors.EVENT_WRITE and writer is not None:
if writer._cancelled:
self._remove_writer(fileobj)
else:
self._add_callback(writer)
def _stop_serving(self, sock):
self._remove_reader(sock.fileno())
sock.close()
class _SelectorTransport(transports._FlowControlMixin,
transports.Transport):
max_size = 256 * 1024 # Buffer size passed to recv().
_buffer_factory = bytearray # Constructs initial value for self._buffer.
# Attribute used in the destructor: it must be set even if the constructor
# is not called (see _SelectorSslTransport which may start by raising an
# exception)
_sock = None
def __init__(self, loop, sock, protocol, extra=None, server=None):
super().__init__(extra, loop)
self._extra['socket'] = sock
self._extra['sockname'] = sock.getsockname()
if 'peername' not in self._extra:
try:
self._extra['peername'] = sock.getpeername()
except socket.error:
self._extra['peername'] = None
self._sock = sock
self._sock_fd = sock.fileno()
self._protocol = protocol
self._protocol_connected = True
self._server = server
self._buffer = self._buffer_factory()
self._conn_lost = 0 # Set when call to connection_lost scheduled.
self._closing = False # Set when close() called.
if self._server is not None:
self._server._attach()
loop._transports[self._sock_fd] = self
def __repr__(self):
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._sock_fd)
# test if the transport was closed
if self._loop is not None and not self._loop.is_closed():
polling = _test_selector_event(self._loop._selector,
self._sock_fd, selectors.EVENT_READ)
if polling:
info.append('read=polling')
else:
info.append('read=idle')
polling = _test_selector_event(self._loop._selector,
self._sock_fd,
selectors.EVENT_WRITE)
if polling:
state = 'polling'
else:
state = 'idle'
bufsize = self.get_write_buffer_size()
info.append('write=<%s, bufsize=%s>' % (state, bufsize))
return '<%s>' % ' '.join(info)
def abort(self):
self._force_close(None)
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closing
def close(self):
if self._closing:
return
self._closing = True
self._loop._remove_reader(self._sock_fd)
if not self._buffer:
self._conn_lost += 1
self._loop._remove_writer(self._sock_fd)
self._loop.call_soon(self._call_connection_lost, None)
# On Python 3.3 and older, objects with a destructor that are part of a
# reference cycle are never destroyed. That's no longer the case on
# Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if self._sock is not None:
warnings.warn("unclosed transport %r" % self, ResourceWarning,
source=self)
self._sock.close()
def _fatal_error(self, exc, message='Fatal error on transport'):
# Should be called from exception handler only.
if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._force_close(exc)
def _force_close(self, exc):
if self._conn_lost:
return
if self._buffer:
self._buffer.clear()
self._loop._remove_writer(self._sock_fd)
if not self._closing:
self._closing = True
self._loop._remove_reader(self._sock_fd)
self._conn_lost += 1
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
if self._protocol_connected:
self._protocol.connection_lost(exc)
finally:
self._sock.close()
self._sock = None
self._protocol = None
self._loop = None
server = self._server
if server is not None:
server._detach()
self._server = None
def get_write_buffer_size(self):
return len(self._buffer)
def _add_reader(self, fd, callback, *args):
if self._closing:
return
self._loop._add_reader(fd, callback, *args)
class _SelectorSocketTransport(_SelectorTransport):
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(loop, sock, protocol, extra, server)
self._eof = False
self._paused = False
# Disable the Nagle algorithm -- small writes will be
# sent without waiting for the TCP ACK. This generally
# decreases the latency (in some cases significantly).
base_events._set_nodelay(self._sock)
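# (_set_nodelay is, roughly, a guarded
#  sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1); see
#  base_events for the exact socket-family check.)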
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
self._loop.call_soon(self._add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def pause_reading(self):
if self._closing or self._paused:
return
self._paused = True
self._loop._remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if self._closing or not self._paused:
return
self._paused = False
self._add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _read_ready(self):
if self._conn_lost:
return
try:
data = self._sock.recv(self.max_size)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on socket transport')
else:
if data:
self._protocol.data_received(data)
else:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
keep_open = self._protocol.eof_received()
if keep_open:
# We're keeping the connection open so the
# protocol can write more, but we still can't
# receive more, so remove the reader callback.
self._loop._remove_reader(self._sock_fd)
else:
self.close()
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be a bytes-like object, '
'not %r' % type(data).__name__)
if self._eof:
raise RuntimeError('Cannot call write() after write_eof()')
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Optimization: try to send now.
try:
n = self._sock.send(data)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._fatal_error(exc, 'Fatal write error on socket transport')
return
else:
data = data[n:]
if not data:
return
# Not all was written; register write handler.
self._loop._add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
self._maybe_pause_protocol()
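# (Flow summary: write() attempts a direct send first; any unsent tail is
#  appended to self._buffer, and _write_ready() below drains it whenever
#  the selector reports the fd writable.)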
def _write_ready(self):
assert self._buffer, 'Data should not be empty'
if self._conn_lost:
return
try:
n = self._sock.send(self._buffer)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._loop._remove_writer(self._sock_fd)
self._buffer.clear()
self._fatal_error(exc, 'Fatal write error on socket transport')
else:
if n:
del self._buffer[:n]
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop._remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
elif self._eof:
self._sock.shutdown(socket.SHUT_WR)
def write_eof(self):
if self._closing or self._eof:
return
self._eof = True
if not self._buffer:
self._sock.shutdown(socket.SHUT_WR)
def can_write_eof(self):
return True
class _SelectorSslTransport(_SelectorTransport):
_buffer_factory = bytearray
def __init__(self, loop, rawsock, protocol, sslcontext, waiter=None,
server_side=False, server_hostname=None,
extra=None, server=None):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')
if not sslcontext:
sslcontext = sslproto._create_transport_context(server_side, server_hostname)
wrap_kwargs = {
'server_side': server_side,
'do_handshake_on_connect': False,
}
if server_hostname and not server_side:
wrap_kwargs['server_hostname'] = server_hostname
sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)
super().__init__(loop, sslsock, protocol, extra, server)
# the protocol connection is only made after the SSL handshake
self._protocol_connected = False
self._server_hostname = server_hostname
self._waiter = waiter
self._sslcontext = sslcontext
self._paused = False
# SSL-specific extra info. (peercert is set later)
self._extra.update(sslcontext=sslcontext)
if self._loop.get_debug():
logger.debug("%r starts SSL handshake", self)
start_time = self._loop.time()
else:
start_time = None
self._on_handshake(start_time)
def _wakeup_waiter(self, exc=None):
if self._waiter is None:
return
if not self._waiter.cancelled():
if exc is not None:
self._waiter.set_exception(exc)
else:
self._waiter.set_result(None)
self._waiter = None
def _on_handshake(self, start_time):
try:
self._sock.do_handshake()
except ssl.SSLWantReadError:
self._loop._add_reader(self._sock_fd,
self._on_handshake, start_time)
return
except ssl.SSLWantWriteError:
self._loop._add_writer(self._sock_fd,
self._on_handshake, start_time)
return
except BaseException as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed",
self, exc_info=True)
self._loop._remove_reader(self._sock_fd)
self._loop._remove_writer(self._sock_fd)
self._sock.close()
self._wakeup_waiter(exc)
if isinstance(exc, Exception):
return
else:
raise
self._loop._remove_reader(self._sock_fd)
self._loop._remove_writer(self._sock_fd)
peercert = self._sock.getpeercert()
if not hasattr(self._sslcontext, 'check_hostname'):
# Verify hostname if requested; Python 3.4+ uses check_hostname
# and checks the hostname in do_handshake()
if (self._server_hostname and
self._sslcontext.verify_mode != ssl.CERT_NONE):
try:
ssl.match_hostname(peercert, self._server_hostname)
except Exception as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed "
"on matching the hostname",
self, exc_info=True)
self._sock.close()
self._wakeup_waiter(exc)
return
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=self._sock.cipher(),
compression=self._sock.compression(),
ssl_object=self._sock,
)
self._read_wants_write = False
self._write_wants_read = False
self._loop._add_reader(self._sock_fd, self._read_ready)
self._protocol_connected = True
self._loop.call_soon(self._protocol.connection_made, self)
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(self._wakeup_waiter)
if self._loop.get_debug():
dt = self._loop.time() - start_time
logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
def pause_reading(self):
# XXX This is a bit icky, given the comment at the top of
# _read_ready(). Is it possible to evoke a deadlock? I don't
# know, although it doesn't look like it; write() will still
# accept more data for the buffer and eventually the app will
# call resume_reading() again, and things will flow again.
if self._closing:
raise RuntimeError('Cannot pause_reading() when closing')
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
self._loop._remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if not self._paused:
raise RuntimeError('Not paused')
self._paused = False
if self._closing:
return
self._loop._add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _read_ready(self):
if self._conn_lost:
return
if self._write_wants_read:
self._write_wants_read = False
self._write_ready()
if self._buffer:
self._loop._add_writer(self._sock_fd, self._write_ready)
try:
data = self._sock.recv(self.max_size)
except (BlockingIOError, InterruptedError, ssl.SSLWantReadError):
pass
except ssl.SSLWantWriteError:
self._read_wants_write = True
self._loop._remove_reader(self._sock_fd)
self._loop._add_writer(self._sock_fd, self._write_ready)
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on SSL transport')
else:
if data:
self._protocol.data_received(data)
else:
try:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
keep_open = self._protocol.eof_received()
if keep_open:
logger.warning('returning true from eof_received() '
'has no effect when using ssl')
finally:
self.close()
def _write_ready(self):
if self._conn_lost:
return
if self._read_wants_write:
self._read_wants_write = False
self._read_ready()
if not (self._paused or self._closing):
self._loop._add_reader(self._sock_fd, self._read_ready)
if self._buffer:
try:
n = self._sock.send(self._buffer)
except (BlockingIOError, InterruptedError, ssl.SSLWantWriteError):
n = 0
except ssl.SSLWantReadError:
n = 0
self._loop._remove_writer(self._sock_fd)
self._write_wants_read = True
except Exception as exc:
self._loop._remove_writer(self._sock_fd)
self._buffer.clear()
self._fatal_error(exc, 'Fatal write error on SSL transport')
return
if n:
del self._buffer[:n]
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop._remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be a bytes-like object, '
'not %r' % type(data).__name__)
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
self._loop._add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
self._maybe_pause_protocol()
def can_write_eof(self):
return False
class _SelectorDatagramTransport(_SelectorTransport):
_buffer_factory = collections.deque
def __init__(self, loop, sock, protocol, address=None,
waiter=None, extra=None):
super().__init__(loop, sock, protocol, extra)
self._address = address
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
self._loop.call_soon(self._add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def get_write_buffer_size(self):
return sum(len(data) for data, _ in self._buffer)
def _read_ready(self):
if self._conn_lost:
return
try:
data, addr = self._sock.recvfrom(self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
self._protocol.error_received(exc)
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on datagram transport')
else:
self._protocol.datagram_received(data, addr)
def sendto(self, data, addr=None):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be a bytes-like object, '
'not %r' % type(data).__name__)
if not data:
return
if self._address and addr not in (None, self._address):
raise ValueError('Invalid address: must be None or %s' %
(self._address,))
if self._conn_lost and self._address:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Attempt to send it right away first.
try:
if self._address:
self._sock.send(data)
else:
self._sock.sendto(data, addr)
return
except (BlockingIOError, InterruptedError):
self._loop._add_writer(self._sock_fd, self._sendto_ready)
except OSError as exc:
self._protocol.error_received(exc)
return
except Exception as exc:
self._fatal_error(exc,
'Fatal write error on datagram transport')
return
# Ensure that what we buffer is immutable.
self._buffer.append((bytes(data), addr))
self._maybe_pause_protocol()
def _sendto_ready(self):
while self._buffer:
data, addr = self._buffer.popleft()
try:
if self._address:
self._sock.send(data)
else:
self._sock.sendto(data, addr)
except (BlockingIOError, InterruptedError):
self._buffer.appendleft((data, addr)) # Try again later.
break
except OSError as exc:
self._protocol.error_received(exc)
return
except Exception as exc:
self._fatal_error(exc,
'Fatal write error on datagram transport')
return
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop._remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
# cosmopolitan/third_party/python/Lib/asyncio/coroutines.py
__all__ = ['coroutine',
'iscoroutinefunction', 'iscoroutine']
import functools
import inspect
import opcode
import os
import sys
import traceback
import types
from . import compat
from . import constants
from . import events
from . import base_futures
from .log import logger
# Opcode of "yield from" instruction
_YIELD_FROM = opcode.opmap['YIELD_FROM']
# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below). That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call. Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines. A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
_DEBUG = (not sys.flags.ignore_environment and
bool(os.environ.get('PYTHONASYNCIODEBUG')))
try:
_types_coroutine = types.coroutine
_types_CoroutineType = types.CoroutineType
except AttributeError:
# Python 3.4
_types_coroutine = None
_types_CoroutineType = None
try:
_inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
# Python 3.4
_inspect_iscoroutinefunction = lambda func: False
try:
from collections.abc import Coroutine as _CoroutineABC, \
Awaitable as _AwaitableABC
except ImportError:
_CoroutineABC = _AwaitableABC = None
# Check for CPython issue #21209
def has_yield_from_bug():
class MyGen:
def __init__(self):
self.send_args = None
def __iter__(self):
return self
def __next__(self):
return 42
def send(self, *what):
self.send_args = what
return None
def yield_from_gen(gen):
yield from gen
value = (1, 2, 3)
gen = MyGen()
coro = yield_from_gen(gen)
next(coro)
coro.send(value)
return gen.send_args != (value,)
_YIELD_FROM_BUG = has_yield_from_bug()
del has_yield_from_bug
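# If the bug is present, generator.send(some_tuple) reaching a delegate via
# "yield from" is unpacked into positional arguments. CoroWrapper.send()
# below compensates by checking whether its caller is suspended at a
# YIELD_FROM opcode before deciding how to interpret *value.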
def debug_wrapper(gen):
# This function is called from 'sys.set_coroutine_wrapper'.
# We only wrap here coroutines defined via 'async def' syntax.
# Generator-based coroutines are wrapped in @coroutine
# decorator.
return CoroWrapper(gen, None)
class CoroWrapper:
# Wrapper for coroutine object in _DEBUG mode.
def __init__(self, gen, func=None):
assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
self.gen = gen
self.func = func # Used to unwrap @coroutine decorator
self._source_traceback = events.extract_stack(sys._getframe(1))
self.__name__ = getattr(gen, '__name__', None)
self.__qualname__ = getattr(gen, '__qualname__', None)
def __repr__(self):
coro_repr = _format_coroutine(self)
if self._source_traceback:
frame = self._source_traceback[-1]
coro_repr += ', created at %s:%s' % (frame[0], frame[1])
return '<%s %s>' % (self.__class__.__name__, coro_repr)
def __iter__(self):
return self
def __next__(self):
return self.gen.send(None)
if _YIELD_FROM_BUG:
# Workaround for CPython issue #21209: using "yield from" and a custom
# generator, generator.send(tuple) unpacks the tuple instead of passing
# the tuple unchanged. Check if the caller is a generator using "yield
# from" to decide if the parameter should be unpacked or not.
def send(self, *value):
frame = sys._getframe()
caller = frame.f_back
assert caller.f_lasti >= 0
if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
value = value[0]
return self.gen.send(value)
else:
def send(self, value):
return self.gen.send(value)
def throw(self, type, value=None, traceback=None):
return self.gen.throw(type, value, traceback)
def close(self):
return self.gen.close()
@property
def gi_frame(self):
return self.gen.gi_frame
@property
def gi_running(self):
return self.gen.gi_running
@property
def gi_code(self):
return self.gen.gi_code
if compat.PY35:
def __await__(self):
cr_await = getattr(self.gen, 'cr_await', None)
if cr_await is not None:
raise RuntimeError(
"Cannot await on coroutine {!r} while it's "
"awaiting for {!r}".format(self.gen, cr_await))
return self
@property
def gi_yieldfrom(self):
return self.gen.gi_yieldfrom
@property
def cr_await(self):
return self.gen.cr_await
@property
def cr_running(self):
return self.gen.cr_running
@property
def cr_code(self):
return self.gen.cr_code
@property
def cr_frame(self):
return self.gen.cr_frame
def __del__(self):
# Be careful accessing self.gen.gi_frame -- self.gen might not exist.
gen = getattr(self, 'gen', None)
frame = getattr(gen, 'gi_frame', None)
if frame is None:
frame = getattr(gen, 'cr_frame', None)
if frame is not None and frame.f_lasti == -1:
msg = '%r was never yielded from' % self
tb = getattr(self, '_source_traceback', ())
if tb:
tb = ''.join(traceback.format_list(tb))
msg += (f'\nCoroutine object created at '
f'(most recent call last, truncated to '
f'{constants.DEBUG_STACK_DEPTH} last lines):\n')
msg += tb.rstrip()
logger.error(msg)
def coroutine(func):
"""Decorator to mark coroutines.
If the coroutine is not yielded from before it is destroyed,
an error message is logged.
"""
if _inspect_iscoroutinefunction(func):
# In Python 3.5 that's all we need to do for coroutines
# defined with "async def".
# Wrapping in CoroWrapper will happen via
# 'sys.set_coroutine_wrapper' function.
return func
if inspect.isgeneratorfunction(func):
coro = func
else:
@functools.wraps(func)
def coro(*args, **kw):
res = func(*args, **kw)
if (base_futures.isfuture(res) or inspect.isgenerator(res) or
isinstance(res, CoroWrapper)):
res = yield from res
elif _AwaitableABC is not None:
# If 'func' returns an Awaitable (new in 3.5) we
# want to run it.
try:
await_meth = res.__await__
except AttributeError:
pass
else:
if isinstance(res, _AwaitableABC):
res = yield from await_meth()
return res
if not _DEBUG:
if _types_coroutine is None:
wrapper = coro
else:
wrapper = _types_coroutine(coro)
else:
@functools.wraps(func)
def wrapper(*args, **kwds):
w = CoroWrapper(coro(*args, **kwds), func=func)
if w._source_traceback:
del w._source_traceback[-1]
# Python < 3.5 does not implement __qualname__
# on generator objects, so we set it manually.
# We use getattr as some callables (such as
# functools.partial) may lack __qualname__.
w.__name__ = getattr(func, '__name__', None)
w.__qualname__ = getattr(func, '__qualname__', None)
return w
wrapper._is_coroutine = _is_coroutine # For iscoroutinefunction().
return wrapper
# A marker for iscoroutinefunction.
_is_coroutine = object()
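# Illustrative sketch (not part of the module): a generator-based coroutine
# marked with the decorator; on Python 3.5+ "async def" is preferred.
#
#   @coroutine
#   def slow_add(a, b):
#       yield from asyncio.sleep(0.1)
#       return a + b
#
#   assert iscoroutinefunction(slow_add)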
def iscoroutinefunction(func):
"""Return True if func is a decorated coroutine function."""
return (getattr(func, '_is_coroutine', None) is _is_coroutine or
_inspect_iscoroutinefunction(func))
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if _CoroutineABC is not None:
_COROUTINE_TYPES += (_CoroutineABC,)
if _types_CoroutineType is not None:
# Prioritize the native coroutine check to speed up
# asyncio.iscoroutine.
_COROUTINE_TYPES = (_types_CoroutineType,) + _COROUTINE_TYPES
def iscoroutine(obj):
"""Return True if obj is a coroutine object."""
return isinstance(obj, _COROUTINE_TYPES)
def _format_coroutine(coro):
assert iscoroutine(coro)
if not hasattr(coro, 'cr_code') and not hasattr(coro, 'gi_code'):
# Most likely a built-in type or a Cython coroutine.
# Built-in types might not have __qualname__ or __name__.
coro_name = getattr(
coro, '__qualname__',
getattr(coro, '__name__', type(coro).__name__))
coro_name = '{}()'.format(coro_name)
running = False
try:
running = coro.cr_running
except AttributeError:
try:
running = coro.gi_running
except AttributeError:
pass
if running:
return '{} running'.format(coro_name)
else:
return coro_name
coro_name = None
if isinstance(coro, CoroWrapper):
func = coro.func
coro_name = coro.__qualname__
if coro_name is not None:
coro_name = '{}()'.format(coro_name)
else:
func = coro
if coro_name is None:
coro_name = events._format_callback(func, (), {})
coro_code = None
if hasattr(coro, 'cr_code') and coro.cr_code:
coro_code = coro.cr_code
elif hasattr(coro, 'gi_code') and coro.gi_code:
coro_code = coro.gi_code
coro_frame = None
if hasattr(coro, 'cr_frame') and coro.cr_frame:
coro_frame = coro.cr_frame
elif hasattr(coro, 'gi_frame') and coro.gi_frame:
coro_frame = coro.gi_frame
filename = '<empty co_filename>'
if coro_code and coro_code.co_filename:
filename = coro_code.co_filename
lineno = 0
coro_repr = coro_name
if (isinstance(coro, CoroWrapper) and
not inspect.isgeneratorfunction(coro.func) and
coro.func is not None):
source = events._get_function_source(coro.func)
if source is not None:
filename, lineno = source
if coro_frame is None:
coro_repr = ('%s done, defined at %s:%s'
% (coro_name, filename, lineno))
else:
coro_repr = ('%s running, defined at %s:%s'
% (coro_name, filename, lineno))
elif coro_frame is not None:
lineno = coro_frame.f_lineno
coro_repr = ('%s running at %s:%s'
% (coro_name, filename, lineno))
elif coro_code:
lineno = coro_code.co_firstlineno
coro_repr = ('%s done, defined at %s:%s'
% (coro_name, filename, lineno))
return coro_repr
# cosmopolitan/third_party/python/Lib/asyncio/windows_utils.py
"""
Various Windows specific bits and pieces
"""
import sys
if sys.platform != 'win32': # pragma: no cover
raise ImportError('win32 only')
import _winapi
import itertools
import msvcrt
import os
import socket
import subprocess
import tempfile
import warnings
__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle']
# Constants/globals
BUFSIZE = 8192
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
_mmap_counter = itertools.count()
if hasattr(socket, 'socketpair'):
# Since Python 3.5, socket.socketpair() is now also available on Windows
socketpair = socket.socketpair
else:
# Replacement for socket.socketpair()
def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
"""A socket pair usable as a self-pipe, for Windows.
Origin: https://gist.github.com/4325783, by Geert Jansen.
Public domain.
"""
if family == socket.AF_INET:
host = '127.0.0.1'
elif family == socket.AF_INET6:
host = '::1'
else:
raise ValueError("Only AF_INET and AF_INET6 socket address "
"families are supported")
if type != socket.SOCK_STREAM:
raise ValueError("Only SOCK_STREAM socket type is supported")
if proto != 0:
raise ValueError("Only protocol zero is supported")
# We create a connected TCP socket. Note the trick with setblocking(False)
# that prevents us from having to create a thread.
lsock = socket.socket(family, type, proto)
try:
lsock.bind((host, 0))
lsock.listen(1)
# On IPv6, ignore flow_info and scope_id
addr, port = lsock.getsockname()[:2]
csock = socket.socket(family, type, proto)
try:
csock.setblocking(False)
try:
csock.connect((addr, port))
except (BlockingIOError, InterruptedError):
pass
csock.setblocking(True)
ssock, _ = lsock.accept()
except:
csock.close()
raise
finally:
lsock.close()
return (ssock, csock)
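# Illustrative sketch: the returned pair is a connected duplex stream, so
# bytes written to one end are readable from the other (handy for self-pipe
# wakeups).
#
#   a, b = socketpair()
#   a.sendall(b'\x00')
#   assert b.recv(1) == b'\x00'
#   a.close(); b.close()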
# Replacement for os.pipe() using handles instead of fds
def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
"""Like os.pipe() but with overlapped support and using handles not fds."""
address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
(os.getpid(), next(_mmap_counter)))
if duplex:
openmode = _winapi.PIPE_ACCESS_DUPLEX
access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
obsize, ibsize = bufsize, bufsize
else:
openmode = _winapi.PIPE_ACCESS_INBOUND
access = _winapi.GENERIC_WRITE
obsize, ibsize = 0, bufsize
openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
if overlapped[0]:
openmode |= _winapi.FILE_FLAG_OVERLAPPED
if overlapped[1]:
flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
else:
flags_and_attribs = 0
h1 = h2 = None
try:
h1 = _winapi.CreateNamedPipe(
address, openmode, _winapi.PIPE_WAIT,
1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
h2 = _winapi.CreateFile(
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
flags_and_attribs, _winapi.NULL)
ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
ov.GetOverlappedResult(True)
return h1, h2
except:
if h1 is not None:
_winapi.CloseHandle(h1)
if h2 is not None:
_winapi.CloseHandle(h2)
raise
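# Illustrative sketch: with the default duplex=False, the first handle is
# the (inbound, overlapped-capable) read end and the second the write end.
#
#   read_h, write_h = pipe(overlapped=(True, False))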
# Wrapper for a pipe handle
class PipeHandle:
"""Wrapper for an overlapped pipe handle which is vaguely file-object like.
The IOCP event loop can use these instead of socket objects.
"""
def __init__(self, handle):
self._handle = handle
def __repr__(self):
if self._handle is not None:
handle = 'handle=%r' % self._handle
else:
handle = 'closed'
return '<%s %s>' % (self.__class__.__name__, handle)
@property
def handle(self):
return self._handle
def fileno(self):
if self._handle is None:
raise ValueError("I/O operatioon on closed pipe")
return self._handle
def close(self, *, CloseHandle=_winapi.CloseHandle):
if self._handle is not None:
CloseHandle(self._handle)
self._handle = None
def __del__(self):
if self._handle is not None:
warnings.warn("unclosed %r" % self, ResourceWarning,
source=self)
self.close()
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
# Replacement for subprocess.Popen using overlapped pipe handles
class Popen(subprocess.Popen):
"""Replacement for subprocess.Popen using overlapped pipe handles.
The stdin, stdout, stderr are None or instances of PipeHandle.
"""
def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
assert not kwds.get('universal_newlines')
assert kwds.get('bufsize', 0) == 0
stdin_rfd = stdout_wfd = stderr_wfd = None
stdin_wh = stdout_rh = stderr_rh = None
if stdin == PIPE:
stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
else:
stdin_rfd = stdin
if stdout == PIPE:
stdout_rh, stdout_wh = pipe(overlapped=(True, False))
stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
else:
stdout_wfd = stdout
if stderr == PIPE:
stderr_rh, stderr_wh = pipe(overlapped=(True, False))
stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
elif stderr == STDOUT:
stderr_wfd = stdout_wfd
else:
stderr_wfd = stderr
try:
super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
stderr=stderr_wfd, **kwds)
except:
for h in (stdin_wh, stdout_rh, stderr_rh):
if h is not None:
_winapi.CloseHandle(h)
raise
else:
if stdin_wh is not None:
self.stdin = PipeHandle(stdin_wh)
if stdout_rh is not None:
self.stdout = PipeHandle(stdout_rh)
if stderr_rh is not None:
self.stderr = PipeHandle(stderr_rh)
finally:
if stdin == PIPE:
os.close(stdin_rfd)
if stdout == PIPE:
os.close(stdout_wfd)
if stderr == PIPE:
os.close(stderr_wfd)
# cosmopolitan/third_party/python/Lib/asyncio/log.py
"""Logging configuration."""
import logging
# Name the logger after the package.
logger = logging.getLogger(__package__)
# cosmopolitan/third_party/python/Lib/asyncio/transports.py
"""Abstract Transport class."""
from asyncio import compat
__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
'Transport', 'DatagramTransport', 'SubprocessTransport',
]
class BaseTransport:
"""Base class for transports."""
def __init__(self, extra=None):
if extra is None:
extra = {}
self._extra = extra
def get_extra_info(self, name, default=None):
"""Get optional transport information."""
return self._extra.get(name, default)
def is_closing(self):
"""Return True if the transport is closing or closed."""
raise NotImplementedError
def close(self):
"""Close the transport.
Buffered data will be flushed asynchronously. No more data
will be received. After all buffered data is flushed, the
protocol's connection_lost() method will (eventually) be called
with None as its argument.
"""
raise NotImplementedError
def set_protocol(self, protocol):
"""Set a new protocol."""
raise NotImplementedError
def get_protocol(self):
"""Return the current protocol."""
raise NotImplementedError
class ReadTransport(BaseTransport):
"""Interface for read-only transports."""
def pause_reading(self):
"""Pause the receiving end.
No data will be passed to the protocol's data_received()
method until resume_reading() is called.
"""
raise NotImplementedError
def resume_reading(self):
"""Resume the receiving end.
Data received will once again be passed to the protocol's
data_received() method.
"""
raise NotImplementedError
class WriteTransport(BaseTransport):
"""Interface for write-only transports."""
def set_write_buffer_limits(self, high=None, low=None):
"""Set the high- and low-water limits for write flow control.
These two values control when to call the protocol's
pause_writing() and resume_writing() methods. If specified,
the low-water limit must be less than or equal to the
high-water limit. Neither value can be negative.
The defaults are implementation-specific. If only the
high-water limit is given, the low-water limit defaults to an
implementation-specific value less than or equal to the
high-water limit. Setting high to zero forces low to zero as
well, and causes pause_writing() to be called whenever the
buffer becomes non-empty. Setting low to zero causes
resume_writing() to be called only once the buffer is empty.
Use of zero for either limit is generally sub-optimal as it
reduces opportunities for doing I/O and computation
concurrently.
"""
raise NotImplementedError
def get_write_buffer_size(self):
"""Return the current size of the write buffer."""
raise NotImplementedError
def write(self, data):
"""Write some data bytes to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
"""
raise NotImplementedError
def writelines(self, list_of_data):
"""Write a list (or any iterable) of data bytes to the transport.
The default implementation concatenates the arguments and
calls write() on the result.
"""
data = compat.flatten_list_bytes(list_of_data)
self.write(data)
def write_eof(self):
"""Close the write end after flushing buffered data.
(This is like typing ^D into a UNIX program reading from stdin.)
Data may still be received.
"""
raise NotImplementedError
def can_write_eof(self):
"""Return True if this transport supports write_eof(), False if not."""
raise NotImplementedError
def abort(self):
"""Close the transport immediately.
Buffered data will be lost. No more data will be received.
The protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
raise NotImplementedError
class Transport(ReadTransport, WriteTransport):
"""Interface representing a bidirectional transport.
There may be several implementations, but typically, the user does
not implement new transports; rather, the platform provides some
useful transports that are implemented using the platform's best
practices.
The user never instantiates a transport directly; they call a
utility function, passing it a protocol factory and other
information necessary to create the transport and protocol. (E.g.
EventLoop.create_connection() or EventLoop.create_server().)
The utility function will asynchronously create a transport and a
protocol and hook them up by calling the protocol's
connection_made() method, passing it the transport.
The implementation here raises NotImplementedError for every method
except writelines(), which calls write() in a loop.
"""
class DatagramTransport(BaseTransport):
"""Interface for datagram (UDP) transports."""
def sendto(self, data, addr=None):
"""Send data to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
addr is the target socket address.
If addr is None, use the target address given on transport creation.
"""
raise NotImplementedError
def abort(self):
"""Close the transport immediately.
Buffered data will be lost. No more data will be received.
The protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
raise NotImplementedError
class SubprocessTransport(BaseTransport):
def get_pid(self):
"""Get subprocess id."""
raise NotImplementedError
def get_returncode(self):
"""Get subprocess returncode.
See also
http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
"""
raise NotImplementedError
def get_pipe_transport(self, fd):
"""Get transport for pipe with number fd."""
raise NotImplementedError
def send_signal(self, signal):
"""Send signal to subprocess.
See also:
docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
"""
raise NotImplementedError
def terminate(self):
"""Stop the subprocess.
Alias for close() method.
On Posix OSs the method sends SIGTERM to the subprocess.
On Windows the Win32 API function TerminateProcess()
is called to stop the subprocess.
See also:
http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
"""
raise NotImplementedError
def kill(self):
"""Kill the subprocess.
On Posix OSs the function sends SIGKILL to the subprocess.
On Windows kill() is an alias for terminate().
See also:
http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
"""
raise NotImplementedError
class _FlowControlMixin(Transport):
"""All the logic for (write) flow control in a mix-in base class.
The subclass must implement get_write_buffer_size(). It must call
_maybe_pause_protocol() whenever the write buffer size increases,
and _maybe_resume_protocol() whenever it decreases. It may also
override set_write_buffer_limits() (e.g. to specify different
defaults).
The subclass constructor must call super().__init__(extra). This
will call set_write_buffer_limits().
The user may call set_write_buffer_limits() and
get_write_buffer_size(), and their protocol's pause_writing() and
resume_writing() may be called.
"""
def __init__(self, extra=None, loop=None):
super().__init__(extra)
assert loop is not None
self._loop = loop
self._protocol_paused = False
self._set_write_buffer_limits()
def _maybe_pause_protocol(self):
size = self.get_write_buffer_size()
if size <= self._high_water:
return
if not self._protocol_paused:
self._protocol_paused = True
try:
self._protocol.pause_writing()
except Exception as exc:
self._loop.call_exception_handler({
'message': 'protocol.pause_writing() failed',
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
def _maybe_resume_protocol(self):
if (self._protocol_paused and
self.get_write_buffer_size() <= self._low_water):
self._protocol_paused = False
try:
self._protocol.resume_writing()
except Exception as exc:
self._loop.call_exception_handler({
'message': 'protocol.resume_writing() failed',
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
def get_write_buffer_limits(self):
return (self._low_water, self._high_water)
def _set_write_buffer_limits(self, high=None, low=None):
if high is None:
if low is None:
high = 64*1024
else:
high = 4*low
if low is None:
low = high // 4
if not high >= low >= 0:
raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
(high, low))
self._high_water = high
self._low_water = low
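# With the defaults above, high is 65536 and low is 16384 bytes:
# pause_writing() fires once more than 64 KiB is buffered, and
# resume_writing() once the buffer drains back to 16 KiB or less.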
def set_write_buffer_limits(self, high=None, low=None):
self._set_write_buffer_limits(high=high, low=low)
self._maybe_pause_protocol()
def get_write_buffer_size(self):
raise NotImplementedError
# cosmopolitan/third_party/python/Lib/asyncio/subprocess.py
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
import subprocess
from . import events
from . import protocols
from . import streams
from . import tasks
from .coroutines import coroutine
from .log import logger
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
protocols.SubprocessProtocol):
"""Like StreamReaderProtocol, but for a subprocess."""
def __init__(self, limit, loop):
super().__init__(loop=loop)
self._limit = limit
self.stdin = self.stdout = self.stderr = None
self._transport = None
self._process_exited = False
self._pipe_fds = []
def __repr__(self):
info = [self.__class__.__name__]
if self.stdin is not None:
info.append('stdin=%r' % self.stdin)
if self.stdout is not None:
info.append('stdout=%r' % self.stdout)
if self.stderr is not None:
info.append('stderr=%r' % self.stderr)
return '<%s>' % ' '.join(info)
def connection_made(self, transport):
self._transport = transport
stdout_transport = transport.get_pipe_transport(1)
if stdout_transport is not None:
self.stdout = streams.StreamReader(limit=self._limit,
loop=self._loop)
self.stdout.set_transport(stdout_transport)
self._pipe_fds.append(1)
stderr_transport = transport.get_pipe_transport(2)
if stderr_transport is not None:
self.stderr = streams.StreamReader(limit=self._limit,
loop=self._loop)
self.stderr.set_transport(stderr_transport)
self._pipe_fds.append(2)
stdin_transport = transport.get_pipe_transport(0)
if stdin_transport is not None:
self.stdin = streams.StreamWriter(stdin_transport,
protocol=self,
reader=None,
loop=self._loop)
def pipe_data_received(self, fd, data):
if fd == 1:
reader = self.stdout
elif fd == 2:
reader = self.stderr
else:
reader = None
if reader is not None:
reader.feed_data(data)
def pipe_connection_lost(self, fd, exc):
if fd == 0:
pipe = self.stdin
if pipe is not None:
pipe.close()
self.connection_lost(exc)
return
if fd == 1:
reader = self.stdout
elif fd == 2:
reader = self.stderr
else:
reader = None
if reader is not None:
if exc is None:
reader.feed_eof()
else:
reader.set_exception(exc)
if fd in self._pipe_fds:
self._pipe_fds.remove(fd)
self._maybe_close_transport()
def process_exited(self):
self._process_exited = True
self._maybe_close_transport()
def _maybe_close_transport(self):
if len(self._pipe_fds) == 0 and self._process_exited:
self._transport.close()
self._transport = None
class Process:
def __init__(self, transport, protocol, loop):
self._transport = transport
self._protocol = protocol
self._loop = loop
self.stdin = protocol.stdin
self.stdout = protocol.stdout
self.stderr = protocol.stderr
self.pid = transport.get_pid()
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.pid)
@property
def returncode(self):
return self._transport.get_returncode()
@coroutine
def wait(self):
"""Wait until the process exit and return the process return code.
This method is a coroutine."""
return (yield from self._transport._wait())
def send_signal(self, signal):
self._transport.send_signal(signal)
def terminate(self):
self._transport.terminate()
def kill(self):
self._transport.kill()
@coroutine
def _feed_stdin(self, input):
debug = self._loop.get_debug()
self.stdin.write(input)
if debug:
logger.debug('%r communicate: feed stdin (%s bytes)',
self, len(input))
try:
yield from self.stdin.drain()
except (BrokenPipeError, ConnectionResetError) as exc:
# communicate() ignores BrokenPipeError and ConnectionResetError
if debug:
logger.debug('%r communicate: stdin got %r', self, exc)
if debug:
logger.debug('%r communicate: close stdin', self)
self.stdin.close()
@coroutine
def _noop(self):
return None
@coroutine
def _read_stream(self, fd):
transport = self._transport.get_pipe_transport(fd)
if fd == 2:
stream = self.stderr
else:
assert fd == 1
stream = self.stdout
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: read %s', self, name)
output = yield from stream.read()
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: close %s', self, name)
transport.close()
return output
@coroutine
def communicate(self, input=None):
if input is not None:
stdin = self._feed_stdin(input)
else:
stdin = self._noop()
if self.stdout is not None:
stdout = self._read_stream(1)
else:
stdout = self._noop()
if self.stderr is not None:
stderr = self._read_stream(2)
else:
stderr = self._noop()
stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
loop=self._loop)
yield from self.wait()
return (stdout, stderr)
@coroutine
def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
if loop is None:
loop = events.get_event_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
transport, protocol = yield from loop.subprocess_shell(
protocol_factory,
cmd, stdin=stdin, stdout=stdout,
stderr=stderr, **kwds)
return Process(transport, protocol, loop)
@coroutine
def create_subprocess_exec(program, *args, stdin=None, stdout=None,
stderr=None, loop=None,
limit=streams._DEFAULT_LIMIT, **kwds):
if loop is None:
loop = events.get_event_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
transport, protocol = yield from loop.subprocess_exec(
protocol_factory,
program, *args,
stdin=stdin, stdout=stdout,
stderr=stderr, **kwds)
return Process(transport, protocol, loop)
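# Illustrative sketch (not part of the module): run a command and collect
# its output with the helpers above.
#
#   @coroutine
#   def run():
#       proc = yield from create_subprocess_exec('echo', 'hi', stdout=PIPE)
#       out, _ = yield from proc.communicate()
#       return out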
# cosmopolitan/third_party/python/Lib/asyncio/unix_events.py
"""Selector event loop for Unix with signal handling."""
import errno
import os
import signal
import socket
import stat
import subprocess
import sys
import threading
import warnings
from . import base_events
from . import base_subprocess
from . import compat
from . import constants
from . import coroutines
from . import events
from . import futures
from . import selector_events
import selectors
from . import transports
from .coroutines import coroutine
from .log import logger
__all__ = ['SelectorEventLoop',
'AbstractChildWatcher', 'SafeChildWatcher',
'FastChildWatcher', 'DefaultEventLoopPolicy',
]
if sys.platform == 'win32': # pragma: no cover
raise ImportError('Signals are not really supported on Windows')
def _sighandler_noop(signum, frame):
"""Dummy signal handler."""
pass
try:
_fspath = os.fspath
except AttributeError:
# Python 3.5 or earlier
_fspath = lambda path: path
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
"""Unix event loop.
Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
"""
def __init__(self, selector=None):
super().__init__(selector)
self._signal_handlers = {}
def _socketpair(self):
return socket.socketpair()
def close(self):
super().close()
if not sys.is_finalizing():
for sig in list(self._signal_handlers):
self.remove_signal_handler(sig)
else:
if self._signal_handlers:
warnings.warn(f"Closing the loop {self!r} "
f"on interpreter shutdown "
f"stage, skipping signal handlers removal",
ResourceWarning,
source=self)
self._signal_handlers.clear()
def _process_self_data(self, data):
for signum in data:
if not signum:
# ignore null bytes written by _write_to_self()
continue
self._handle_signal(signum)
def add_signal_handler(self, sig, callback, *args):
"""Add a handler for a signal. UNIX only.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if (coroutines.iscoroutine(callback)
or coroutines.iscoroutinefunction(callback)):
raise TypeError("coroutines cannot be used "
"with add_signal_handler()")
self._check_signal(sig)
self._check_closed()
try:
# set_wakeup_fd() raises ValueError if this is not the
# main thread. By calling it early we ensure that an
# event loop running in another thread cannot add a signal
# handler.
signal.set_wakeup_fd(self._csock.fileno())
except (ValueError, OSError) as exc:
raise RuntimeError(str(exc))
handle = events.Handle(callback, args, self)
self._signal_handlers[sig] = handle
try:
# Register a dummy signal handler to ask Python to write the signal
# number in the wakeup file descriptor. _process_self_data() will
# read signal numbers from this file descriptor to handle signals.
signal.signal(sig, _sighandler_noop)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(sig, False)
except OSError as exc:
del self._signal_handlers[sig]
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as nexc:
logger.info('set_wakeup_fd(-1) failed: %s', nexc)
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
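# Illustrative sketch: stop the loop cleanly on SIGTERM. Plain callbacks
# only -- coroutines are rejected above.
#
#   loop.add_signal_handler(signal.SIGTERM, loop.stop)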
def _handle_signal(self, sig):
"""Internal helper that is the actual signal handler."""
handle = self._signal_handlers.get(sig)
if handle is None:
return # Assume it's some race condition.
if handle._cancelled:
self.remove_signal_handler(sig) # Remove it properly.
else:
self._add_callback_signalsafe(handle)
def remove_signal_handler(self, sig):
"""Remove a handler for a signal. UNIX only.
Return True if a signal handler was removed, False if not.
"""
self._check_signal(sig)
try:
del self._signal_handlers[sig]
except KeyError:
return False
if sig == signal.SIGINT:
handler = signal.default_int_handler
else:
handler = signal.SIG_DFL
try:
signal.signal(sig, handler)
except OSError as exc:
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as exc:
logger.info('set_wakeup_fd(-1) failed: %s', exc)
return True
def _check_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError('sig must be an int, not {!r}'.format(sig))
if not (1 <= sig < signal.NSIG):
raise ValueError(
'sig {} out of range(1, {})'.format(sig, signal.NSIG))
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
@coroutine
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
with events.get_child_watcher() as watcher:
waiter = self.create_future()
transp = _UnixSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
waiter=waiter, extra=extra,
**kwargs)
watcher.add_child_handler(transp.get_pid(),
self._child_watcher_callback, transp)
try:
yield from waiter
except Exception as exc:
# Workaround CPython bug #23353: using yield/yield-from in an
# except block of a generator doesn't properly clear
# sys.exc_info()
err = exc
else:
err = None
if err is not None:
transp.close()
yield from transp._wait()
raise err
return transp
def _child_watcher_callback(self, pid, returncode, transp):
self.call_soon_threadsafe(transp._process_exited, returncode)
@coroutine
def create_unix_connection(self, protocol_factory, path, *,
ssl=None, sock=None,
server_hostname=None):
assert server_hostname is None or isinstance(server_hostname, str)
if ssl:
if server_hostname is None:
raise ValueError(
'you have to pass server_hostname when using ssl')
else:
if server_hostname is not None:
raise ValueError('server_hostname is only meaningful with ssl')
if path is not None:
if sock is not None:
raise ValueError(
'path and sock can not be specified at the same time')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
try:
sock.setblocking(False)
yield from self.sock_connect(sock, path)
except:
sock.close()
raise
else:
if sock is None:
raise ValueError('no path and sock were specified')
if (sock.family != socket.AF_UNIX or
not base_events._is_stream_socket(sock.type)):
raise ValueError(
'A UNIX Domain Stream Socket was expected, got {!r}'
.format(sock))
sock.setblocking(False)
transport, protocol = yield from self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname)
return transport, protocol
@coroutine
def create_unix_server(self, protocol_factory, path=None, *,
sock=None, backlog=100, ssl=None):
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if path is not None:
if sock is not None:
raise ValueError(
'path and sock can not be specified at the same time')
path = _fspath(path)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Check for abstract socket. `str` and `bytes` paths are supported.
if path[0] not in (0, '\x00'):
try:
if stat.S_ISSOCK(os.stat(path).st_mode):
os.remove(path)
except FileNotFoundError:
pass
except OSError as err:
# Directory may have permissions only to create socket.
logger.error('Unable to check or remove stale UNIX socket %r: %r', path, err)
try:
sock.bind(path)
except OSError as exc:
sock.close()
if exc.errno == errno.EADDRINUSE:
# Let's improve the error message by adding
# with what exact address it occurs.
msg = 'Address {!r} is already in use'.format(path)
raise OSError(errno.EADDRINUSE, msg) from None
else:
raise
except:
sock.close()
raise
else:
if sock is None:
raise ValueError(
'path was not specified, and no sock specified')
if (sock.family != socket.AF_UNIX or
not base_events._is_stream_socket(sock.type)):
raise ValueError(
'A UNIX Domain Stream Socket was expected, got {!r}'
.format(sock))
server = base_events.Server(self, [sock])
sock.listen(backlog)
sock.setblocking(False)
self._start_serving(protocol_factory, sock, ssl, server)
return server
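# Illustrative sketch: serving on a filesystem path (MyProtocol is a
# hypothetical protocol factory).
#
#   server = yield from loop.create_unix_server(MyProtocol, '/tmp/demo.sock')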
if hasattr(os, 'set_blocking'):
def _set_nonblocking(fd):
os.set_blocking(fd, False)
else:
import fcntl
def _set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
class _UnixReadPipeTransport(transports.ReadTransport):
max_size = 256 * 1024 # max bytes we read in one event loop iteration
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
super().__init__(extra)
self._extra['pipe'] = pipe
self._loop = loop
self._pipe = pipe
self._fileno = pipe.fileno()
self._protocol = protocol
self._closing = False
mode = os.fstat(self._fileno).st_mode
if not (stat.S_ISFIFO(mode) or
stat.S_ISSOCK(mode) or
stat.S_ISCHR(mode)):
self._pipe = None
self._fileno = None
self._protocol = None
raise ValueError("Pipe transport is for pipes/sockets only.")
_set_nonblocking(self._fileno)
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop._add_reader,
self._fileno, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._pipe is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._fileno)
selector = getattr(self._loop, '_selector', None)
if self._pipe is not None and selector is not None:
polling = selector_events._test_selector_event(
selector,
self._fileno, selectors.EVENT_READ)
if polling:
info.append('polling')
else:
info.append('idle')
elif self._pipe is not None:
info.append('open')
else:
info.append('closed')
return '<%s>' % ' '.join(info)
def _read_ready(self):
try:
data = os.read(self._fileno, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
self._fatal_error(exc, 'Fatal read error on pipe transport')
else:
if data:
self._protocol.data_received(data)
else:
if self._loop.get_debug():
logger.info("%r was closed by peer", self)
self._closing = True
self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._protocol.eof_received)
self._loop.call_soon(self._call_connection_lost, None)
def pause_reading(self):
self._loop._remove_reader(self._fileno)
def resume_reading(self):
self._loop._add_reader(self._fileno, self._read_ready)
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closing
def close(self):
if not self._closing:
self._close(None)
# On Python 3.3 and older, objects with a destructor that are part of a
# reference cycle are never destroyed. That's no longer the case on
# Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if self._pipe is not None:
warnings.warn("unclosed transport %r" % self, ResourceWarning,
source=self)
self._pipe.close()
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
# should be called by exception handler only
if (isinstance(exc, OSError) and exc.errno == errno.EIO):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._close(exc)
def _close(self, exc):
self._closing = True
self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._pipe.close()
self._pipe = None
self._protocol = None
self._loop = None
class _UnixWritePipeTransport(transports._FlowControlMixin,
transports.WriteTransport):
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
super().__init__(extra, loop)
self._extra['pipe'] = pipe
self._pipe = pipe
self._fileno = pipe.fileno()
self._protocol = protocol
self._buffer = bytearray()
self._conn_lost = 0
self._closing = False # Set when close() or write_eof() called.
mode = os.fstat(self._fileno).st_mode
is_char = stat.S_ISCHR(mode)
is_fifo = stat.S_ISFIFO(mode)
is_socket = stat.S_ISSOCK(mode)
if not (is_char or is_fifo or is_socket):
self._pipe = None
self._fileno = None
self._protocol = None
raise ValueError("Pipe transport is only for "
"pipes, sockets and character devices")
_set_nonblocking(self._fileno)
self._loop.call_soon(self._protocol.connection_made, self)
# On AIX, the reader trick (to be notified when the read end of the
# socket is closed) only works for sockets. On other platforms it
# works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.)
if is_socket or (is_fifo and not sys.platform.startswith("aix")):
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop._add_reader,
self._fileno, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._pipe is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._fileno)
selector = getattr(self._loop, '_selector', None)
if self._pipe is not None and selector is not None:
polling = selector_events._test_selector_event(
selector,
self._fileno, selectors.EVENT_WRITE)
if polling:
info.append('polling')
else:
info.append('idle')
bufsize = self.get_write_buffer_size()
info.append('bufsize=%s' % bufsize)
elif self._pipe is not None:
info.append('open')
else:
info.append('closed')
return '<%s>' % ' '.join(info)
def get_write_buffer_size(self):
return len(self._buffer)
def _read_ready(self):
# Pipe was closed by peer.
if self._loop.get_debug():
logger.info("%r was closed by peer", self)
if self._buffer:
self._close(BrokenPipeError())
else:
self._close()
def write(self, data):
assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
if isinstance(data, bytearray):
data = memoryview(data)
if not data:
return
if self._conn_lost or self._closing:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('pipe closed by peer or '
'os.write(pipe, data) raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Attempt to send it right away first.
try:
n = os.write(self._fileno, data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
self._conn_lost += 1
self._fatal_error(exc, 'Fatal write error on pipe transport')
return
if n == len(data):
return
elif n > 0:
data = memoryview(data)[n:]
self._loop._add_writer(self._fileno, self._write_ready)
self._buffer += data
self._maybe_pause_protocol()
def _write_ready(self):
assert self._buffer, 'Data should not be empty'
try:
n = os.write(self._fileno, self._buffer)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._buffer.clear()
self._conn_lost += 1
            # Remove the writer here: _fatal_error() won't remove it
            # because _buffer is empty.
self._loop._remove_writer(self._fileno)
self._fatal_error(exc, 'Fatal write error on pipe transport')
else:
if n == len(self._buffer):
self._buffer.clear()
self._loop._remove_writer(self._fileno)
self._maybe_resume_protocol() # May append to buffer.
if self._closing:
self._loop._remove_reader(self._fileno)
self._call_connection_lost(None)
return
elif n > 0:
del self._buffer[:n]
def can_write_eof(self):
return True
def write_eof(self):
if self._closing:
return
assert self._pipe
self._closing = True
if not self._buffer:
self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, None)
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closing
def close(self):
if self._pipe is not None and not self._closing:
            # write_eof() is all we need to close the write pipe.
self.write_eof()
    # On Python 3.3 and older, objects with a destructor that are part of
    # a reference cycle are never destroyed. This is no longer the case on
    # Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if self._pipe is not None:
warnings.warn("unclosed transport %r" % self, ResourceWarning,
source=self)
self._pipe.close()
def abort(self):
self._close(None)
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        # Should be called by the exception handler only.
if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._close(exc)
def _close(self, exc=None):
self._closing = True
if self._buffer:
self._loop._remove_writer(self._fileno)
self._buffer.clear()
self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._pipe.close()
self._pipe = None
self._protocol = None
self._loop = None
if hasattr(os, 'set_inheritable'):
# Python 3.4 and newer
_set_inheritable = os.set_inheritable
else:
import fcntl
def _set_inheritable(fd, inheritable):
cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if not inheritable:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
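        # Worked example (illustrative), assuming FD_CLOEXEC == 1:
        #   old == 0: old | 1  -> 1   close-on-exec set, fd NOT inherited
        #   old == 1: old & ~1 -> 0   close-on-exec clear, fd inherited
        # "Inheritable" is simply the inverse of the close-on-exec bit.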
class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
stdin_w = None
if stdin == subprocess.PIPE:
# Use a socket pair for stdin, since not all platforms
# support selecting read events on the write end of a
# socket (which we use in order to detect closing of the
# other end). Notably this is needed on AIX, and works
# just fine on other platforms.
stdin, stdin_w = self._loop._socketpair()
# Mark the write end of the stdin pipe as non-inheritable,
# needed by close_fds=False on Python 3.3 and older
            # (Python 3.4 implements PEP 446: socketpair returns
# non-inheritable sockets)
_set_inheritable(stdin_w.fileno(), False)
self._proc = subprocess.Popen(
args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
universal_newlines=False, bufsize=bufsize, **kwargs)
if stdin_w is not None:
stdin.close()
self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
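            # Note (illustrative): stdin_w.detach() hands ownership of the
            # raw file descriptor to the file object created by open(), so
            # the socket wrapper can be garbage-collected without closing
            # the descriptor out from under the subprocess pipe.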
class AbstractChildWatcher:
"""Abstract base class for monitoring child processes.
Objects derived from this class monitor a collection of subprocesses and
report their termination or interruption by a signal.
New callbacks are registered with .add_child_handler(). Starting a new
process must be done within a 'with' block to allow the watcher to suspend
    its activity until the new process is fully registered (this is needed to
prevent a race condition in some implementations).
Example:
with watcher:
proc = subprocess.Popen("sleep 1")
watcher.add_child_handler(proc.pid, callback)
Notes:
Implementations of this class must be thread-safe.
Since child watcher objects may catch the SIGCHLD signal and call
waitpid(-1), there should be only one active object per process.
"""
def add_child_handler(self, pid, callback, *args):
"""Register a new child handler.
Arrange for callback(pid, returncode, *args) to be called when
process 'pid' terminates. Specifying another callback for the same
process replaces the previous handler.
Note: callback() must be thread-safe.
"""
raise NotImplementedError()
def remove_child_handler(self, pid):
"""Removes the handler for process 'pid'.
The function returns True if the handler was successfully removed,
False if there was nothing to remove."""
raise NotImplementedError()
def attach_loop(self, loop):
"""Attach the watcher to an event loop.
If the watcher was previously attached to an event loop, then it is
first detached before attaching to the new loop.
Note: loop may be None.
"""
raise NotImplementedError()
def close(self):
"""Close the watcher.
This must be called to make sure that any underlying resource is freed.
"""
raise NotImplementedError()
def __enter__(self):
"""Enter the watcher's context and allow starting new processes
This function must return self"""
raise NotImplementedError()
def __exit__(self, a, b, c):
"""Exit the watcher's context"""
raise NotImplementedError()
class BaseChildWatcher(AbstractChildWatcher):
def __init__(self):
self._loop = None
self._callbacks = {}
def close(self):
self.attach_loop(None)
def _do_waitpid(self, expected_pid):
raise NotImplementedError()
def _do_waitpid_all(self):
raise NotImplementedError()
def attach_loop(self, loop):
assert loop is None or isinstance(loop, events.AbstractEventLoop)
if self._loop is not None and loop is None and self._callbacks:
warnings.warn(
'A loop is being detached '
'from a child watcher with pending handlers',
RuntimeWarning)
if self._loop is not None:
self._loop.remove_signal_handler(signal.SIGCHLD)
self._loop = loop
if loop is not None:
loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)
# Prevent a race condition in case a child terminated
# during the switch.
self._do_waitpid_all()
def _sig_chld(self):
try:
self._do_waitpid_all()
except Exception as exc:
# self._loop should always be available here
# as '_sig_chld' is added as a signal handler
# in 'attach_loop'
self._loop.call_exception_handler({
'message': 'Unknown exception in SIGCHLD handler',
'exception': exc,
})
def _compute_returncode(self, status):
if os.WIFSIGNALED(status):
# The child process died because of a signal.
return -os.WTERMSIG(status)
elif os.WIFEXITED(status):
            # The child process exited (e.g. sys.exit()).
return os.WEXITSTATUS(status)
else:
# The child exited, but we don't understand its status.
# This shouldn't happen, but if it does, let's just
# return that status; perhaps that helps debug it.
return status
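        # Worked examples (illustrative) of the traditional 16-bit wait
        # status encoding (low 7 bits: signal number, high byte: exit code):
        #   child calls sys.exit(3)  -> status 0x0300 -> returncode 3
        #   child killed by SIGKILL  -> status 0x0009 -> returncode -9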
class SafeChildWatcher(BaseChildWatcher):
"""'Safe' child watcher implementation.
This implementation avoids disrupting other code spawning processes by
polling explicitly each process in the SIGCHLD handler instead of calling
os.waitpid(-1).
    This is a safe solution but it has significant overhead when handling a
    large number of children (O(n) each time SIGCHLD is raised).
"""
def close(self):
self._callbacks.clear()
super().close()
def __enter__(self):
return self
def __exit__(self, a, b, c):
pass
def add_child_handler(self, pid, callback, *args):
if self._loop is None:
raise RuntimeError(
"Cannot add child handler, "
"the child watcher does not have a loop attached")
self._callbacks[pid] = (callback, args)
# Prevent a race condition in case the child is already terminated.
self._do_waitpid(pid)
def remove_child_handler(self, pid):
try:
del self._callbacks[pid]
return True
except KeyError:
return False
def _do_waitpid_all(self):
for pid in list(self._callbacks):
self._do_waitpid(pid)
def _do_waitpid(self, expected_pid):
assert expected_pid > 0
try:
pid, status = os.waitpid(expected_pid, os.WNOHANG)
except ChildProcessError:
# The child process is already reaped
# (may happen if waitpid() is called elsewhere).
pid = expected_pid
returncode = 255
logger.warning(
"Unknown child process pid %d, will report returncode 255",
pid)
else:
if pid == 0:
# The child process is still alive.
return
returncode = self._compute_returncode(status)
if self._loop.get_debug():
logger.debug('process %s exited with returncode %s',
expected_pid, returncode)
try:
callback, args = self._callbacks.pop(pid)
except KeyError: # pragma: no cover
# May happen if .remove_child_handler() is called
# after os.waitpid() returns.
if self._loop.get_debug():
logger.warning("Child watcher got an unexpected pid: %r",
pid, exc_info=True)
else:
callback(pid, returncode, *args)
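        # Note (illustrative): os.waitpid(pid, os.WNOHANG) returns (0, 0)
        # while the child is still running, which is why pid == 0 above is
        # treated as "child still alive".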
class FastChildWatcher(BaseChildWatcher):
"""'Fast' child watcher implementation.
    This implementation reaps every terminated process by calling
os.waitpid(-1) directly, possibly breaking other code spawning processes
and waiting for their termination.
    There is no noticeable overhead when handling a large number of children
(O(1) each time a child terminates).
"""
def __init__(self):
super().__init__()
self._lock = threading.Lock()
self._zombies = {}
self._forks = 0
def close(self):
self._callbacks.clear()
self._zombies.clear()
super().close()
def __enter__(self):
with self._lock:
self._forks += 1
return self
def __exit__(self, a, b, c):
with self._lock:
self._forks -= 1
if self._forks or not self._zombies:
return
collateral_victims = str(self._zombies)
self._zombies.clear()
        logger.warning(
            "Caught subprocess termination from unknown pids: %s",
            collateral_victims)
def add_child_handler(self, pid, callback, *args):
assert self._forks, "Must use the context manager"
if self._loop is None:
raise RuntimeError(
"Cannot add child handler, "
"the child watcher does not have a loop attached")
with self._lock:
try:
returncode = self._zombies.pop(pid)
except KeyError:
# The child is running.
self._callbacks[pid] = callback, args
return
# The child is dead already. We can fire the callback.
callback(pid, returncode, *args)
def remove_child_handler(self, pid):
try:
del self._callbacks[pid]
return True
except KeyError:
return False
def _do_waitpid_all(self):
# Because of signal coalescing, we must keep calling waitpid() as
# long as we're able to reap a child.
while True:
try:
pid, status = os.waitpid(-1, os.WNOHANG)
except ChildProcessError:
# No more child processes exist.
return
else:
if pid == 0:
# A child process is still alive.
return
returncode = self._compute_returncode(status)
with self._lock:
try:
callback, args = self._callbacks.pop(pid)
except KeyError:
# unknown child
if self._forks:
# It may not be registered yet.
self._zombies[pid] = returncode
if self._loop.get_debug():
logger.debug('unknown process %s exited '
'with returncode %s',
pid, returncode)
continue
callback = None
else:
if self._loop.get_debug():
logger.debug('process %s exited with returncode %s',
pid, returncode)
if callback is None:
logger.warning(
"Caught subprocess termination from unknown pid: "
"%d -> %d", pid, returncode)
else:
callback(pid, returncode, *args)
class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
"""UNIX event loop policy with a watcher for child processes."""
_loop_factory = _UnixSelectorEventLoop
def __init__(self):
super().__init__()
self._watcher = None
def _init_watcher(self):
with events._lock:
if self._watcher is None: # pragma: no branch
self._watcher = SafeChildWatcher()
if isinstance(threading.current_thread(),
threading._MainThread):
self._watcher.attach_loop(self._local._loop)
def set_event_loop(self, loop):
"""Set the event loop.
As a side effect, if a child watcher was set before, then calling
.set_event_loop() from the main thread will call .attach_loop(loop) on
the child watcher.
"""
super().set_event_loop(loop)
if self._watcher is not None and \
isinstance(threading.current_thread(), threading._MainThread):
self._watcher.attach_loop(loop)
def get_child_watcher(self):
"""Get the watcher for child processes.
If not yet set, a SafeChildWatcher object is automatically created.
"""
if self._watcher is None:
self._init_watcher()
return self._watcher
def set_child_watcher(self, watcher):
"""Set the watcher for child processes."""
assert watcher is None or isinstance(watcher, AbstractChildWatcher)
if self._watcher is not None:
self._watcher.close()
self._watcher = watcher
SelectorEventLoop = _UnixSelectorEventLoop
DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
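# Illustrative usage sketch (hedged: relies only on the public asyncio
# policy/watcher interface defined above, not on any private helper):
#
#   import subprocess
#   import asyncio
#
#   def on_exit(pid, returncode):
#       print(pid, 'exited with', returncode)
#
#   watcher = asyncio.get_event_loop_policy().get_child_watcher()
#   with watcher:
#       proc = subprocess.Popen(['sleep', '1'])
#       watcher.add_child_handler(proc.pid, on_exit)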
| 37,261 | 1,084 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/asyncio/__init__.py | """The asyncio package, tracking PEP 3156."""
import sys
# The selectors module is in the stdlib in Python 3.4 but not in 3.3.
# Do this first, so the other submodules can use "from . import selectors".
# Prefer asyncio/selectors.py over the stdlib one, as ours may be newer.
import selectors # Will also be exported.
# This relies on each of the submodules having an __all__ variable.
from .base_events import *
from .coroutines import *
from .events import *
from .futures import *
from .locks import *
from .protocols import *
from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
from .transports import *
__all__ = (base_events.__all__ +
coroutines.__all__ +
events.__all__ +
futures.__all__ +
locks.__all__ +
protocols.__all__ +
queues.__all__ +
streams.__all__ +
subprocess.__all__ +
tasks.__all__ +
transports.__all__)
from .unix_events import * # pragma: no cover
__all__ += unix_events.__all__
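# Illustrative effect of the re-exports above: users access every submodule
# API through the package namespace, e.g.
#
#   import asyncio
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(asyncio.sleep(0))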
| 1,060 | 38 | jart/cosmopolitan | false |