| prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
| --- | --- | --- |
Each row pairs a code prompt (cut off just before an API call) with a completion holding the next call and an api column giving that call's fully-qualified name.
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
        176: pd.Timestamp("2012-10-25 00:00:00"),
# api: pandas.Timestamp
import os
import unittest
from unittest import mock
from unittest.mock import Mock, MagicMock
import numpy as np
import pandas as pd
import pygrams
from scripts import FilePaths
from scripts.utils.pygrams_exception import PygramsException
class TestPyGrams(unittest.TestCase):
data_source_name = 'dummy.pkl.bz2'
out_name = 'out'
def setUp(self):
self.global_stopwords = '''the
'''
self.ngram_stopwords = '''with
'''
self.unigram_stopwords = '''of
'''
def assertListAlmostEqual(self, list_a, list_b, places=7):
self.assertEqual(len(list_a), len(list_b), 'Lists must be same length')
for a, b in zip(list_a, list_b):
self.assertAlmostEqual(a, b, places=places)
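    # Illustrative note (not part of the original tests): assertListAlmostEqual
    # fails fast on a length mismatch and then compares element-wise, so a call
    # such as
    #     self.assertListAlmostEqual([0.1 + 0.2, 1.0], [0.3, 1.0], places=7)
    # passes even though 0.1 + 0.2 != 0.3 exactly in floating point.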
def preparePyGrams(self, fake_df_data, mock_read_pickle, mock_open, mock_bz2file, mock_path_isfile):
self.number_of_rows = len(fake_df_data['abstract'])
self.patent_id_auto_tested = 'patent_id' not in fake_df_data
self.application_id_auto_tested = 'application_id' not in fake_df_data
self.application_date_auto_tested = 'application_date' not in fake_df_data
self.publication_date_auto_tested = 'publication_date' not in fake_df_data
self.invention_title_auto_tested = 'invention_title' not in fake_df_data
self.classifications_cpc_auto_tested = 'classifications_cpc' not in fake_df_data
self.inventor_names_auto_tested = 'inventor_names' not in fake_df_data
self.inventor_countries_auto_tested = 'inventor_countries' not in fake_df_data
self.inventor_cities_auto_tested = 'inventor_cities' not in fake_df_data
self.applicant_organisation_auto_tested = 'applicant_organisation' not in fake_df_data
self.applicant_countries_auto_tested = 'applicant_countries' not in fake_df_data
self.applicant_cities_auto_tested = 'applicant_cities' not in fake_df_data
if self.patent_id_auto_tested:
fake_df_data['patent_id'] = [f'patent_id-{pid}' for pid in range(self.number_of_rows)]
if self.application_id_auto_tested:
fake_df_data['application_id'] = [f'application_id-{pid}' for pid in range(self.number_of_rows)]
if self.application_date_auto_tested:
            fake_df_data['application_date'] = [pd.Timestamp('1998-01-01 00:00:00') +
                                                pd.DateOffset(weeks=row)
# api: pandas.DateOffset
# coding=utf-8
# pylint: disable-msg=E1101,W0612
""" test get/set & misc """
import pytest
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, MultiIndex,
Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestMisc(TestData):
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
@pytest.mark.parametrize(
'result_1, duplicate_item, expected_1',
[
[
pd.Series({1: 12, 2: [1, 2, 2, 3]}), pd.Series({1: 313}),
pd.Series({1: 12, }, dtype=object),
],
[
pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
pd.Series({1: [1, 2, 3]}), pd.Series({1: [1, 2, 3], }),
],
])
def test_getitem_with_duplicates_indices(
self, result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_series_box_timestamp(self):
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng)
assert isinstance(ser[5], pd.Timestamp)
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng, index=rng)
assert isinstance(ser[5], pd.Timestamp)
assert isinstance(ser.iat[5], pd.Timestamp)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_categorial_assigning_ops(self):
orig = Series(Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]),
index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_take(self):
s = Series([-1, 5, 6, 2, 4])
actual = s.take([1, 3, 4])
expected = Series([5, 2, 4], index=[1, 3, 4])
tm.assert_series_equal(actual, expected)
actual = s.take([-1, 3, 4])
expected = Series([4, 2, 4], index=[4, 3, 4])
tm.assert_series_equal(actual, expected)
pytest.raises(IndexError, s.take, [1, 10])
pytest.raises(IndexError, s.take, [2, 5])
with tm.assert_produces_warning(FutureWarning):
s.take([-1, 3, 4], convert=False)
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.loc[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.loc[d1] = 4
self.series.loc[d2] = 6
assert self.series[d1] == 4
assert self.series[d2] == 6
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
pytest.raises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame({c: [1, 2, 3] for c in ['a', 'b', 'c']})
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
assert df['bb'].iloc[0] == 0.15
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
assert not np.isnan(self.ts[10])
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
pytest.raises(KeyError, s.drop, 'bc')
pytest.raises(KeyError, s.drop, ('a',))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.iloc[1:]
assert_series_equal(result, expected)
# bad axis
pytest.raises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
# GH 16877
s = Series([2, 3], index=[0, 1])
with tm.assert_raises_regex(KeyError, 'not contained in axis'):
s.drop([False, True])
def test_select(self):
# deprecated: gh-12410
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
n = len(self.ts)
result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
expected = self.ts.reindex(self.ts.index[n // 2:])
assert_series_equal(result, expected)
result = self.ts.select(lambda x: x.weekday() == 2)
expected = self.ts[self.ts.index.weekday == 2]
assert_series_equal(result, expected)
def test_cast_on_putmask(self):
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype='int64')
s[[True, False]] = Series([0], index=[1], dtype='int64')
expected = Series([0, 2], index=[1, 2], dtype='int64')
assert_series_equal(s, expected)
def test_type_promote_putmask(self):
# GH8387: test that changing types does not break alignment
ts = Series(np.random.randn(100), index=np.arange(100, 0, -1)).round(5)
left, mask = ts.copy(), ts > 0
right = ts[mask].copy().map(str)
left[mask] = right
assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
s = Series([0, 1, 2, 0])
mask = s > 0
s2 = s[mask].map(str)
s[mask] = s2
assert_series_equal(s, Series([0, '1', '2', 0]))
        s = Series([0, 'foo', 'bar', 0])
# api: pandas.Series
import nrrd
import nibabel as nib
import numpy as np
import pandas as pd
from algorithm.config import *
def load_img_dataset():
img_list = os.listdir(DATA_IMG_EXTENSION_PATH)
    try:
        img_list.remove('.DS_Store')
    except ValueError:
        # '.DS_Store' only exists on macOS; nothing to remove on other platforms
        print('Linux OS')
print('img file count: ', len(img_list))
pid_list = list(map(lambda x: x.split('-')[0], img_list))
df_img = pd.DataFrame({'pid': pid_list, 'file': img_list})
df_img = df_img.groupby(by=['pid'], axis=0, as_index=False).agg(['count', lambda x: ', '.join(x)])
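    # After the groupby/agg above, df_img carries a two-level column index: the
    # ('file', 'count') column holds the number of image files per pid and the
    # lambda column holds the comma-joined file names for that pid.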
df_img_valid = df_img[df_img['file']['count'] == 4]
df_img_invalid = df_img[df_img['file']['count'] != 4]
print('pid with 4 files: ', len(df_img_valid))
# df_img_invalid.to_csv('./error_img.csv', index=True)
return {'valid': df_img_valid, 'invalid': df_img_invalid}
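# Hypothetical usage sketch (not part of the original module): split the image
# index into complete and incomplete patient records before further processing.
#   datasets = load_img_dataset()
#   complete = datasets['valid']      # pids with exactly 4 image files
#   incomplete = datasets['invalid']  # pids with any other file count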
def load_trg_label_dataset():
    df_label = pd.read_excel(TRG_LABEL_PATH)
# api: pandas.read_excel
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
        for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
        idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
# api: pandas.TimedeltaIndex
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.asobject)
assert not idx.asobject.equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
class TestDateTimeIndexToJulianDate(object):
def test_1700(self):
r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5,
2345901.5])
r2 = date_range(start=Timestamp('1710-10-01'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5,
2451605.5])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
r1 = Float64Index(
[2451601.5, 2451601.5416666666666666, 2451601.5833333333333333,
2451601.625, 2451601.6666666666666666])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='H').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
r1 = Float64Index(
[2451601.5, 2451601.5006944444444444, 2451601.5013888888888888,
2451601.5020833333333333, 2451601.5027777777777777])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='T').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
r1 = Float64Index(
[2451601.5, 2451601.500011574074074, 2451601.5000231481481481,
2451601.5000347222222222, 2451601.5000462962962962])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='S').to_julian_date()
assert isinstance(r2, Float64Index)
|
tm.assert_index_equal(r1, r2)
|
pandas.util.testing.assert_index_equal
|
# LLEPE: Liquid-Liquid Equilibrium Parameter Estimator
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See LICENSE for more details.
from datetime import datetime
import cantera as ct
import pandas as pd
import numpy as np
from scipy.optimize import minimize
# noinspection PyPep8Naming
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import shutil
import copy
from inspect import signature
import os
import re
import pkg_resources
from .utils import set_size
class LLEPE:
r"""
Liquid-Liquid Extraction Parameter estimator
.. note::
The order in which the extracted species (ES) appear in the csv file
must be the same order as they appear in the xml, complex_names and
extracted_species_ion_names.
For example, say in exp_data, ES_1 is Nd, ES_2 is Pr,
and
.. code-block:: python
aq_solvent_name = 'H2O(L)'
extractant_name = '(HA)2(org)'
diluent_name = 'dodecane'
Then:
The exp_data column ordering must be (names do not matter):
[h_i, h_eq, z_i, z_eq, Nd_aq_i, Nd_aq_eq, Nd_d_eq,
Pr_aq_i, Pr_aq_eq, Pr_d_eq]
The aqueous speciesArray must be
"H2O(L) H+ OH- Cl- Nd+++ Pr+++"
The organic speciesArray must be
"(HA)2(org) dodecane Nd(H(A)2)3(org) Pr(H(A)2)3(org)"
.. code-block:: python
complex_names = ['Nd(H(A)2)3(org)', 'Pr(H(A)2)3(org)']
extracted_species_ion_names = ['Nd+++', 'Pr+++']
:param exp_data: (str or pd.DataFrame) csv file name
or DataFrame with experimental data
In the .csv file, the rows are different experiments and
columns are the measured quantities.
The ordering of the columns needs to be:
[h_i, h_eq, z_i, z_eq,
{ES_1}_aq_i, {ES_1}_aq_eq, {ES_1}_d_eq,
{ES_2}_aq_i, {ES_2}_aq_eq, {ES_2}_d_eq,...
{ES_N}_aq_i, {ES_N}_aq_eq, {ES_N}_d_eq]
Naming does not matter, just the order.
Where {ES_1}-{ES_N} are the extracted species names of interest
e.g. Nd, Pr, La, etc.
Below is an explanation of the columns.
+-------+------------+------------------------------------------+
| Index | Column | Meaning |
+=======+============+==========================================+
| 0 | h_i | Initial Concentration of |
| | | H+ ions (mol/L) |
+-------+------------+------------------------------------------+
| 1 | h_eq | Equilibrium concentration of |
| | | H+ ions (mol/L) |
+-------+------------+------------------------------------------+
| 2 | z_i | Initial concentration of |
| | | extractant (mol/L) |
+-------+------------+------------------------------------------+
| 3 | z_eq | Equilibrium concentration of |
| | | extractant (mol/L) |
+-------+------------+------------------------------------------+
| 4 | {ES}_aq_i | Initial concentration of ES ions (mol/L) |
+-------+------------+------------------------------------------+
| 5 | {ES}_aq_eq | Equilibrium concentration of ES ions |
| | | in aqueous phase (mol/L) |
+-------+------------+------------------------------------------+
| 6 | {ES}_d_eq | Equilibrium Ratio between amount of |
| | | ES atoms in organic to aqueous |
+-------+------------+------------------------------------------+
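For instance, a two-species (Nd, Pr) input could be laid out as in this
minimal sketch (the column names below are illustrative; only the order
matters):
.. code-block:: python
    import pandas as pd
    exp_data = pd.DataFrame(columns=['h_i', 'h_eq', 'z_i', 'z_eq',
                                     'Nd_aq_i', 'Nd_aq_eq', 'Nd_d_eq',
                                     'Pr_aq_i', 'Pr_aq_eq', 'Pr_d_eq'])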
:param phases_xml_filename: (str) xml file with parameters
for equilibrium calc
Would recommend copying and modifying xmls located in data/xmls
or in Cantera's "data" folder
speciesArray fields need specific ordering.
In aqueous phase: aq_solvent_name, H+, OH-, Cl-, ES_1, ES_2, ..., ES_N
(ES_1-ES_N) represent ES ion names e.g. Nd+++, Pr+++
In organic phase : extractant_name, diluant_name, ES_1, ES_2, ..., ES_N
(ES_1-ES_N) represent ES complex names
e.g. Nd(H(A)2)3(org), Pr(H(A)2)3(org)
:param phase_names: (list) names of phases in xml file
Found in the xml file under <phase ... id={phase_name}>
:param aq_solvent_name: (str) name of aqueous solvent in xml file
:param extractant_name: (str) name of extractant in xml file
:param diluant_name: (str) name of diluant in xml file
:param complex_names: (list) names of complexes in xml file.
:param extracted_species_ion_names: (list) names of extracted species ions
in xml file
:param extracted_species_list: (list) names of extracted species elements.
If ``None``, extracted_species_list will be extracted_species_ion_names
without '+' e.g. 'Nd+++'->'Nd'
:param aq_solvent_rho: (float) density of solvent (g/L)
If ``None``, molar volume/molecular weight is used from xml
:param extractant_rho: (float) density of extractant (g/L)
If ``None``, molar volume/molecular weight is used from xml
:param diluant_rho: (float) density of diluant (g/L)
If ``None``, molar volume/molecular weight is used from xml
:param opt_dict: (dict) dictionary containing info about which
species parameters are updated to fit model to experimental data
Should have the format as below. Dictionary keys under user defined
parameter name must be named as shown below ('upper_element_name',
'upper_attrib_name', etc.). 'attrib_name's and 'attrib_value's can
be None. {} denotes areas for user to fill in.
.. code-block:: python
opt_dict = {"{user_defined_name_for_parameter_1}":
{'upper_element_name': {param_upper_element},
'upper_attrib_name': {param_upper_attrib_name},
'upper_attrib_value': {param_upper_attrib_value},
'lower_element_name': {param_lower_element},
'lower_attrib_name': {param_lower_attrib_name},
'lower_attrib_value': {param_lower_attrib_value},
'input_format': {str format to input input_value}
'input_value': {guess_value}},
"{user_defined_name_for_parameter_2}":
...
...
}
See example files for more examples.
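As a purely illustrative sketch (the element and attribute names below are
assumptions about the xml layout, not taken from a shipped file), a single
entry might look like:
.. code-block:: python
    opt_dict = {'Nd_h0':
                {'upper_element_name': 'species',
                 'upper_attrib_name': 'name',
                 'upper_attrib_value': 'Nd(H(A)2)3(org)',
                 'lower_element_name': 'h0',
                 'lower_attrib_name': None,
                 'lower_attrib_value': None,
                 'input_format': '{0}',
                 'input_value': -4.7e6}}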
:param objective_function: (function or str) function to compute objective
By default, the objective function is log mean squared error
of distribution ratio
.. code-block:: python
np.sum((np.log10(d_pred) - np.log10(d_meas))**2)
Function needs to take inputs:
.. code-block:: python
objective_function(predicted_dict, measured_df, kwargs)
``kwargs`` is optional
Function needs to return: (float) value computed by objective function
Below is the guide for referencing predicted values
+---------------------------+--------------------------------+
| To access | Use |
+===========================+================================+
| hydrogen ion conc in aq | predicted_dict['h_eq'] |
+---------------------------+--------------------------------+
| extractant conc in org | predicted_dict['z_eq'] |
+---------------------------+--------------------------------+
| ES ion eq conc in aq | predicted_dict['{ES}_aq_eq'] |
+---------------------------+--------------------------------+
| ES complex eq conc in org | predicted_dict['{ES}_org_eq'] |
+---------------------------+--------------------------------+
| ES distribution ratio | predicted_dict['{ES}_d_eq'] |
+---------------------------+--------------------------------+
Replace "{ES}" with extracted species element e.g. Nd, La, etc.
For measured values, use the same names, but
replace ``predicted_dict`` with ``measured_df``
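For example, a custom objective with the same call signature (a sketch that
assumes an extracted species named Nd and only the keys documented above)
could be:
.. code-block:: python
    def mean_abs_log_error(predicted_dict, measured_df):
        d_pred = np.asarray(predicted_dict['Nd_d_eq'], dtype=float)
        d_meas = np.asarray(measured_df['Nd_d_eq'], dtype=float)
        return float(np.mean(np.abs(np.log10(d_pred) - np.log10(d_meas))))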
:param optimizer: (function or str) function to perform optimization
.. note::
The optimized variables are not directly the species parameters,
but instead are first multiplied by the initial guess before
being set as the species parameters.
For example, say
.. code-block:: python
opt_dict = {'Nd(H(A)2)3(org)': {'h0': -4.7e6}}
If the bounds on h0 need to be [-4.7e7,-4.7e5], then
divide the bounds by the guess and get
.. code-block:: python
"bounds": [(1e-1, 1e1)]
By default, the optimizer is scipy's optimize function with
.. code-block:: python
default_kwargs= {"method": 'SLSQP',
"bounds": [(1e-1, 1e1)] * len(x_guess),
"constraints": (),
"options": {'disp': True,
'maxiter': 1000,
'ftol': 1e-6}}
Function needs to take inputs:
``optimizer(objective_function, x_guess, kwargs)``
``kwargs`` is optional
Function needs to return: ((np.ndarray, float)) Optimized parameters,
objective_function value
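A minimal custom optimizer sketch, assuming (as with the default) that the
objective it receives is callable on the scaled parameter vector:
.. code-block:: python
    from scipy.optimize import minimize
    def nelder_mead_optimizer(objective_function, x_guess, **kwargs):
        res = minimize(objective_function, x_guess, method='Nelder-Mead')
        return res.x, res.fun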
:param temp_xml_file_path: (str) path to temporary xml file.
This xml file is a duplicate of the phases_xml_file name and is
modified during the optimization process to avoid changing the original
xml file
default is local temp folder
:param dependant_params_dict: (dict) dictionary containing information
about parameters dependant on opt_dict. Has a similar structure to
opt_dict except instead of input values, it has 3 other fields:
'function', 'kwargs', and 'independent_params'.
'function' is a function of the form
``function(independent_param__value_list, custom_objects_dict,
**kwargs)``
'kwargs' are the extra arguments to pass to function
'independent_params' is a list of parameter names in opt_dict that the
dependent_param is a function of.
'custom_objects_dict' is for accessing the estimator's internal
custom_objects_dict and must be included in the arguments, even if the
custom_objects_dict is not set and is None.
See example code for usage.
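A hypothetical dependent-parameter function (the linear relationship and the
parameter ordering are illustrative assumptions, not taken from the examples)
could be:
.. code-block:: python
    def double_first_param(independent_param_value_list,
                           custom_objects_dict, scale=2.0):
        # e.g. a parameter pinned to a multiple of the first optimized value
        return scale * independent_param_value_list[0]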
:param custom_objects_dict: (dict) dictionary containing custom objects
format: {<object_name_string>: <object>,...}
"""
def __init__(self,
exp_data,
phases_xml_filename,
phase_names,
aq_solvent_name,
extractant_name,
diluant_name,
complex_names,
extracted_species_ion_names,
extracted_species_list=None,
aq_solvent_rho=None,
extractant_rho=None,
diluant_rho=None,
opt_dict=None,
objective_function='Log-MSE',
optimizer='scipy_minimize',
temp_xml_file_path=None,
dependant_params_dict=None,
custom_objects_dict=None
):
self._built_in_obj_list = ['Log-MSE']
self._built_in_opt_list = ['scipy_minimize']
self._exp_data = exp_data
self._phases_xml_filename = phases_xml_filename
self._opt_dict = opt_dict
self._phase_names = phase_names
self._aq_solvent_name = aq_solvent_name
self._extractant_name = extractant_name
self._diluant_name = diluant_name
self._complex_names = complex_names
self._extracted_species_ion_names = extracted_species_ion_names
self._aq_solvent_rho = aq_solvent_rho
self._extractant_rho = extractant_rho
self._diluant_rho = diluant_rho
self._objective_function = None
self.set_objective_function(objective_function)
self._optimizer = None
self._extracted_species_list = extracted_species_list
self.set_optimizer(optimizer)
if temp_xml_file_path is None:
temp_xml_file_path = r'{0}/temp.xml'.format(os.getenv('TEMP'))
self._temp_xml_file_path = temp_xml_file_path
self._dependant_params_dict = dependant_params_dict
self._custom_objects_dict = custom_objects_dict
# Try and except for adding package data to path.
# This only works for sdist, not bdist
# If bdist is needed, research "manifest.in" python setup files
try:
shutil.copyfile(self._phases_xml_filename,
self._temp_xml_file_path)
self._phases = ct.import_phases(self._phases_xml_filename,
phase_names)
except FileNotFoundError:
self._phases_xml_filename = \
pkg_resources.resource_filename('llepe',
'../data/xmls/{0}'.format(
phases_xml_filename))
shutil.copyfile(self._phases_xml_filename,
self._temp_xml_file_path)
self._phases = ct.import_phases(self._phases_xml_filename,
phase_names)
if isinstance(self._exp_data, str):
try:
self._exp_df =
|
pd.read_csv(self._exp_data)
|
pandas.read_csv
|
from mpi4py import MPI
import process_helpers.wordCloud as wordCloud
import process_helpers.bagOfWords as bagOfWords
import process_helpers.sentimentAnalysis as sentimentAnalysis
import process_helpers.outputter as outputter
import configs
import pandas as pd
from collections import OrderedDict
import re # we can do "import regex" if needed since python re module does not support \K which is a regex resetting the beginning of a match and starts from the current point
from datetime import datetime
import timeit
import functools
print = functools.partial(print, flush=True)  # flush prints by default (needed to see the outputs of multiple processes in a sensible order)
FILES_TO_READ = ['ELECTRONICS (LAPTOPS)', 'SPORTS', 'TOOLS & HOME IMPROVEMENT' ] # csv files
START_TIME = datetime.now()
NUMBER_OF_ROWS_PROCESSED = 0 # set by the master process in multi-processing or by the only process in single-processing in the process() function
def main():
# COMM VARIABLES
global comm, nprocs, rank
comm = MPI.COMM_WORLD
nprocs = comm.Get_size() # for multiprocessing there are nprocs-1 slaves (their ranks are 1, 2, ... nprocs-1) and 1 master (its rank is 0) whereas for single-processing nprocs is 1 and the process' rank is 0.
rank = comm.Get_rank()
if nprocs > 1:
if rank == configs.MASTER_PROCESS_RANK: # print it only once
print("Parallel execution")
else:
print("Serial Execution")
tp = timeit.Timer("process()", "from __main__ import process")
average_duration_seconds = tp.timeit(number=configs.NUMBER_OF_REPEATS_TIMEIT) / configs.NUMBER_OF_REPEATS_TIMEIT # calls process function (for each process) NUMBER_OF_REPEATS_TIMEIT times.
if (nprocs > 1 and rank == configs.MASTER_PROCESS_RANK) or (nprocs == 1 and rank == 0):
outputter.output_timing_results(average_duration_seconds, START_TIME, nprocs, NUMBER_OF_ROWS_PROCESSED)
def process():
global NUMBER_OF_ROWS_PROCESSED
if nprocs > 1:
if rank != configs.MASTER_PROCESS_RANK: # if slave
df_correspondingRows = comm.recv(source=configs.MASTER_PROCESS_RANK) # receive the dataframe rows assigned to this slave
comm.send(get_wordCloud_bagOfWords_dicts_and_getSentimentAnalysis_df(df_correspondingRows) , dest=configs.MASTER_PROCESS_RANK) # send processed results to master
else: # if master
all_dfs = readAllFiles_and_return_df()
NUMBER_OF_ROWS_PROCESSED = all_dfs.shape[0]
print("Total #of rows processed is: {0} ({1}% of each of the input csv file rows are processed)\n".format(NUMBER_OF_ROWS_PROCESSED, configs.READING_RATIO_FOR_INPUT_CSVs * 100))
################## LOAD BALANCE THE DATAFRAME ROWS ACROSS ALL PROCESSES ##################
distributed_dfs_forEachProcess, startAndEnds_for_distributed_dfs_forEachProcess = loadBalance_dataframe_toProcesses(all_dfs, nprocs-1)
distributed_dfs_index = 0
for proc_index in range(nprocs):
if proc_index != configs.MASTER_PROCESS_RANK:
print("Proccess {0} is responsible for the rows between {1} and {2}\n".format(proc_index, *startAndEnds_for_distributed_dfs_forEachProcess[distributed_dfs_index] ) )
comm.send(distributed_dfs_forEachProcess[distributed_dfs_index], dest=proc_index)
distributed_dfs_index += 1
wordCloudDict_merged = {}
bagOfWords_dict_merged = OrderedDict()
sentimentAnalysis_dict_merged = OrderedDict()
df_sentimentAnalysis_merged = pd.DataFrame()
for proc_index in range(nprocs):
if proc_index != configs.MASTER_PROCESS_RANK:
wordCloudDict, bagOfWords_dict, sentimentAnalysis_dict, df_sentimentAnalysis = comm.recv(source=proc_index)
wordCloud.append_wordCloudDict(wordCloudDict_merged, wordCloudDict)
bagOfWords.append_bagOfWords_dict(bagOfWords_dict_merged, bagOfWords_dict)
sentimentAnalysis.append_sentimentAnalysis_dict(sentimentAnalysis_dict_merged, sentimentAnalysis_dict)
df_sentimentAnalysis_merged = appendAndReturn_df(df_sentimentAnalysis_merged, df_sentimentAnalysis)
outputter.finalize_wordCloud_bagOfWords_sentimentAnalysis_outputs(wordCloudDict_merged, bagOfWords_dict_merged, sentimentAnalysis_dict_merged, df_sentimentAnalysis_merged)
else: # IF A SINGLE PROCESS RUNS ONLY (nprocs == 1, process with rank 0)
all_dfs = readAllFiles_and_return_df()
NUMBER_OF_ROWS_PROCESSED = all_dfs.shape[0]
print("Total #of rows processed is: {0} ({1}% of each of the input csv file rows are processed)\n".format(NUMBER_OF_ROWS_PROCESSED, configs.READING_RATIO_FOR_INPUT_CSVs * 100))
wordCloudDict, bagOfWords_dict, sentimentAnalysis_dict, df_sentimentAnalysis = get_wordCloud_bagOfWords_dicts_and_getSentimentAnalysis_df(all_dfs)
outputter.finalize_wordCloud_bagOfWords_sentimentAnalysis_outputs(wordCloudDict, bagOfWords_dict, sentimentAnalysis_dict, df_sentimentAnalysis)
def readAllFiles_and_return_df():
category_for_each_row = []
subcategory_for_each_row = []
df_list = []
for file_to_read in FILES_TO_READ:
curr_df = read_csv_custom(file_to_read)
df_list .append(curr_df)
category, subcategory = get_category_subcategory(file_to_read)
category_for_each_row .extend( [category] * curr_df.shape[0] )
subcategory_for_each_row .extend( [subcategory] * curr_df.shape[0] )
all_dfs = pd.concat(df_list, ignore_index=True)
all_dfs['Category'] = category_for_each_row
all_dfs['Subcategory'] = subcategory_for_each_row
all_dfs = all_dfs[all_dfs['Product Ratings']!='Product Ratings'] # remove repeated header rows (multiple headers can appear if the webscraper is run more than once to build the output .csv for a category)
all_dfs['Product Ratings'] = pd.to_numeric(all_dfs['Product Ratings'], downcast='integer')
return all_dfs
def appendAndReturn_df(df_merged, df_to_append):
'''
pandas dataframe does not support in-place append; so we return the new dataframe
Assign the result to "df_merged" in the calling function to see the effect (to update the original df_merged)
'''
if not df_to_append.empty:
return df_merged.append(df_to_append, ignore_index=not df_merged.empty)
return df_merged
def read_csv_custom(file_to_read):
'''
Parameters:
file_to_read (str): csv file name to read without the extension
Returns:
pandas dataframe as a result of reading the file while also considering 'READING_RATIO_FOR_INPUT_CSVs' config parameter.
'''
df = pd.read_csv(file_to_read+".csv", quotechar='"', encoding='utf-8')
numberOfRows_toProcess = int(configs.READING_RATIO_FOR_INPUT_CSVs * df.shape[0])
return df[0:numberOfRows_toProcess]
def loadBalance_dataframe_toProcesses(df_to_distribute, numberOfSlaveProcesses):
'''
Parameters:
- df_to_distribute (pd.DataFrame object) The whole dataframe to be divided among multiple processes
- numberOfSlaveProcesses (int): #of worker processes that the dataframe should be distributed to equally (or almost equally)
Returns:
- distributed_dfs_forEachProcess: A list of pd.DataFrame objects for each process respectively (the object at index 0, 1, 2 represents the dataframe to process for process 0, 1, 2 ... etc.). At each index, this variable contains a certain portion (some rows) of the 'df_to_distribute' input parameter.
- startAndEnds_for_distributed_dfs_forEachProcess: A list of (start, end) index pairs to know starting / ending rows for each process to process.
NOTE: This function is only meaningful when nprocs > 1 is True
'''
distributed_dfs_forEachProcess = []
startAndEnds_for_distributed_dfs_forEachProcess = []
number_of_rows_to_process = df_to_distribute.shape[0]
# number_of_rows_each_process holds the #of rows distributed to each process (e.g. for a total of 299 rows and 3 slave processes: 100, 100 and 99 rows respectively for process 0, 1 and 2 respectively.)
least_number_of_rows_for_each_process=number_of_rows_to_process // numberOfSlaveProcesses
number_of_processes_with_one_extra_row=number_of_rows_to_process % numberOfSlaveProcesses
number_of_rows_each_process=[least_number_of_rows_for_each_process+1 if i<number_of_processes_with_one_extra_row
else least_number_of_rows_for_each_process
for i in range(numberOfSlaveProcesses)]
# send relevant portions of the dataframe to corresponding processes (e.g. for 299 dataframes and 3 slave processes: 0:100, 100:200, 200:299 for process 0, 1 and 2 respectively)
start = 0
end = 0
for slave_proc_index in range(numberOfSlaveProcesses):
end = number_of_rows_each_process[slave_proc_index] + end
startAndEnds_for_distributed_dfs_forEachProcess.append((start, end))
distributed_dfs_forEachProcess.append(df_to_distribute[start:end])
start = end
return distributed_dfs_forEachProcess, startAndEnds_for_distributed_dfs_forEachProcess
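# Usage sketch (illustrative numbers only, kept as a comment so nothing runs at
# import time): with a 10-row dataframe and 3 slave processes, the split above
# yields row ranges (0, 4), (4, 7) and (7, 10), e.g.
# chunks, bounds = loadBalance_dataframe_toProcesses(pd.DataFrame(range(10)), 3)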
def get_wordCloud_bagOfWords_dicts_and_getSentimentAnalysis_df(df_correspondingRows):
# print("category is: " + category)
# print("subcategory is: " + subcategory)
wordCloudDict = {}
bagOfWords_dict = OrderedDict()
sentimentAnalysis_dict = OrderedDict()
df_sentimentAnalysis =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import numpy as np
from cassandra.cluster import Cluster
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import dict_factory
from pandas import DataFrame
from pandas import pandas as pd
def eth_address_to_hex(address):
if type(address) != bytes:
return address
return '0x' + address.hex()
def eth_address_from_hex(address):
# eth addresses are case insensitive
try:
b = bytes.fromhex(address[2:].lower())
except Exception:
return None
return b
_CONCURRENCY = 100
class GraphSense(object):
def __init__(self, hosts: list, ks_map: dict):
self.hosts = hosts
self.ks_map = ks_map
self.cluster = Cluster(hosts)
self.session = self.cluster.connect()
self.session.row_factory = dict_factory
def close(self):
self.cluster.shutdown()
print(f"Disconnected from {self.hosts}")
def _execute_query(self, statement, parameters):
"""Generic query execution"""
results = execute_concurrent_with_args(
self.session, statement, parameters, concurrency=_CONCURRENCY)
i = 0
all_results = []
for (success, result) in results:
if not success:
print('failed: ' + str(result))
else:
for row in result:
i = i+1
all_results.append(row)
return
|
pd.DataFrame.from_dict(all_results)
|
pandas.pandas.DataFrame.from_dict
|
# -*- coding: utf-8 -*-
import pandas
import numpy
import sys
import unittest
from datetime import datetime
from pandas.testing import assert_frame_equal, assert_series_equal
import os
import copy
sys.path.append("..")
import warnings
import nPYc
from nPYc.enumerations import SampleType
from nPYc.enumerations import AssayRole
from nPYc.enumerations import VariableType
from generateTestDataset import generateTestDataset
import tempfile
from isatools import isatab
class test_msdataset_synthetic(unittest.TestCase):
"""
Test MSDataset object functions with synthetic data
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata = pandas.DataFrame(
{'Sample File Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'Sample Base Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'AssayRole': [AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference],
'SampleType': [SampleType.StudySample, SampleType.StudyPool, SampleType.ExternalReference],
'Sample Name': ['Sample1', 'Sample2', 'Sample3'], 'Acqu Date': ['26-May-17', '26-May-17', '26-May-17'],
'Acqu Time': ['16:42:57', '16:58:49', '17:14:41'], 'Vial': ['1:A,1', '1:A,2', '1:A,3'],
'Instrument': ['XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest'],
'Acquired Time': [datetime(2017, 5, 26, 16, 42, 57), datetime(2017, 5, 26, 16, 58, 49),
datetime(2017, 5, 26, 17, 14, 41)], 'Run Order': [0, 1, 2], 'Batch': [1, 1, 2],
'Correction Batch': [numpy.nan, 1, 2], 'Matrix': ['U', 'U', 'U'],
'Subject ID': ['subject1', 'subject1', 'subject2'], 'Sample ID': ['sample1', 'sample2', 'sample3'],
'Dilution': [numpy.nan, '60.0', '100.0'],'Exclusion Details': ['','','']})
self.msData.featureMetadata = pandas.DataFrame(
{'Feature Name': ['Feature1', 'Feature2', 'Feature3'], 'Retention Time': [6.2449, 2.7565, 5.0564],
'm/z': [249.124281, 381.433191, 471.132083]})
self.msData.featureMetadata['Exclusion Details'] = None
self.msData.featureMetadata['User Excluded'] = False
self.msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=self.msData.featureMetadata.index)
self.msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=self.msData.featureMetadata.index)
self.msData._intensityData = numpy.array([[10.2, 20.95, 30.37], [10.1, 20.03, 30.74], [3.065, 15.83, 30.16]])
# Attributes
self.msData.Attributes['FeatureExtractionSoftware'] = 'UnitTestSoftware'
# excluded data
self.msData.sampleMetadataExcluded = []
self.msData.intensityDataExcluded = []
self.msData.featureMetadataExcluded = []
self.msData.excludedFlag = []
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[0, :])
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata)
self.msData.excludedFlag.append('Samples')
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[:, 0])
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata)
self.msData.excludedFlag.append('Features')
# finish
self.msData.VariableType = VariableType.Discrete
self.msData.initialiseMasks()
def test_rsd_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='No reference samples'):
msData.sampleMetadata = pandas.DataFrame(None)
with self.assertRaises(ValueError):
msData.rsdSP
with self.subTest(msg='Only one reference sample'):
msData.sampleMetadata = pandas.DataFrame([[nPYc.enumerations.AssayRole.PrecisionReference, nPYc.enumerations.SampleType.StudyPool]], columns=['AssayRole', 'SampleType'])
with self.assertRaises(ValueError):
msData.rsdSP
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
msData = nPYc.MSDataset('', fileType='empty')
msData.sampleMetadata['Sample File Name'] = ['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02_x',
'Test2_RPOS_ToF02_U2W03_b',
'Test3_RNEG_ToF03_S3W04_2',
'Test4_RPOS_ToF04_B1S1_SR_q',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01_9',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21']
msData._getSampleMetadataFromFilename(msData.Attributes['filenameSpec'])
##
# Check basename
##
basename = pandas.Series(['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_RPOS_ToF04_B1S1_SR',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21'],
name='Sample Base Name',
dtype='str')
assert_series_equal(msData.sampleMetadata['Sample Base Name'], basename)
##
# Check Study
##
study = pandas.Series(['Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test1',
'Test1'],
name='Study',
dtype='str')
assert_series_equal(msData.sampleMetadata['Study'], study)
##
# Check Chromatography
##
chromatography = pandas.Series(['H',
'R',
'R',
'L',
'L',
'H',
'H',
'R',
'R',
'R',
'L',
'L',
'H',
'H',
'H'],
name='Chromatography',
dtype='str')
assert_series_equal(msData.sampleMetadata['Chromatography'], chromatography)
##
# Check Ionisation
##
ionisation = pandas.Series(['POS',
'POS',
'NEG',
'POS',
'NEG',
'POS',
'POS',
'POS',
'NEG',
'POS',
'POS',
'NEG',
'POS',
'POS',
'POS'],
name='Ionisation',
dtype='str')
assert_series_equal(msData.sampleMetadata['Ionisation'], ionisation)
##
# Check Instrument
##
instrument = pandas.Series(['ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF06',
'ToF06',
'ToF06'],
name='Instrument',
dtype='str')
assert_series_equal(msData.sampleMetadata['Instrument'], instrument)
##
# Check Re-Run
##
reRun = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'b',
'',
'q',
'',
'',
'',
'',
''],
name='Re-Run',
dtype='str')
assert_series_equal(msData.sampleMetadata['Re-Run'], reRun)
##
# Check Suplemental Injections
##
suplemental = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'',
'2',
'',
'',
'9',
'',
'',
''],
name='Suplemental Injections',
dtype='str')
assert_series_equal(msData.sampleMetadata['Suplemental Injections'], suplemental)
##
# Check Skipped
##
skipped = pandas.Series([False,
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False],
name='Skipped',
dtype='bool')
assert_series_equal(msData.sampleMetadata['Skipped'], skipped)
##
# Check Matrix
##
matrix = pandas.Series(['P',
'U',
'S',
'P',
'U',
'S',
'P',
'U',
'S',
'',
'',
'',
'',
'',
''],
name='Matrix',
dtype='str')
assert_series_equal(msData.sampleMetadata['Matrix'], matrix)
##
# Check Well
##
well = pandas.Series([2,
3,
4,
5,
6,
5,
2,
3,
4,
1,
2,
1,
-1,
-1,
-1],
name='Well',
dtype='int')
assert_series_equal(msData.sampleMetadata['Well'], well, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Well'].dtype.kind, well.dtype.kind)
##
# Check Plate
##
plate = pandas.Series([1,
2,
3,
4,
5,
4,
1,
2,
3,
1,
2,
3,
1,
2,
21],
name='Plate',
dtype='int')
assert_series_equal(msData.sampleMetadata['Plate'], plate, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Plate'].dtype.kind, plate.dtype.kind)
##
# Check Batch
##
batch = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
2.0,
3.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Batch',
dtype='float')
assert_series_equal(msData.sampleMetadata['Batch'], batch)
##
# Check Dilution
##
dilution = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(msData.sampleMetadata['Dilution'], dilution)
##
# Check AssayRole
##
assayRole = pandas.Series([AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.Assay,
AssayRole.Assay],
name='AssayRole',
dtype=object)
assert_series_equal(msData.sampleMetadata['AssayRole'], assayRole)
##
# Check SampleType
##
sampleType = pandas.Series([SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.StudyPool,
SampleType.MethodReference,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.ProceduralBlank,
SampleType.StudyPool,
SampleType.StudyPool],
name='SampleType',
dtype=object)
assert_series_equal(msData.sampleMetadata['SampleType'], sampleType)
def test_updateMasks_features(self):
msData = nPYc.MSDataset('', fileType='empty')
msData.Attributes['artifactualFilter'] = True
##
# Variables:
# Good Corr, Good RSD
# Poor Corr, Good RSD
# Good Corr, Poor RSD
# Poor Corr, Poor RSD
# Good Corr, Good RSD, below blank
##
msData.intensityData = numpy.array([[100, 23, 99, 51, 100],
[90, 54, 91, 88, 91],
[50, 34, 48, 77, 49],
[10, 66, 11, 56, 11],
[1, 12, 2, 81, 2],
[50, 51, 2, 12, 49],
[51, 47, 1, 100, 50],
[47, 50, 70, 21, 48],
[51, 49, 77, 91, 50],
[48, 49, 12, 2, 49],
[50, 48, 81, 2, 51],
[54, 53, 121, 52, 53],
[57, 49, 15, 51, 56],
[140, 41, 97, 47, 137],
[52, 60, 42, 60, 48],
[12, 48, 8, 56, 12],
[1, 2, 1, 1.21, 51],
[2, 1, 1.3, 1.3, 63]],
dtype=float)
msData.sampleMetadata = pandas.DataFrame(data=[[100, 1, 1, 1, AssayRole.LinearityReference, SampleType.StudyPool],
[90, 1, 1, 2, AssayRole.LinearityReference, SampleType.StudyPool],
[50, 1, 1, 3, AssayRole.LinearityReference, SampleType.StudyPool],
[10, 1, 1, 4, AssayRole.LinearityReference, SampleType.StudyPool],
[1, 1, 1, 5, AssayRole.LinearityReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank]],
columns=['Dilution', 'Batch', 'Correction Batch', 'Well', 'AssayRole', 'SampleType'])
msData.featureMetadata = pandas.DataFrame(data=[['Feature_1', 0.5, 100., 0.3],
['Feature_2', 0.55, 100.04, 0.3],
['Feature_3', 0.75, 200., 0.1],
['Feature_4', 0.9, 300., 0.1],
['Feature_5', 0.95, 300.08, 0.1]],
columns=['Feature Name','Retention Time','m/z','Peak Width'])
msData.featureMetadata['Exclusion Details'] = None
msData.featureMetadata['User Excluded'] = False
msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=msData.featureMetadata.index)
msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=msData.featureMetadata.index)
msData.initialiseMasks()
with self.subTest(msg='Default Parameters'):
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax RSD threshold'):
expectedFeatureMask = numpy.array([True, False, True, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=90, varianceRatio=0.1, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax correlation threshold'):
expectedFeatureMask = numpy.array([True, True, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter': True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=1.1, corrThreshold=0))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='High variance ratio'):
expectedFeatureMask = numpy.array([False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=100, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(blankThreshold=0.5))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='No blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':False})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Default withArtifactualFiltering'):
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1],[3,4]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData._tempArtifactualLinkageMatrix)
with self.subTest(msg='Altered withArtifactualFiltering parameters'):
expectedArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True}, **dict(deltaMzArtifactual=300,
overlapThresholdArtifactual=0.1,
corrThresholdArtifactual=0.2))
self.assertEqual(msData.Attributes['filterParameters']['deltaMzArtifactual'], 300)
self.assertEqual(msData.Attributes['filterParameters']['overlapThresholdArtifactual'], 0.1)
self.assertEqual(msData.Attributes['filterParameters']['corrThresholdArtifactual'], 0.2)
assert_frame_equal(expectedArtifactualLinkageMatrix, msData._artifactualLinkageMatrix)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=False'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = False
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': False, 'blankFilter': True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData2.featureMask)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=True'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = True
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0, 1], [3, 4]], columns=['node1', 'node2'])
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData2._tempArtifactualLinkageMatrix)
def test_updateMasks_samples(self):
from nPYc.enumerations import VariableType, DatasetLevel, AssayRole, SampleType
msData = nPYc.MSDataset('', fileType='empty')
msData.intensityData = numpy.zeros([18, 5],dtype=float)
msData.sampleMetadata['AssayRole'] = pandas.Series([AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference],
name='AssayRole',
dtype=object)
msData.sampleMetadata['SampleType'] = pandas.Series([SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.MethodReference],
name='SampleType',
dtype=object)
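# These subTests run updateMasks with filterFeatures=False, so only the sample mask is expected to change.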
with self.subTest(msg='Default Parameters'):
expectedSampleMask = numpy.array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False)
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export SP and ER'):
expectedSampleMask = numpy.array([False, False, False, False, False, True, True, True, True, True, True, False, False, False, False, False, True, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool, SampleType.ExternalReference],
assayRoles=[AssayRole.PrecisionReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export Dilution Samples only'):
expectedSampleMask = numpy.array([True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool],
assayRoles=[AssayRole.LinearityReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
def test_updateMasks_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='Correlation'):
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=-1.01))
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(corrThreshold='0.7'))
with self.subTest(msg='RSD'):
self.assertRaises(ValueError, msData.updateMasks, **dict(rsdThreshold=-1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(rsdThreshold='30'))
with self.subTest(msg='Blanks'):
self.assertRaises(TypeError, msData.updateMasks, **dict(blankThreshold='A string'))
with self.subTest(msg='Variance Ratio'):
self.assertRaises(TypeError, msData.updateMasks, **dict(varianceRatio='1.1'))
with self.subTest(msg='ArtifactualParameters'):
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':'A string', 'rsdFilter':False, 'blankFilter': False,
'correlationToDilutionFilter':False, 'varianceRatioFilter':False}, **dict(blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=1.01, blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=-0.01, blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual='0.7', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(deltaMzArtifactual='100', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(overlapThresholdArtifactual='0.5', blankThreshold=False))
def test_applyMasks(self):
fit = numpy.random.randn(self.msData.noSamples, self.msData.noFeatures)
self.msData.fit = copy.deepcopy(fit)
deletedFeatures = numpy.random.randint(0, self.msData.noFeatures, size=2)
self.msData.featureMask[deletedFeatures] = False
fit = numpy.delete(fit, deletedFeatures, 1)
self.msData.applyMasks()
numpy.testing.assert_array_almost_equal(self.msData.fit, fit)
def test_correlationToDilution(self):
from nPYc.utilities._internal import _vcorrcoef
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset', sop='GenericMS')
dataset.sampleMetadata['SampleType'] = nPYc.enumerations.SampleType.StudyPool
dataset.sampleMetadata['AssayRole'] = nPYc.enumerations.AssayRole.LinearityReference
dataset.sampleMetadata['Well'] = 1
dataset.sampleMetadata['Dilution'] = numpy.linspace(1, noSamp, num=noSamp)
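# The correlationToDilution property is expected to agree with a direct call to the internal _vcorrcoef helper on the same intensity data and dilution values.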
correlations = dataset.correlationToDilution
with self.subTest(msg='Checking default path'):
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
with self.subTest(msg='Checking corr exclusions'):
dataset.corrExclusions = None
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
def test_correlateToDilution_raises(self):
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset')
with self.subTest(msg='Unknown correlation type'):
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution, method='unknown')
with self.subTest(msg='No LR samples'):
dataset.sampleMetadata['AssayRole'] = AssayRole.Assay
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution)
with self.subTest(msg='No Dilution field'):
dataset.sampleMetadata.drop(['Dilution'], axis=1, inplace=True)
self.assertRaises(KeyError, dataset._MSDataset__correlateToDilution)
def test_validateObject(self):
with self.subTest(msg='validateObject successful on correct dataset'):
goodDataset = copy.deepcopy(self.msData)
self.assertEqual(goodDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True), {'Dataset': True, 'BasicMSDataset':True ,'QC':True, 'sampleMetadata':True})
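# Each remaining subTest breaks one attribute or column and checks that the validation dictionary degrades in the expected order (Dataset -> BasicMSDataset -> QC -> sampleMetadata) and, where relevant, that raiseError surfaces the matching exception.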
with self.subTest(msg='BasicMSDataset fails on empty MSDataset'):
badDataset = nPYc.MSDataset('', fileType='empty')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset':False ,'QC':False, 'sampleMetadata':False})
with self.subTest(msg='check raise no warnings with raiseWarning=False'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject and capture any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 0)
with self.subTest(msg='check fail and raise warnings on bad Dataset'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'featureMetadata')
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject and capture any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': False, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 5)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.featureMetadata'" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to Dataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not conform to basic MSDataset" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have QC parameters" in str(w[3].message)
assert issubclass(w[4].category, UserWarning)
assert "Does not have sample metadata information" in str(w[4].message)
with self.subTest(msg='check raise warnings BasicMSDataset'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject and capture any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 4)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.Attributes['rtWindow']" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to basic MSDataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have QC parameters" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have sample metadata information" in str(w[3].message)
with self.subTest(msg='check raise warnings QC parameters'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject and capture any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 3)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata['Batch']' is <class 'str'>" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have QC parameters:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[2].message)
with self.subTest(msg='check raise warnings sampleMetadata'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# run validateObject and capture any warnings raised
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 2)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata' lacks a 'Subject ID' column" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[1].message)
with self.subTest(msg='self.Attributes[\'rtWindow\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rtWindow\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rtWindow'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'msPrecision\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['msPrecision']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'msPrecision\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['msPrecision'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'varianceRatio\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['varianceRatio']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'varianceRatio\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['varianceRatio'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'blankThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['blankThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'blankThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['blankThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrMethod\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrMethod']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrMethod\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrMethod'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'rsdThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rsdThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rsdThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rsdThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'deltaMzArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['deltaMzArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'deltaMzArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['deltaMzArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'overlapThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['overlapThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'overlapThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['overlapThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'FeatureExtractionSoftware\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['FeatureExtractionSoftware']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'FeatureExtractionSoftware\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['FeatureExtractionSoftware'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Raw Data Path\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Raw Data Path']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Raw Data Path\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Raw Data Path'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Feature Names\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Feature Names']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Feature Names\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Feature Names'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.VariableType is not an enum VariableType'):
badDataset = copy.deepcopy(self.msData)
badDataset.VariableType = 'not an enum'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.corrExclusions does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'corrExclusions')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._correlationToDilution does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_correlationToDilution')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._correlationToDilution is not a numpy.ndarray'):
badDataset = copy.deepcopy(self.msData)
badDataset._correlationToDilution = 'not a numpy.ndarray'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._artifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_artifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._artifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._artifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._tempArtifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_tempArtifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._tempArtifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._tempArtifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.fileName does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'fileName')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.fileName is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.fileName = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.filePath does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'filePath')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.filePath is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.filePath = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample File Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample File Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'AssayRole\'] is not an enum \'AssayRole\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['AssayRole'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'SampleType\'] is not an enum \'SampleType\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['SampleType'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Dilution\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Dilution'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Correction Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Correction Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Run Order\'] is not an int'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Run Order'] = 'not an int'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Acquired Time\'] is not a datetime'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Acquired Time'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample Base Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample Base Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Matrix column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Matrix'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Matrix\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Matrix'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Subject ID column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Subject ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Subject ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not unique'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = ['Feature1','Feature1','Feature1']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a m/z column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['m/z'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'m/z\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['m/z'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a Retention Time column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['Retention Time'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Retention Time\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Retention Time'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
class test_msdataset_batch_inference(unittest.TestCase):
"""
Check batches are generated and amended correctly
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata['Sample File Name'] = ['Test_RPOS_ToF04_B1S1_SR',
'Test_RPOS_ToF04_B1S2_SR',
'Test_RPOS_ToF04_B1S3_SR',
'Test_RPOS_ToF04_B1S4_SR',
'Test_RPOS_ToF04_B1S5_SR',
'Test_RPOS_ToF04_P1W01',
'Test_RPOS_ToF04_P1W02_SR',
'Test_RPOS_ToF04_P1W03',
'Test_RPOS_ToF04_B1E1_SR',
'Test_RPOS_ToF04_B1E2_SR',
'Test_RPOS_ToF04_B1E3_SR',
'Test_RPOS_ToF04_B1E4_SR',
'Test_RPOS_ToF04_B1E5_SR',
'Test_RPOS_ToF04_B2S1_SR',
'Test_RPOS_ToF04_B2S2_SR',
'Test_RPOS_ToF04_B2S3_SR',
'Test_RPOS_ToF04_B2S4_SR',
'Test_RPOS_ToF04_B2S5_SR',
'Test_RPOS_ToF04_P2W01',
'Test_RPOS_ToF04_P2W02_SR',
'Test_RPOS_ToF04_P3W03',
'Test_RPOS_ToF04_B2S1_SR_2',
'Test_RPOS_ToF04_B2S2_SR_2',
'Test_RPOS_ToF04_B2S3_SR_2',
'Test_RPOS_ToF04_B2S4_SR_2',
'Test_RPOS_ToF04_B2S5_SR_2',
'Test_RPOS_ToF04_P3W03_b',
'Test_RPOS_ToF04_B2E1_SR',
'Test_RPOS_ToF04_B2E2_SR',
'Test_RPOS_ToF04_B2E3_SR',
'Test_RPOS_ToF04_B2E4_SR',
'Test_RPOS_ToF04_B2E5_SR',
'Test_RPOS_ToF04_B2SRD1']
self.msData.addSampleInfo(descriptionFormat='Filenames')
self.msData.sampleMetadata['Run Order'] = self.msData.sampleMetadata.index + 1
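# _fillBatches is expected to infer 'Correction Batch' from the batch markers in the file names, starting a new correction batch at each block of B<n>S#_SR references; the expected series in the tests below encode that grouping.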
def test_fillbatches_correctionbatch(self):
self.msData._fillBatches()
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_fillbatches_warns(self):
self.msData.sampleMetadata.drop('Run Order', axis=1, inplace=True)
self.assertWarnsRegex(UserWarning, r'Unable to infer batches without run order, skipping\.', self.msData._fillBatches)
def test_amendbatches(self):
"""
"""
self.msData._fillBatches()
self.msData.amendBatches(20)
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_msdataset_addsampleinfo_batches(self):
self.msData.addSampleInfo(descriptionFormat='Batches')
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
class test_msdataset_import_undefined(unittest.TestCase):
"""
Test we raise an error when passing a fileType we don't understand.
"""
def test_raise_notimplemented(self):
self.assertRaises(NotImplementedError, nPYc.MSDataset, os.path.join('nopath'), fileType='Unknown filetype')
class test_msdataset_import_QI(unittest.TestCase):
"""
Test import from QI csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv'), fileType='QI')
self.msData.addSampleInfo(descriptionFormat='Filenames')
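# The UnitTest1 QI export holds 115 samples and 4 features; the tests below pin the parsed sample and feature metadata to those values.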
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (115, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_Blank01', 'UnitTest1_LPOS_ToF02_Blank02',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W08_x',
'UnitTest1_LPOS_ToF02_S1W11_LTR', 'UnitTest1_LPOS_ToF02_S1W12_SR',
'UnitTest1_LPOS_ToF02_ERROR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking Peak Widths'):
peakWidth = pandas.Series([0.03931667,
0.01403333,
0.01683333,
0.01683333],
name='Peak Width',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Peak Width'], peakWidth)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378339,
293.1811941,
145.0686347,
258.1033447],
name='m/z',
dtype='float')
assert_series_equal(self.msData.featureMetadata['m/z'], mz)
with self.subTest(msg='Checking Retention Time'):
rt = pandas.Series([3.17485,
3.17485,
3.17485,
3.17485],
name='Retention Time',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Retention Time'], rt)
with self.subTest(msg='Checking Isotope Distribution'):
isotope = pandas.Series(['100 - 36.9',
'100 - 11.9',
'100 - 8.69',
'100 - 73.4'],
name='Isotope Distribution',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Isotope Distribution'], isotope)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.msData.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
class test_msdataset_import_xcms(unittest.TestCase):
"""
Test import from XCMS csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_xcms.csv'), fileType='XCMS', noFeatureParams=9)
self.msData_PeakTable = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_xcms_peakTable.csv'), fileType='XCMS', noFeatureParams=8)
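# Two flavours of XCMS export are parsed here; noFeatureParams (9 vs 8) presumably matches the number of feature-parameter columns preceding the intensity values in each file.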
self.msData.addSampleInfo(descriptionFormat='Filenames')
self.msData_PeakTable.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (111, 4))
self.assertEqual((self.msData_PeakTable.noSamples, self.msData_PeakTable.noFeatures), (111, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W11_LTR',
'UnitTest1_LPOS_ToF02_S1W12_SR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
assert_series_equal(self.msData_PeakTable.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
assert_series_equal(self.msData_PeakTable.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378339,
293.1811941,
145.0686347,
258.1033447],
name='m/z',
dtype='float')
assert_series_equal(self.msData.featureMetadata['m/z'], mz)
assert_series_equal(self.msData_PeakTable.featureMetadata['m/z'], mz)
with self.subTest(msg='Checking Retention Time'):
rt = pandas.Series([3.17485 / 60.0,
3.17485 / 60.0,
3.17485 / 60.0,
3.17485 / 60.0],
name='Retention Time',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Retention Time'], rt)
assert_series_equal(self.msData_PeakTable.featureMetadata['Retention Time'], rt)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Dilution'], dilution)
assert_series_equal(self.msData_PeakTable.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData.addSampleInfo(descriptionFormat='Batches')
self.msData_PeakTable.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData_PeakTable.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
numpy.testing.assert_array_almost_equal(self.msData_PeakTable.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.msData.Attributes['corrMethod'] = 'spearman'
self.msData_PeakTable.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
numpy.testing.assert_array_almost_equal(self.msData_PeakTable.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
self.assertEqual(self.msData_PeakTable.VariableType, nPYc.enumerations.VariableType.Discrete)
def test_xcms_raises(self):
path = os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv')
self.assertRaises(ValueError, nPYc.MSDataset, path, fileType='XCMS', noFeatureParams=9)
class test_msdataset_import_csvimport_discrete(unittest.TestCase):
"""
Test import from NPC csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(
os.path.join('..', '..', 'npc-standard-project', 'Derived_Data', 'UnitTest1_PCSOP.069_csv_import.csv'), fileType='csv', noFeatureParams=1)
self.msData.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (111, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W11_LTR',
'UnitTest1_LPOS_ToF02_S1W12_SR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.msData.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
class test_msdataset_import_csvimport_continuum(unittest.TestCase):
"""
Test import from NPC csv files
"""
def test_csv_continuum_import_raises(self):
path = os.path.join('..', '..', 'npc-standard-project', 'Derived_Data', 'UnitTest1_PCSOP.069_csv_import.csv')
self.assertRaises(NotImplementedError, nPYc.MSDataset, path, fileType='csv', noFeatureParams=2, variableType='Continuum')
class test_msdataset_import_metaboscape(unittest.TestCase):
"""
Test import from metaboscape xlsx outputs
"""
def setUp(self):
path = os.path.join('..','..','npc-standard-project','Derived_Data', 'UnitTest1_PCSOP.069_Metaboscape.xlsx')
self.lcData = nPYc.MSDataset(path, fileType='Metaboscape', noFeatureParams=18, sheetName='Test Data')
self.lcData.addSampleInfo(descriptionFormat='Filenames')
self.diData = nPYc.MSDataset(path, fileType='Metaboscape', noFeatureParams=16, sheetName='Test Data (DI)')
self.diData.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.lcData.noSamples, self.lcData.noFeatures), (115, 4))
self.assertEqual((self.diData.noSamples, self.diData.noFeatures), (115, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_Blank01', 'UnitTest1_LPOS_ToF02_Blank02',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W08_x',
'UnitTest1_LPOS_ToF02_S1W11_LTR', 'UnitTest1_LPOS_ToF02_S1W12_SR',
'UnitTest1_LPOS_ToF02_ERROR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.lcData.sampleMetadata['Sample File Name'], samples)
assert_series_equal(self.diData.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.lcData.featureMetadata['Feature Name'], features)
features = pandas.Series(['262.0378339m/z',
'293.1811941m/z',
'145.0686347m/z',
'258.1033447m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.diData.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378,
293.1812,
145.0686,
258.1033],
name='m/z',
dtype='float')
assert_series_equal(self.lcData.featureMetadata['m/z'], mz)
assert_series_equal(self.diData.featureMetadata['m/z'], mz)
with self.subTest(msg='Checking Retention Time'):
rt = pandas.Series([3.17485,
3.17485,
3.17485,
3.17485],
name='Retention Time',
dtype='float')
assert_series_equal(self.lcData.featureMetadata['Retention Time'], rt)
with self.subTest(msg='Checking ΔRT'):
deltaRT = pandas.Series([0,
0.1,
0,
-0.1],
name='ΔRT',
dtype='object')
assert_series_equal(self.lcData.featureMetadata['ΔRT'], deltaRT)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.lcData.sampleMetadata['Dilution'], dilution)
assert_series_equal(self.diData.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.lcData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.lcData.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.lcData.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.lcData.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.lcData.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.lcData.VariableType, nPYc.enumerations.VariableType.Discrete)
def test_csv_import(self):
path = os.path.join('..','..','npc-standard-project','Derived_Data', 'UnitTest1_PCSOP.069_Metaboscape_LC.csv')
lcData = nPYc.MSDataset(path, fileType='Metaboscape', noFeatureParams=18)
lcData.addSampleInfo(descriptionFormat='Filenames')
assert_frame_equal(self.lcData.sampleMetadata, lcData.sampleMetadata)
numpy.testing.assert_array_equal(self.lcData.intensityData, lcData.intensityData)
class test_msdataset_import_biocrates(unittest.TestCase):
"""
	Test import of Biocrates sheets
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_Biocrates.xlsx'), fileType='Biocrates', sheetName='Master Samples')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (9, 144))
def test_samples(self):
with self.subTest(msg='Checking Sample IDs'):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W11_LTR',
'UnitTest1_LPOS_ToF02_S1W12_SR'],
name='Sample ID',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample ID'], samples)
with self.subTest(msg='Checking Sample Bar Code'):
samples = pandas.Series([1010751983, 1010751983, 1010751983, 1010751983, 1010751983, 1010751998, 1010751998, 1010751998, 1010751998],
name='Sample Bar Code',
dtype=int)
assert_series_equal(self.msData.sampleMetadata['Sample Bar Code'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['C0', 'C10', 'C10:1', 'C10:2', 'C12', 'C12-DC', 'C12:1', 'C14', 'C14:1', 'C14:1-OH', 'C14:2', 'C14:2-OH', 'C16', 'C16-OH',
'C16:1', 'C16:1-OH', 'C16:2', 'C16:2-OH', 'C18', 'C18:1', 'C18:1-OH', 'C18:2', 'C2', 'C3', 'C3-DC (C4-OH)', 'C3-OH', 'C3:1',
'C4', 'C4:1', 'C6 (C4:1-DC)', 'C5', 'C5-M-DC', 'C5-OH (C3-DC-M)', 'C5:1', 'C5:1-DC', 'C5-DC (C6-OH)', 'C6:1', 'C7-DC', 'C8',
'C9', 'lysoPC a C14:0', 'lysoPC a C16:0', 'lysoPC a C16:1', 'lysoPC a C17:0', 'lysoPC a C18:0', 'lysoPC a C18:1', 'lysoPC a C18:2',
'lysoPC a C20:3', 'lysoPC a C20:4', 'lysoPC a C24:0', 'lysoPC a C26:0', 'lysoPC a C26:1', 'lysoPC a C28:0', 'lysoPC a C28:1',
'PC aa C24:0', 'PC aa C26:0', 'PC aa C28:1', 'PC aa C30:0', 'PC aa C32:0', 'PC aa C32:1', 'PC aa C32:2', 'PC aa C32:3', 'PC aa C34:1',
'PC aa C34:2', 'PC aa C34:3', 'PC aa C34:4', 'PC aa C36:0', 'PC aa C36:1', 'PC aa C36:2', 'PC aa C36:3', 'PC aa C36:4', 'PC aa C36:5',
'PC aa C36:6', 'PC aa C38:0', 'PC aa C38:3', 'PC aa C38:4', 'PC aa C38:5', 'PC aa C38:6', 'PC aa C40:1', 'PC aa C40:2', 'PC aa C40:3',
'PC aa C40:4', 'PC aa C40:5', 'PC aa C40:6', 'PC aa C42:0', 'PC aa C42:1', 'PC aa C42:2', 'PC aa C42:4', 'PC aa C42:5', 'PC aa C42:6',
'PC ae C30:0', 'PC ae C30:1', 'PC ae C30:2', 'PC ae C32:1', 'PC ae C32:2', 'PC ae C34:0', 'PC ae C34:1', 'PC ae C34:2', 'PC ae C34:3',
'PC ae C36:0', 'PC ae C36:1', 'PC ae C36:2', 'PC ae C36:3', 'PC ae C36:4', 'PC ae C36:5', 'PC ae C38:0', 'PC ae C38:1', 'PC ae C38:2',
'PC ae C38:3', 'PC ae C38:4', 'PC ae C38:5', 'PC ae C38:6', 'PC ae C40:1', 'PC ae C40:2', 'PC ae C40:3', 'PC ae C40:4', 'PC ae C40:5',
'PC ae C40:6', 'PC ae C42:0', 'PC ae C42:1', 'PC ae C42:2', 'PC ae C42:3', 'PC ae C42:4', 'PC ae C42:5', 'PC ae C44:3', 'PC ae C44:4',
'PC ae C44:5', 'PC ae C44:6', 'SM (OH) C14:1', 'SM (OH) C16:1', 'SM (OH) C22:1', 'SM (OH) C22:2', 'SM (OH) C24:1', 'SM C16:0',
'SM C16:1', 'SM C18:0', 'SM C18:1', 'SM C20:2', 'SM C24:0', 'SM C24:1', 'SM C26:0', 'SM C26:1', 'H1', 'H1.1'],
name='Feature Name',
dtype=str)
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
with self.subTest(msg='Class'):
classField = pandas.Series(['acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines',
'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'acylcarnitines', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids', 'glycerophospholipids',
'glycerophospholipids', 'glycerophospholipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids',
'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids', 'sphingolipids',
'sphingolipids', 'sugars', 'sugars'],
name='Class',
dtype=str)
assert_series_equal(self.msData.featureMetadata['Class'], classField)
with self.subTest(msg='Checking LOD'):
lod = pandas.Series([2.1, 0.08, 1.08, 0.156, 0.064, 0.151, 0.857, 0.023, 0.009, 0.015, 0.049, 0.019, 0.018, 0.009, 0.017, 0.029, 0.023, 0.035,
0.013, 0.029, 0.017, 0.01, 0.063, 0.011, 0.046, 0.02, 0., 0.027, 0.021, 0.02, 0.035, 0.05, 0.037, 0.072, 0.015, 0.014, 0.036,
0.018, 0.1, 0.017, 5.32, 0.068, 0.064, 0.035, 0.181, 0.023, 0.02, 0.088, 0., 0.038, 0.034, 0.015, 0.105, 0.007, 0.061, 1.1,
0.079, 0.139, 0.02, 0.006, 0.006, 0., 0.03, 0.015, 0.001, 0.004, 0.203, 0.012, 0.022, 0.004, 0.009, 0.004, 0.002, 0.035, 0.01,
0.008, 0.005, 0.002, 0.394, 0.058, 0.003, 0.017, 0., 0.188, 0.065, 0.019, 0.058, 0.011, 0.037, 0.248, 0.155, 0.005, 0.01,
0.002, 0.001, 0.011, 0.014, 0.004, 0.01, 0.059, 0.061, 0.029, 0., 0.084, 0.014, 0.076, 0.031, 0.012, 0.005, 0.009, 0.002,
0.003, 0.019, 0.006, 0.013, 0.08, 0.003, 0.007, 1.32, 0.119, 0.017, 0.007, 0., 0.843, 0.048, 0.116, 0.072, 0.043, 0., 0.004,
0.006, 0.001, 0., 0.032, 0.005, 0.003, 0.004, 0.013, 0.006, 0.003, 0.01, 0.003, 912., 912.],
name='LOD (μM)',
dtype=float)
assert_series_equal(self.msData.featureMetadata['LOD (μM)'], lod)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
class test_msdataset_addsampleinfo(unittest.TestCase):
"""
	Test addSampleInfo methods on data imported from QI csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv'), fileType='QI')
self.msData.addSampleInfo(descriptionFormat='Filenames')
def test_msdataset_load_npc_lims(self):
"""
		Test we are matching sample IDs in the LIMS correctly
"""
samplingIDs = pandas.Series(['Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample', 'Study Pool Sample', 'Procedural Blank Sample', 'Procedural Blank Sample',
'Study Pool Sample','Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'Study Pool Sample','Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample', 'Study Pool Sample',
'UT1_S1_s1', 'UT1_S2_s1', 'UT1_S3_s1', 'Not specified', 'UT1_S4_s2', 'UT1_S4_s3', 'UT1_S4_s4', 'UT1_S4_s5',
'External Reference Sample', 'Study Pool Sample', 'Not specified'], name='Sample ID', dtype='str')
samplingIDs = samplingIDs.astype(str)
self.msData.addSampleInfo(descriptionFormat='NPC LIMS', filePath=os.path.join('..','..','npc-standard-project','Derived_Worklists','UnitTest1_MS_serum_PCSOP.069.csv'))
assert_series_equal(self.msData.sampleMetadata['Sample ID'], samplingIDs)
def test_msdataset_load_watersraw_metadata(self):
"""
Test we read raw data from Waters .raw and concatenate it correctly - currently focusing on parameters of importance to the workflow.
		TODO: Test all parameters
"""
# Expected data starts with the same samples
expected = copy.deepcopy(self.msData.sampleMetadata)
##
# Define a test subset of columns with one unique value
##
testSeries = ['Sampling Cone', 'Scan Time (sec)', 'Source Offset', 'Source Temperature (°C)', 'Start Mass', 'End Mass', 'Column Serial Number:', 'ColumnType:']
expected['Sampling Cone'] = 20.0
expected['Scan Time (sec)'] = 0.15
expected['Source Offset'] = 80
expected['Source Temperature (°C)'] = 120.0
expected['Start Mass'] = 50.0
expected['End Mass'] = 1200.0
expected['Column Serial Number:'] = 1573413615729.
expected['ColumnType:'] = 'ACQUITY UPLC® HSS T3 1.8µm'
##
# And a subset with multiple values
##
testSeries.append('Detector')
expected['Detector'] = [3161., 3161., 3166., 3166., 3166., 3166., 3171., 3171., 3171., 3171., 3171., 3171., 3171., 3171., 3179.,
3179., 3179., 3179., 3179., 3179., 3184., 3184., 3184., 3188., 3188., 3188., 3188., 3188., 3188., 3193.,
3193., 3193., 3193., 3193., 3197., 3197., 3197., 3197., 3197., 3203., 3203., 3203., 3203., 3203., 3208.,
3208., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407.,
3407., 3407., 3407., 3407., 3407., 3407., 3407., 3407., 3399., 3399., 3399., 3399., 3399., 3399., 3399.,
3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399.,
3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3399., 3212., 3212., 3217., 3217., 3217., 3293.,
3293., 3293., 3299., 3299., 3299., 3299., 3299., 3293., 3299., 3299.]
testSeries.append('Measurement Date')
expected['Measurement Date'] = ['25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014',
'25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014',
'25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014',
'25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '25-Nov-2014',
'25-Nov-2014', '25-Nov-2014', '25-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014',
'26-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'30-Nov-2014', '30-Nov-2014', '24-Nov-2014', '24-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014', '30-Nov-2014',
'26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '26-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014',
'27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014', '27-Nov-2014']
testSeries.append('Measurement Time')
expected['Measurement Time'] = ['13:43:57', '13:59:44', '14:15:39', '14:31:26', '14:47:21', '15:03:07', '15:19:00', '15:34:46', '15:50:40', '16:06:26',
'16:22:12', '16:38:06', '16:54:01', '17:09:56', '17:25:44', '17:41:30', '17:57:16', '18:13:02', '18:28:47', '18:44:35',
'19:00:22', '19:16:10', '19:31:56', '19:47:51', '20:03:46', '20:19:33', '20:35:19', '20:51:13', '21:07:09', '21:22:55',
'21:38:50', '21:54:43', '22:10:28', '22:26:15', '22:42:09', '22:57:56', '23:13:41', '23:29:35', '23:45:29', '00:01:23',
'00:17:10', '00:32:56', '00:48:49', '01:04:33', '01:20:20', '01:36:06', '19:53:55', '19:38:18', '19:22:41', '19:07:03',
'18:51:23', '18:35:46', '18:20:06', '18:04:29', '17:48:57', '17:33:20', '17:17:42', '17:02:05', '16:46:27', '16:30:57',
'16:15:18', '15:59:40', '15:44:03', '15:28:24', '15:12:48', '14:57:10', '14:41:33', '14:25:55', '14:10:24', '13:54:46',
'13:39:08', '13:23:38', '13:08:08', '12:52:30', '12:36:50', '12:21:13', '12:05:41', '11:50:03', '11:34:25', '11:18:55',
'11:03:25', '10:47:55', '10:32:18', '10:16:40', '10:01:10', '09:45:32', '09:30:01', '09:14:25', '08:58:53', '08:43:23',
'08:27:47', '08:12:10', '08:12:47', '08:25:10', '06:52:08', '07:09:38', '07:25:16', '07:40:52', '07:56:32', '02:39:17',
'02:55:03', '03:10:49', '03:26:43', '03:42:35', '12:11:04', '12:26:51', '12:42:35', '12:58:13', '13:14:01', '13:45:26',
'14:01:05', '14:16:51', '11:53:27', '13:29:48', '13:46:48']
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..', '..', 'npc-standard-project', 'Raw_Data', 'ms', 'parameters_data'))
with self.subTest(msg='Default Path'):
for series in testSeries:
|
assert_series_equal(self.msData.sampleMetadata[series], expected[series], check_dtype=False)
|
pandas.testing.assert_series_equal
|
# See also: https://www.kaggle.com/garethjns/microsoft-lightgbm-0-795
# https://github.com/garethjns/Kaggle-Titanic
#%% Imports
# The usuals
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Regular expressions
import re
# LightGBM
import lightgbm as lgb
# sklearn tools for model training and assessment
from sklearn.model_selection import train_test_split
from sklearn.metrics import (roc_curve, auc, accuracy_score)
from sklearn.model_selection import GridSearchCV
import os
os.chdir('/Users/hanbosun/Documents/GitHub/TrasactionPrediction/')
#%% Import data
# Import both data sets
# trainRaw = pd.read_csv('input/train.csv')
# testRaw = pd.read_csv('input/test.csv')
trainRaw = pd.read_csv('input/train_titan.csv')
testRaw = pd.read_csv('input/test_titan.csv')
# And concatenate together
nTrain = trainRaw.shape[0]
full = pd.concat([trainRaw, testRaw],
axis=0)
# %% Cabins
def ADSplit(s):
"""
Function to try and extract cabin letter and number from the cabin column.
Runs a regular expression that finds letters and numbers in the
string. These are held in match.group, if they exist.
"""
match = re.match(r"([a-z]+)([0-9]+)", s, re.I)
try:
letter = match.group(1)
except:
letter = ''
try:
number = match.group(2)
except:
number = 9999
return letter, number
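# Illustrative behaviour of ADSplit (a sketch, not part of the original script):
# for a typical cabin token such as 'C85' the regex matches and ('C', '85') is
# returned; for a token with no digits, e.g. 'D', there is no match and the
# fallbacks ('', 9999) are returned instead.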
def DR(s):
"""
From the cabin string, try and extract letter, number, and number of cabins
"""
# Check contents
if isinstance(s, (int, float)):
        # If the field is empty (missing cabins arrive as the float NaN), return placeholder values
letter = ''
number = ''
nRooms = 9999
else:
        # If field isn't empty, split string on space. Some strings contain
# multiple cabins.
s = s.split(' ')
# Count the cabins based on number of splits
nRooms = len(s)
# Just take first cabin for letter/number extraction
s = s[0]
letter, number = ADSplit(s)
return [letter, number, nRooms]
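# Worked example for DR (hedged, based on typical Titanic cabin values): a cell
# like 'C23 C25 C27' splits into three cabins, so nRooms is 3 and ADSplit runs
# on 'C23', giving ['C', '23', 3]; a missing cell arrives as the float NaN and
# falls into the first branch, giving ['', '', 9999].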
# Apply DR function to each cell in Cabin column using pandas apply method.
out = full['Cabin'].apply(DR)
# Output is a tuple with 3 values for each row; convert this to a pandas df
out = out.apply(pd.Series)
# And name the columns
out.columns = ['CL', 'CN', 'nC']
# Then concatenate these columns to the dataset
full = pd.concat([full, out],
axis=1)
# %% Family
# Add some family features directly to new columns in the dataset
# Size
full['fSize'] = full['SibSp'] + full['Parch'] + 1
# Ratio
full['fRatio'] = (full['Parch'] + 1) / (full['SibSp'] + 1)
# Adult?
full['Adult'] = full['Age'] > 18
# %% Names
# Extract titles from Name column, standardise
titleDict = {
"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Jonkheer": "Sir",
"Don": "Sir",
"Sir": "Sir",
"Dr": "Dr",
"Rev": "Rev",
"theCountess": "Lady",
"Dona": "Lady",
"Mme": "Mrs",
"Mlle": "Miss",
"Ms": "Mrs",
"Mr": "Mr",
"Mrs": "Mrs",
"Miss": "Miss",
"Master": "Master",
"Lady": "Lady"
}
def splitName(s, titleDict):
"""
Extract title from name, replace with value in title dictionary. Also
return surname.
"""
# Remove '.' from name string
s = s.replace('.', '')
# Split on spaces
s = s.split(' ')
# get surname
surname = s[0]
# Get title - loop over titleDict, if s matches a key, take the
# corresponding value as the title
title = [t for k, t in titleDict.items() if str(k) in s]
# If no matching keys in title dict, use 'Other'.
if title == []:
title = 'Other'
else:
# Title is a list, so extract contents
title = title[0]
# Return surname (stripping remaining ',') and title as string
return surname.strip(','), title
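# Quick illustration (using a name in the standard 'Surname, Title. First' form):
# splitName('Futrelle, Mrs. Jacques Heath', titleDict) drops the '.', splits on
# spaces, takes 'Futrelle,' as the surname token, matches 'Mrs' in titleDict,
# and returns ('Futrelle', 'Mrs').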
# Apply functions to df and concatenate new columns as before
out = full['Name'].apply(splitName,
args=[titleDict])
out = out.apply(pd.Series)
out.columns = ['Surname', 'Title']
full = pd.concat([full, out],
axis=1)
# %% Categorical columns
# List of categorical columns to recode
catCols = ['Sex', 'Embarked', 'CL', 'CN', 'Surname', 'Title']
# Recode
for c in catCols:
    # Convert column to pd.Categorical
full[c] = pd.Categorical(full[c])
# Extract the cat.codes and replace the column with these
full[c] = full[c].cat.codes
    # Convert the cat codes to categorical...
full[c] = pd.Categorical(full[c])
# Generate a list of the positional indices of the categorical columns, to maybe use with LightGBM later
catCols = [i for i,v in enumerate(full.dtypes) if str(v)=='category']
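# One way these positions could be used later (an assumption; the prepLGB helper
# below actually relies on the pandas 'category' dtype via categorical_feature='auto'):
# lgb.Dataset(data, label=labels, categorical_feature=catCols)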
#%% Age
# Replace missing age values with median.
# See other kernels for more sophisticated ways of doing this!
full.loc[full.Age.isnull(), 'Age'] = np.median(full['Age'].loc[full.Age.notnull()])
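# A slightly more sophisticated alternative (a sketch only, left commented out):
# impute the median within each Title group rather than the global median.
# full['Age'] = full.groupby('Title')['Age'].transform(lambda x: x.fillna(x.median()))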
#%% Split datasets
train = full.iloc[0:nTrain,:]
test = full.iloc[nTrain::,:]
#%% Prepare data
def prepLGB(data,
classCol='',
IDCol='',
fDrop=[]):
# Drop class column
if classCol != '':
labels = data[classCol]
fDrop = fDrop + [classCol]
else:
labels = []
if IDCol != '':
IDs = data[IDCol]
else:
IDs = []
if fDrop != []:
data = data.drop(fDrop,
axis=1)
# Create LGB mats
lData = lgb.Dataset(data, label=labels,
free_raw_data=False,
feature_name=list(data.columns),
categorical_feature='auto')
return lData, labels, IDs, data
# Specify columns to drop
fDrop = ['Ticket', 'Cabin', 'Name']
# Split training data into training and validation sets.
# Validation set is used for early stopping.
trainData, validData = train_test_split(train,
test_size=0.3,
stratify=train.Survived)
# Prepare the data sets
trainDataL, trainLabels, trainIDs, trainData = prepLGB(trainData,
classCol='Survived',
IDCol='PassengerId',
fDrop=fDrop)
validDataL, validLabels, validIDs, validData = prepLGB(validData,
classCol='Survived',
IDCol='PassengerId',
fDrop=fDrop)
testDataL, _, _ , testData = prepLGB(test,
classCol='Survived',
IDCol='PassengerId',
fDrop=fDrop)
# Prepare data set using all the training data
allTrainDataL, allTrainLabels, _ , allTrainData = prepLGB(train,
classCol='Survived',
IDCol='PassengerId',
fDrop=fDrop)
# Set params
# Scores ~0.784 (without tuning and early stopping)
params = {'boosting_type': 'gbdt',
'max_depth' : -1,
'objective': 'binary',
          'nthread': 3, # LightGBM alias for 'num_threads'
'num_leaves': 64,
'learning_rate': 0.05,
'max_bin': 512,
'subsample_for_bin': 200,
'subsample': 1,
'subsample_freq': 1,
'colsample_bytree': 0.8,
'reg_alpha': 5,
'reg_lambda': 10,
'min_split_gain': 0.5,
'min_child_weight': 1,
'min_child_samples': 5,
'scale_pos_weight': 1,
'num_class' : 1,
'metric' : 'binary_error'}
# Create parameters to search
gridParams = {
'learning_rate': [0.005],
'n_estimators': [40],
'num_leaves': [6,8,12,16],
'boosting_type' : ['gbdt'],
'objective' : ['binary'],
'random_state' : [501], # Updated from 'seed'
'colsample_bytree' : [0.65, 0.66],
'subsample' : [0.7,0.75],
'reg_alpha' : [1,1.2],
'reg_lambda' : [1,1.2,1.4],
}
# Create classifier to use. Note that parameters have to be input manually
# not as a dict!
mdl = lgb.LGBMClassifier(boosting_type= 'gbdt',
objective = 'binary',
n_jobs = 3, # Updated from 'nthread'
silent = True,
max_depth = params['max_depth'],
max_bin = params['max_bin'],
subsample_for_bin = params['subsample_for_bin'],
subsample = params['subsample'],
subsample_freq = params['subsample_freq'],
min_split_gain = params['min_split_gain'],
min_child_weight = params['min_child_weight'],
min_child_samples = params['min_child_samples'],
scale_pos_weight = params['scale_pos_weight'])
# To view the default model params:
mdl.get_params().keys()
# Create the grid
grid = GridSearchCV(mdl, gridParams,
verbose=0,
cv=4,
n_jobs=2)
# Run the grid
grid.fit(allTrainData, allTrainLabels)
# Print the best parameters found
print(grid.best_params_)
print(grid.best_score_)
# Using the parameters already set above, replace them with the best values found by the grid search
params['colsample_bytree'] = grid.best_params_['colsample_bytree']
params['learning_rate'] = grid.best_params_['learning_rate']
# params['max_bin'] = grid.best_params_['max_bin']
params['num_leaves'] = grid.best_params_['num_leaves']
params['reg_alpha'] = grid.best_params_['reg_alpha']
params['reg_lambda'] = grid.best_params_['reg_lambda']
params['subsample'] = grid.best_params_['subsample']
# params['subsample_for_bin'] = grid.best_params_['subsample_for_bin']
print('Fitting with params: ')
print(params)
# Fit k models with early-stopping on different training/validation splits
k = 4
predsValid = 0
predsTrain = 0
predsTest = 0
for i in range(0, k):
    print('Fitting model', i + 1, 'of', k)
# Prepare the data set for fold
trainData, validData = train_test_split(train,
test_size=0.4,
stratify=train.Survived)
trainDataL, trainLabels, trainIDs, trainData = prepLGB(trainData,
classCol='Survived',
IDCol='PassengerId',
fDrop=fDrop)
validDataL, validLabels, validIDs, validData = prepLGB(validData,
classCol='Survived',
IDCol='PassengerId',
fDrop=fDrop)
# Train
gbm = lgb.train(params,
trainDataL,
100000,
valid_sets=[trainDataL, validDataL],
early_stopping_rounds=50,
verbose_eval=4)
# Plot importance
lgb.plot_importance(gbm)
plt.show()
# Predict
predsValid += gbm.predict(validData,
num_iteration=gbm.best_iteration)/k
predsTrain += gbm.predict(trainData,
num_iteration=gbm.best_iteration)/k
predsTest += gbm.predict(testData,
num_iteration=gbm.best_iteration)/k
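# The averaged test probabilities can be thresholded at 0.5 to give 0/1 survival
# labels for the submission built below (a sketch, assuming the usual Kaggle
# 'PassengerId'/'Survived' layout):
# survived = (predsTest > 0.5).astype(int)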
# Print assessment
# assessMod(predsTrain, trainLabels, predsValid=predsValid, yValid= validLabels,
# report=True, plot=True)
# Save submission
sub =
|
pd.DataFrame()
|
pandas.DataFrame
|
import tensorflow as tf
import numpy as np
import pandas as pd
from math import floor, ceil
random_state = 42
np.random.seed(random_state)
tf.set_random_seed(random_state)
## EXTRACT DATA
math_df = pd.read_csv("E:\\MLData\\General\\student-alcohol-consumption\\student-mat.csv", sep=",")
port_df =
|
pd.read_csv("E:\\MLData\\General\\student-alcohol-consumption\\student-por.csv", sep=",")
|
pandas.read_csv
|
import os
from glob import glob
import pandas as pd
import json
from textwrap import wrap
from IPython.display import HTML, display
import matplotlib.pyplot as plt
# Specific to meet up
def parse_event_mu(e, column_names_only=False):
if column_names_only:
return 'msg time user userID receiver m-type'.split()
return (e['msg'] if e['type'] == 'text'
else 'cmd: ' + e['command'] if e['type'] == 'command'
# at most the last three parts of the path are needed to identify the image:
# else 'url: ' + '/'.join(e['url'].split('/')[-3:]) if e['type'] == 'new_image'
# well, hardcoding that is bad, doesn't work sometimes
# so here I'm hardcoding the fact that the word "training"
# occurs in the URL:
else 'url: ' + e['url'].split('training')[1] if e['type'] == 'new_image'
else "",
e['timestamp-iso'],
e['user']['name'],
e['user']['id'],
'All' if 'receiver' not in e else e['receiver'],
e['type'])
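# Shape of the result, using a hypothetical event purely for orientation: a text
# event {'type': 'text', 'msg': 'hello', ...} maps to
# ('hello', <timestamp>, <user name>, <user id>, 'All' or receiver, 'text'),
# i.e. one value per column listed in the column_names_only branch above.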
def inst2type(url):
prefix_elements = url.split('/')[:-1] # remove image name
    if len(prefix_elements) and len(prefix_elements[0]) == 1:  # first element is a single character, skip it
return prefix_elements[1]
return '/'.join(prefix_elements)
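# Example with a made-up path (hypothetical, only to show the intent): for
# 'training/bathroom/image_01.jpg' the image name is dropped and
# 'training/bathroom' is returned; if the first path element is a single
# character it is skipped and only the second element comes back.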
def postproc_df(in_df, game_master='Game Master'):
    '''Make some information in MeetUp DFs more accessible. Not in place; returns a new DF.'''
this_df = in_df.copy()
# canonicalise usernames
usernames = this_df['user'].unique().tolist()
usernames.remove(game_master)
user2canonical = {u: c for u, c in zip(usernames, ['A', 'B'])}
user2canonical['Game Master'] = 'GM'
name2id = {e[0]: e[1] for e in this_df[['user', 'userID']].values}
canonical2id = {v: name2id[k] for k, v in user2canonical.items()}
id2canonical = {v: k for k, v in canonical2id.items()}
this_df['user'] = this_df['user'].apply(lambda x: user2canonical[x])
# find the current location of each user at all times
is_in = {}
locations = []
for n, this_row in this_df.iterrows():
if this_row['m-type'] == 'new_image':
is_in[this_row['receiver']] = this_row['msg'].split()[1] # remove "url: " part
locations.append(
(
'unspec' if canonical2id['A'] not in is_in else is_in[canonical2id['A']],
'unspec' if canonical2id['B'] not in is_in else is_in[canonical2id['B']]
)
)
this_df['A_type'] = [inst2type(e[0]) for e in locations]
this_df['B_type'] = [inst2type(e[1]) for e in locations]
this_df['A_inst'] = [e[0] for e in locations]
this_df['B_inst'] = [e[1] for e in locations]
# make timestamps relative to first event
this_df['time'] =
|
pd.to_datetime(this_df['time'])
|
pandas.to_datetime
|
from PyQt5 import QtWidgets
import BOMAnalysisGUI as Gui
import pandas as pd
import sys
import os
def explore():
qfd = Gui.QtWidgets.QFileDialog()
path = "bom-examples"
filters = "bom(*.bom)"
    title = 'Select BOM 1'
file = QtWidgets.QFileDialog.getOpenFileName(qfd, title, path, filters)
ui.lineEdit.setText(file[0])
return file[0]
def explore_second():
qfd = Gui.QtWidgets.QFileDialog()
path = "bom-examples"
filters = "bom(*.bom)"
    title = 'Select BOM 2'
file = QtWidgets.QFileDialog.getOpenFileName(qfd, title, path, filters)
ui.lineEdit_2.setText(file[0])
return file[0]
def compare():
bom_1 = ui.lineEdit.text()
bom_2 = ui.lineEdit_2.text()
    print('Comparing ', bom_1, 'and ', bom_2)
pd.options.mode.chained_assignment = None
# BOM1
ind = bom_1.rfind('/') + 1
path = bom_1[:ind]
old_doc = bom_1[ind:].replace(r'.bom', '').replace(r' ', '')
print('PATH:', path, 'BOM1:', old_doc)
df1 = pd.read_csv(path + old_doc + '.bom', encoding='ISO-8859-1', sep=";", index_col='RefDes')
del df1['Count']
df1 = df1.fillna('-')
# BOM2
ind = bom_2.rfind('/') + 1
path = bom_2[:ind]
new_doc = bom_2[ind:].replace(r'.bom', '').replace(r' ', '')
print('PATH:', path, 'BOM2:', new_doc)
df2 = pd.read_csv(path + new_doc + '.bom', encoding='ISO-8859-1', sep=";", index_col='RefDes')
del df2['Count']
df2 = df2.fillna('-')
###
res = pd.merge(df1, df2, how='outer', on='RefDes', suffixes=('_old', '_new'), indicator=True).sort_values(
'RefDes')
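    # After an outer merge with indicator=True, the '_merge' column marks every
    # RefDes as 'left_only' (only in BOM 1), 'right_only' (only in BOM 2) or
    # 'both', which drives the removed/added/changed split below.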
###
add = res.loc[res['_merge'] == 'right_only']
add = add[['ComponentName_new', 'PatternName_new', 'Value_new']]
###
remove = res.loc[res['_merge'] == 'left_only']
remove = remove[['ComponentName_old', 'PatternName_old', 'Value_old']]
###
change = res.loc[res['_merge'] == 'both']
del change['_merge']
change['ComponentName'] = f'-'
change['PatternName'] = f'-'
change['Value'] = f'-'
for i in range(len(change)):
old_c = change['ComponentName_old'][i]
new_c = change['ComponentName_new'][i]
old_p = change['PatternName_old'][i]
new_p = change['PatternName_new'][i]
old_v = change['Value_old'][i]
new_v = change['Value_new'][i]
if old_c != new_c:
change['ComponentName'][i] = f'{old_c} --> {new_c}'
if old_p != new_p:
change['PatternName'][i] = f'{old_p} --> {new_p}'
if old_v != new_v:
change['Value'][i] = f'{old_v} --> {new_v}'
change = change[['ComponentName', 'PatternName', 'Value']]
drop_index = []
for i in range(len(change)):
if change['ComponentName'][i] == '-' and change['PatternName'][i] == '-' and change['Value'][i] == '-':
drop_index.append(i)
drop_index.reverse()
for i in drop_index:
change = change.drop(change.index[i])
###
old_name = old_doc.split("\\")[-1]
new_name = new_doc.split("\\")[-1]
path += 'from_' + old_name + '_to_' + new_name + '/'
try:
os.mkdir(path)
except OSError:
pass
add.rename(
columns={'ComponentName_new': 'ComponentName', 'PatternName_new': 'PatternName', 'Value_new': 'Value'},
inplace=True)
remove.rename(
columns={'ComponentName_old': 'ComponentName', 'PatternName_old': 'PatternName', 'Value_old': 'Value'},
inplace=True)
sep_df0 = pd.DataFrame(data=[['', '', '']], columns=add.columns,
                           index=['Boards', '', 'Changed components', 'New components',
                                  'Removed components', '',
''])
sep_df0.index.name = 'RefDes'
    sep_df0['ComponentName'][0] = f'Old {old_name}.bom'
sep_df0['PatternName'][0] = f'>>>>>'
    sep_df0['Value'][0] = f'New {new_name}.bom'
sep_df0['ComponentName'][2] = f'{len(change)}'
sep_df0['ComponentName'][3] = f'{len(add)}'
sep_df0['ComponentName'][4] = f'{len(remove)}'
sep_df1 =
|
pd.DataFrame(data=[['', '', '']], columns=add.columns, index=['Changed'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""inst.to_data_frame() helper functions."""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
import numpy as np
from ._logging import logger, verbose
from ..defaults import _handle_default
@verbose
def _set_pandas_dtype(df, columns, dtype, verbose=None):
"""Try to set the right columns to dtype."""
for column in columns:
df[column] = df[column].astype(dtype)
logger.info('Converting "%s" to "%s"...' % (column, dtype))
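# Usage sketch (hedged; this is a private MNE helper): given a DataFrame with a
# float 'time' column, _set_pandas_dtype(df, ['time'], np.int64) would cast that
# column in place and log one message per converted column.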
def _scale_dataframe_data(inst, data, picks, scalings):
ch_types = inst.get_channel_types()
ch_types_used = list()
scalings = _handle_default('scalings', scalings)
for tt in scalings.keys():
if tt in ch_types:
ch_types_used.append(tt)
for tt in ch_types_used:
scaling = scalings[tt]
idx = [ii for ii in range(len(picks)) if ch_types[ii] == tt]
if len(idx):
data[:, idx] *= scaling
return data
def _convert_times(inst, times, time_format):
"""Convert vector of time in seconds to ms, datetime, or timedelta."""
# private function; pandas already checked in calling function
from pandas import to_timedelta
if time_format == 'ms':
times = np.round(times * 1e3).astype(np.int64)
elif time_format == 'timedelta':
times = to_timedelta(times, unit='s')
elif time_format == 'datetime':
times = (
|
to_timedelta(times + inst.first_time, unit='s')
|
pandas.to_timedelta
|
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
        # filling a tz-naive series with a tz-aware timestamp should result in object dtype
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
        # check that ser is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
"AAA",
Timestamp("2011-01-03 10:00", tz=tz),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00", tz=tz),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# filling with a naive/other zone, coerce to object
result = ser.fillna(Timestamp("20130101"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
def test_fillna_dt64tz_with_method(self):
# with timezone
# GH#15855
ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="pad"), exp)
ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="bfill"), exp)
def test_fillna_pytimedelta(self):
# GH#8209
ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])
result = ser.fillna(timedelta(1))
expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_fillna_period(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
res = ser.fillna(Period("2012-01", freq="M"))
exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")])
tm.assert_series_equal(res, exp)
assert res.dtype == "Period[M]"
def test_fillna_dt64_timestamp(self, frame_or_series):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
obj = frame_or_series(ser)
# reg fillna
result = obj.fillna(Timestamp("20130104"))
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130104"),
Timestamp("20130103 9:01:01"),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = obj
tm.assert_equal(result, expected)
def test_fillna_dt64_non_nao(self):
# GH#27419
ser = Series([Timestamp("2010-01-01"), NaT, Timestamp("2000-01-01")])
val = np.datetime64("1975-04-05", "ms")
result = ser.fillna(val)
expected = Series(
[Timestamp("2010-01-01"), Timestamp("1975-04-05"), Timestamp("2000-01-01")]
)
tm.assert_series_equal(result, expected)
def test_fillna_numeric_inplace(self):
x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
y = x.copy()
return_value = y.fillna(value=0, inplace=True)
assert return_value is None
expected = x.fillna(value=0)
tm.assert_series_equal(y, expected)
# ---------------------------------------------------------------
# CategoricalDtype
@pytest.mark.parametrize(
"fill_value, expected_output",
[
("a", ["a", "a", "b", "a", "a"]),
({1: "a", 3: "b", 4: "b"}, ["a", "a", "b", "b", "b"]),
({1: "a"}, ["a", "a", "b", np.nan, np.nan]),
({1: "a", 3: "b"}, ["a", "a", "b", "b", np.nan]),
(Series("a"), ["a", np.nan, "b", np.nan, np.nan]),
(Series("a", index=[1]), ["a", "a", "b", np.nan, np.nan]),
(Series({1: "a", 3: "b"}), ["a", "a", "b", "b", np.nan]),
(Series(["a", "b"], index=[3, 4]), ["a", np.nan, "b", "a", "b"]),
],
)
def test_fillna_categorical(self, fill_value, expected_output):
# GH#17033
# Test fillna for a Categorical series
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b"]))
exp = Series(Categorical(expected_output, categories=["a", "b"]))
result = ser.fillna(fill_value)
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"fill_value, expected_output",
[
(Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]),
(Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]),
(
Series(
Categorical(
["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"]
)
),
["a", "d", "b", "d", "a"],
),
],
)
def test_fillna_categorical_with_new_categories(self, fill_value, expected_output):
# GH#26215
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b", "c", "d", "e"]))
exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"]))
result = ser.fillna(fill_value)
tm.assert_series_equal(result, exp)
def test_fillna_categorical_raises(self):
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b"]))
cat = ser._values
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(TypeError, match=msg):
ser.fillna("d")
msg2 = "Length of 'value' does not match."
with pytest.raises(ValueError, match=msg2):
cat.fillna(Series("d"))
with pytest.raises(TypeError, match=msg):
ser.fillna({1: "d", 3: "a"})
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
with pytest.raises(TypeError, match=msg):
ser.fillna(["a", "b"])
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
with pytest.raises(TypeError, match=msg):
ser.fillna(("a", "b"))
msg = (
'"value" parameter must be a scalar, dict '
'or Series, but you passed a "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
ser.fillna(DataFrame({1: ["a"], 3: ["b"]}))
@pytest.mark.parametrize("dtype", [float, "float32", "float64"])
@pytest.mark.parametrize("fill_type", tm.ALL_REAL_NUMPY_DTYPES)
def test_fillna_float_casting(self, dtype, fill_type):
# GH-43424
ser = Series([np.nan, 1.2], dtype=dtype)
fill_values = Series([2, 2], dtype=fill_type)
result = ser.fillna(fill_values)
expected = Series([2.0, 1.2], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_fillna_f32_upcast_with_dict(self):
# GH-43424
ser = Series([np.nan, 1.2], dtype=np.float32)
result = ser.fillna({0: 1})
expected = Series([1.0, 1.2], dtype=np.float32)
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------
# Invalid Usages
def test_fillna_invalid_method(self, datetime_series):
try:
datetime_series.fillna(method="ffil")
except ValueError as inst:
assert "ffil" in str(inst)
def test_fillna_listlike_invalid(self):
ser = Series(np.random.randint(-100, 100, 50))
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
with pytest.raises(TypeError, match=msg):
ser.fillna([1, 2])
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
with pytest.raises(TypeError, match=msg):
ser.fillna((1, 2))
def test_fillna_method_and_limit_invalid(self):
# related GH#9217, make sure limit is an int and greater than 0
ser = Series([1, 2, 3, None])
msg = "|".join(
[
r"Cannot specify both 'value' and 'method'\.",
"Limit must be greater than 0",
"Limit must be an integer",
]
)
for limit in [-1, 0, 1.0, 2.0]:
for method in ["backfill", "bfill", "pad", "ffill", None]:
with pytest.raises(ValueError, match=msg):
ser.fillna(1, limit=limit, method=method)
def test_fillna_datetime64_with_timezone_tzinfo(self):
# https://github.com/pandas-dev/pandas/issues/38851
# different tzinfos representing UTC treated as equal
ser = Series(date_range("2020", periods=3, tz="UTC"))
expected = ser.copy()
ser[1] = NaT
result = ser.fillna(datetime(2020, 1, 2, tzinfo=timezone.utc))
tm.assert_series_equal(result, expected)
        # but we don't (yet) treat distinct tzinfos for a non-UTC tz as equivalent
ts = Timestamp("2000-01-01", tz="US/Pacific")
ser2 = Series(ser._values.tz_convert("dateutil/US/Pacific"))
assert ser2.dtype.kind == "M"
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser2.fillna(ts)
expected = Series([ser[0], ts, ser[2]], dtype=object)
# TODO(2.0): once deprecation is enforced
# expected = Series(
# [ser2[0], ts.tz_convert(ser2.dtype.tz), ser2[2]],
# dtype=ser2.dtype,
# )
tm.assert_series_equal(result, expected)
def test_fillna_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
srs = Series([1, 2, 3, np.nan], dtype=float)
msg = (
r"In a future version of pandas all arguments of Series.fillna "
r"except for the argument 'value' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = srs.fillna(0, None, None)
expected = Series([1, 2, 3, 0], dtype=float)
tm.assert_series_equal(result, expected)
class TestFillnaPad:
def test_fillna_bug(self):
ser = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
filled = ser.fillna(method="ffill")
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], ser.index)
tm.assert_series_equal(filled, expected)
filled = ser.fillna(method="bfill")
expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], ser.index)
tm.assert_series_equal(filled, expected)
def test_ffill(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
ts[2] = np.NaN
tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill"))
def test_ffill_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
ser = Series([1, 2, 3])
msg = (
r"In a future version of pandas all arguments of Series.ffill "
r"will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.ffill(0)
expected = Series([1, 2, 3])
tm.assert_series_equal(result, expected)
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH#14956
series = Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
tm.assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
ts[2] = np.NaN
tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill"))
def test_bfill_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
ser = Series([1, 2, 3])
msg = (
r"In a future version of pandas all arguments of Series.bfill "
r"will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.bfill(0)
expected =
|
Series([1, 2, 3])
|
pandas.Series
|
# You can write code above the if-main block.
import pandas as pd
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error
import argparse
from matplotlib import pyplot as plt
import numpy as np
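# A minimal load_data() sketch (an assumption, not present in the original script), matching
# the commented-out call inside the if-main block below; the script as given reads the CSV
# directly with pd.read_csv instead.
def load_data(path):
    # read the single-column training CSV, squeezed into a Series
    # (squeeze=True mirrors the original read below; it was removed in pandas 2.0,
    # where pd.read_csv(path).squeeze("columns") is the replacement)
    return pd.read_csv(path, squeeze=True)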
if __name__ == "__main__":
# You should not modify this part.
parser = argparse.ArgumentParser()
parser.add_argument("--training", default="training_data.csv", help="input training data file name")
parser.add_argument("--testing", default="testing_data.csv", help="input testing data file name")
parser.add_argument("--output", default="output.csv", help="output file name")
args = parser.parse_args()
# The following part is an example.
# You can modify it at will.
#training_data = load_data(args.training)
training_data=
|
pd.read_csv(args.training,squeeze=True)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
To derive the MAG network and diffusion cascades we employed the tables Paper, Paper References, Author, PaperAuthorAffiliation, Fields of Study, Paper Fields of Study from MAG
https://docs.microsoft.com/en-us/academic-services/graph/reference-data-schema
Extract network and diffusion cascades from CS of MAG
"""
import pandas as pd
import os
import numpy as np
import networkx as nx
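# Workflow implemented below: clean_authors_by_name() filters the author table,
# prepare_fields() keeps only paper-field rows tagged as computer science with
# confidence > 0.5, and extract_network() combines the paper-author pairs with the
# filtered fields to build the network.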
def clean_authors_by_name():
#---- Clean authors
auth = pd.read_csv("authors.txt")
    auth = auth[auth.columns[[0, 2]]]  # keep columns 0 and 2 (.ix has been removed from pandas)
    # there are 83,209,107 unique display names but only ~60M unique normalized names
idx = auth.iloc[:,1].apply(lambda x:x[0]<"a" or x[0]>"z")
auth=auth[~idx]
auth.to_csv("authors.txt",index=False)
def prepare_fields():
#---- Keep the CS papers that have high confidence
f = pd.read_csv("fields.txt",sep="\t",header=None)
cs_fields = f.loc[f[4].str.contains('computer')==True,0].values
f1 = open("paper_fields.txt","r")
f2 = open("paper_fields_filtered.txt","w")
i = 0
for l in f1:
i+=1
if(i%1000000==0):
print(i)
parts = l.split("\t")
#-- check if the confidence is enough
try:
ty = int(parts[1])
            conf = float(parts[2])
        except (ValueError, IndexError):
            continue  # skip malformed lines; a bare "next" would not skip the iteration
if(conf>0.5):
if(ty in cs_fields):
f2.write("cs"+","+parts[0]+"\n")
f1.close()
def extract_network():
pap_auth = pd.read_csv("paper_author.txt")
pap_auth = pap_auth.drop(pap_auth.columns[0],axis=1)
pap_auth.to_csv("paper_author.txt",index=False)
fields =
|
pd.read_csv("paper_fields_filtered.txt")
|
pandas.read_csv
|
"""
End to end training of my neural network model.
The training routine has three key phases
- Evaluation through MCTS
- Data generation through MCTS
- Neural network training
"""
import numpy as np
from collections import defaultdict, deque, Counter, namedtuple
import itertools
import warnings
import os, psutil # useful for memory management
from datetime import datetime
from mcts_nn_cube import State, MCTSAgent
import objgraph
import pympler
from pympler import muppy
from pympler import summary
from pympler import tracker
tr = tracker.SummaryTracker()
from pprint import pprint
import gc
# this keeps track of the training runs, including the older versions that we are extending
VERSIONS = ["v0.9.3.memory_leak"]
# memory management
MY_PROCESS = psutil.Process(os.getpid())
def memory_used():
return MY_PROCESS.memory_info().rss
def str_between(s, start, end):
return (s.split(start))[1].split(end)[0]
class GameAgent():
def __init__(self, game_id):
self.game_id = game_id
self.self_play_stats=defaultdict(list)
self.game_stats=defaultdict(list)
self.data_states = []
self.data_policies = []
self.data_values = []
self.counter=0
self.done=False
self.win=False
# can attach other attributes as needed
class BatchGameAgent():
"""
Handles the steps of the games, including batch games.
"""
def __init__(self, model, max_steps, max_depth, max_game_length, transposition_table, decay, exploration):
self.game_agents = deque()
self.model = model
self.max_depth = max_depth
self.max_steps = max_steps
self.max_game_length = max_game_length
self.transposition_table = transposition_table
self.exploration = exploration
self.decay = decay
def is_empty(self):
return not bool(self.game_agents)
def append_states(self, state_info_iter):
for game_id, state, distance, distance_level in state_info_iter:
mcts = MCTSAgent(self.model.function,
state.copy(),
max_depth = self.max_depth,
transposition_table = self.transposition_table.copy(),
c_puct = self.exploration,
gamma = self.decay)
game_agent = GameAgent(game_id)
game_agent.mcts = mcts
game_agent.distance = distance
game_agent.distance_level = distance_level
self.game_agents.append(game_agent)
def run_game_agent_one_step(self, game_agent):
mcts = game_agent.mcts
mcts.search(steps=self.max_steps)
# reduce the max batch size to prevent the worker from blocking
self.model.set_max_batch_size(self.model.get_max_batch_size() - 1)
def process_completed_step(self, game_agent):
mcts = game_agent.mcts
# find next state
probs = mcts.action_probabilities(inv_temp = 10)
action = np.argmax(probs)
#action = np.random.choice(12, p=probs)
shortest_path = game_agent.mcts.stats('shortest_path')
# record stats
game_agent.self_play_stats['_game_id'].append(game_agent.game_id)
game_agent.self_play_stats['_step_id'].append(game_agent.counter)
game_agent.self_play_stats['shortest_path'].append(shortest_path)
game_agent.self_play_stats['action'].append(action)
game_agent.self_play_stats['value'].append(mcts.stats('value'))
game_agent.self_play_stats['prior'].append(mcts.stats('prior'))
game_agent.self_play_stats['prior_dirichlet'].append(mcts.stats('prior_dirichlet'))
game_agent.self_play_stats['visit_counts'].append(mcts.stats('visit_counts'))
game_agent.self_play_stats['total_action_values'].append(mcts.stats('total_action_values'))
# training data (also recorded in stats)
game_agent.data_states.append(mcts.initial_node.state.input_array_no_history())
policy = mcts.action_probabilities(inv_temp = 10)
game_agent.data_policies.append(policy)
game_agent.self_play_stats['updated_policy'].append(policy)
game_agent.data_values.append(0) # updated if game is success
game_agent.self_play_stats['updated_value'].append(0)
# prepare for next state
game_agent.counter += 1
#if shortest_path < 0:
# print("(DB) no path")
if (game_agent.counter > 1 and shortest_path < 0) or game_agent.counter >= self.max_game_length:
game_agent.win = False
game_agent.done = True
else:
mcts.advance_to_action(action)
if mcts.is_terminal():
game_agent.win = True
game_agent.done = True
def run_one_step_with_threading(self):
import threading
# start threads
self.model.set_max_batch_size(len(self.game_agents))
threads = []
for game_agent in self.game_agents:
t = threading.Thread(target=self.run_game_agent_one_step, args=(game_agent, ))
t.start()
threads.append(t)
# wait for threads to finish
for t in threads:
t.join()
for game_agent in self.game_agents:
self.process_completed_step(game_agent)
def run_one_step(self):
for game_agent in self.game_agents:
mcts = game_agent.mcts
mcts.search(steps=self.max_steps)
self.process_completed_step(game_agent)
def finished_game_results(self):
for _ in range(len(self.game_agents)):
game_agent = self.game_agents.popleft()
if not game_agent.done:
self.game_agents.append(game_agent)
else:
if game_agent.win:
value = 1
for i in range(game_agent.counter):
value *= self.decay
game_agent.data_values[-(i+1)] = value
game_agent.self_play_stats['updated_value'][-(i+1)] = value
# record game stats
game_agent.game_stats['_game_id'].append(game_agent.game_id)
game_agent.game_stats['distance_level'].append(game_agent.distance_level)
game_agent.game_stats['training_distance'].append(game_agent.distance)
game_agent.game_stats['max_game_length'].append(self.max_game_length)
game_agent.game_stats['win'].append(game_agent.win)
game_agent.game_stats['total_steps'].append(game_agent.counter if game_agent.win else -1)
yield game_agent
class TrainingAgent():
"""
This agent handles all the details of the training.
"""
def __init__(self):
import models
# Threading
self.multithreaded = True
# Model (NN) parameters (fixed)
self.prev_state_history = 8 # the number of previous states (including the current one) used as input to the model
self.checkpoint_model = models.ConvModel2D3D(history=self.prev_state_history) # this doesn't build and/or load the model yet
self.best_model = models.ConvModel2D3D(history=self.prev_state_history) # this doesn't build and/or load the model yet
if self.multithreaded:
self.checkpoint_model.multithreaded = True
self.best_model.multithreaded = True
self.learning_rate = .001
# MCTS parameters (fixed)
self.max_depth = 900
self.max_steps = 1600
self.use_prebuilt_transposition_table = False
self.decay = 0.95 # gamma
self.exploration = 1. #c_puct
self.prebuilt_transposition_table = None # built later
# Validation flags
self.validate_training_data = True
# Training parameters (fixed)
self.batch_size = 32
self.games_per_generation = 4#512
self.starting_distance = 1
self.min_distance = 1
self.win_rate_target = .5
self.max_game_length = 100
self.prev_generations_used_for_training = 8
self.training_sample_ratio = 1/self.prev_generations_used_for_training
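        # with prev_generations_used_for_training = 8, this samples 1/8 of the pooled data,
        # i.e. roughly one generation's worth of examples per training round (see train_model)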
self.games_per_evaluation = 2#128
# Training parameters preserved between generations
self.training_distance_level = float(self.starting_distance)
self.recent_wins = Counter()
self.recent_games = Counter()
self.checkpoint_training_distance_level = float(self.starting_distance)
self.checkpoint_recent_wins = Counter()
self.checkpoint_recent_games = Counter()
# Training parameters (dynamic)
self.game_number = 0
self.self_play_start = None # date and time (utc)
self.self_play_end = None
self.training_start = None
self.training_end = None
# Evaluation parameters (dynamic)
self.generation = 0
self.best_generation = 0
# Self play stats
# These are functionally data tables implemented as a dictionary of lists
# The keys are the column names. This makes it easy to change the stats I am recording.
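        # For example, game_stats['win'].append(True) appends one cell to the "win" column,
        # and pd.DataFrame(data=self.game_stats) later materialises the dict of lists as a
        # table (see save_training_stats below).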
self.self_play_stats = defaultdict(list)
self.game_stats = defaultdict(list)
self.training_stats = defaultdict(list)
self.generation_stats = defaultdict(list)
# Training data
self.training_data_states = []
self.training_data_policies = []
self.training_data_values = []
def build_models(self):
"""
Builds both checkpoint and best model
May be overwritten later by loaded weights
"""
self.checkpoint_model.build()
self.best_model.build()
def load_transposition_table(self):
#TODO: Add this. For now, just use empty table.
warnings.warn("load_transposition_table is not properly implemented", stacklevel=2)
self.prebuilt_transposition_table = {}
def load_models(self):
"""
Finds the checkpoint model and the best model in the given naming scheme
and loads those
"""
import os
# load checkpoint model
for version in VERSIONS:
model_files = [f for f in os.listdir('./save/')
if f.startswith("checkpoint_model_{}_gen".format(version))
and f.endswith(".h5")]
if model_files:
# choose newest generation
model_file = max(model_files,
key=lambda f: str_between(f, "_gen", ".h5"))
path = "./save/" + model_file
print("checkpoint model found:", "'" + path + "'")
print("loading model ...")
self.checkpoint_model.load_from_file(path)
self.generation = int(str_between(path, "_gen", ".h5"))
break
else:
print("no checkpoint model found with version {}".format(version))
print("generation set to", self.generation)
# load best model
for version in VERSIONS:
model_files = [f for f in os.listdir('./save/')
if f.startswith("model_{}_gen".format(version))
and f.endswith(".h5")]
if model_files:
# choose newest generation
model_file = max(model_files,
key=lambda f: (str_between(f, "_gen", ".h5")))
path = "./save/" + model_file
print("best model found:", "'" + path + "'")
print("loading model ...")
self.best_model.load_from_file(path)
self.best_generation = int(str_between(path, "_gen", ".h5"))
break
else:
print("no best model found with version {}".format(version))
print("best generation:", self.best_generation)
def save_checkpoint_model(self):
file_name = "checkpoint_model_{}_gen{:03}.h5".format(VERSIONS[0], self.generation)
path = "./save/" + file_name
self.checkpoint_model.save_to_file(path)
print("saved model checkpoint:", "'" + path + "'")
self.checkpoint_training_distance_level = self.training_distance_level
self.checkpoint_recent_wins = Counter()
self.checkpoint_recent_games = Counter()
# add a few free wins to speed up the convergence
for dist in range(int(self.training_distance_level) + 1):
self.checkpoint_recent_games[dist] += 1
self.checkpoint_recent_wins[dist] += 1
def save_and_set_best_model(self):
file_name = "model_{}_gen{:03}.h5".format(VERSIONS[0], self.generation)
path = "./save/" + file_name
self.checkpoint_model.save_to_file(path)
print("saved model:", "'" + path + "'")
self.best_model.load_from_file(path)
self.best_generation = self.generation
self.training_distance_level = self.checkpoint_training_distance_level
self.recent_wins = self.checkpoint_recent_wins
self.recent_games = self.checkpoint_recent_games
def train_model(self):
import os
import h5py
inputs_list = []
outputs_policy_list = []
outputs_value_list = []
counter = 0
for version in VERSIONS:
if counter > self.prev_generations_used_for_training:
break
data_files = [(str_between(f, "_gen", ".h5"), f)
for f in os.listdir('./save/')
if f.startswith("data_{}_gen".format(version))
and f.endswith(".h5")]
# go through in reverse order
for gen, f in reversed(sorted(data_files)):
if counter > self.prev_generations_used_for_training:
break
path = "./save/" + f
print("loading data:", "'" + path + "'")
with h5py.File(path, 'r') as hf:
inputs_list.append(hf['inputs'][:])
outputs_policy_list.append(hf['outputs_policy'][:])
outputs_value_list.append(hf['outputs_value'][:])
counter += 1
inputs_all = np.concatenate(inputs_list, axis=0)
outputs_policy_all = np.concatenate(outputs_policy_list, axis=0)
outputs_value_all = np.concatenate(outputs_value_list, axis=0)
if self.validate_training_data:
print("validating data...")
self.checkpoint_model.validate_data(inputs_all, outputs_policy_all, outputs_value_all, gamma=self.decay)
self.validate_training_data = False # just validate for first round
print("data valid.")
print("processing...")
inputs_all, outputs_policy_all, outputs_value_all = \
self.checkpoint_model.process_training_data(inputs_all, outputs_policy_all, outputs_value_all, augment=True)
n = len(inputs_all)
        sample_size = int((n * self.training_sample_ratio) // 32 + 1) * 32  # roughly a training_sample_ratio fraction of the samples, rounded up to a multiple of 32
sample_idx = np.random.choice(n, size=sample_size)
inputs = inputs_all[sample_idx]
outputs_policy = outputs_policy_all[sample_idx]
outputs_value = outputs_value_all[sample_idx]
print("training...")
self.checkpoint_model.train_on_data([inputs, outputs_policy, outputs_value])
def reset_self_play(self):
# Training parameters (dynamic)
self.game_number = 0
self.self_play_start = None # date and time (utc)
self.self_play_end = None
self.training_start = None
self.training_end = None
# Self play stats
self.self_play_stats = defaultdict(list)
self.game_stats = defaultdict(list)
self.generation_stats = defaultdict(list)
# Training data (one item per game based on randomly chosen game state)
self.training_data_states = []
self.training_data_policies = []
self.training_data_values = []
# set start time
self.self_play_start = datetime.utcnow() # date and time (utc)
def save_training_stats(self):
import pandas as pd
file_name = "stats_{}_gen{:03}.h5".format(VERSIONS[0], self.generation)
path = "./save/" + file_name
# record time of end of self-play
self.self_play_end = datetime.utcnow()
# save generation_stats data
self.generation_stats['_generation'].append(self.generation)
self.generation_stats['best_model_generation'].append(self.best_generation)
self.generation_stats['distance_level'].append(self.training_distance_level)
self.generation_stats['memory_usage'].append(memory_used())
self.generation_stats['version_history'].append(",".join(VERSIONS))
self.generation_stats['self_play_start_datetime_utc'].append(str(self.self_play_start))
self.generation_stats['self_play_end_datetime_utc'].append(str(self.self_play_end))
self.generation_stats['self_play_time_sec'].append((self.self_play_end - self.self_play_start).total_seconds())
generation_stats_df = pd.DataFrame(data=self.generation_stats)
generation_stats_df.to_hdf(path, 'generation_stats', mode='a', format='fixed') #use mode='a' to avoid overwriting
# save game_stats data
game_stats_df =
|
pd.DataFrame(data=self.game_stats)
|
pandas.DataFrame
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from itertools import product
import unittest
import pandas as pd
import numpy as np
import pyspark
from databricks import koalas as ks
from databricks.koalas.config import set_option, reset_option
from databricks.koalas.frame import DataFrame
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
from databricks.koalas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class OpsOnDiffFramesEnabledTest(ReusedSQLTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
)
@property
def pdf2(self):
return pd.DataFrame(
{"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
index=list(range(9)),
)
@property
def pdf3(self):
return pd.DataFrame(
{"b": [1, 1, 1, 1, 1, 1, 1, 1, 1], "c": [1, 1, 1, 1, 1, 1, 1, 1, 1]},
index=list(range(9)),
)
@property
def pdf4(self):
return pd.DataFrame(
{"e": [2, 2, 2, 2, 2, 2, 2, 2, 2], "f": [2, 2, 2, 2, 2, 2, 2, 2, 2]},
index=list(range(9)),
)
@property
def pdf5(self):
return pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
"c": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
).set_index(["a", "b"])
@property
def pdf6(self):
return pd.DataFrame(
{
"a": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"b": [0, 0, 0, 4, 5, 6, 1, 2, 3],
"c": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"e": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=list(range(9)),
).set_index(["a", "b"])
@property
def pser1(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx)
@property
def pser2(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
return pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx)
@property
def pser3(self):
midx = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 2, 0, 0, 2, 2, 2, 1]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
@property
def kdf1(self):
return ks.from_pandas(self.pdf1)
@property
def kdf2(self):
return ks.from_pandas(self.pdf2)
@property
def kdf3(self):
return ks.from_pandas(self.pdf3)
@property
def kdf4(self):
return ks.from_pandas(self.pdf4)
@property
def kdf5(self):
return ks.from_pandas(self.pdf5)
@property
def kdf6(self):
return ks.from_pandas(self.pdf6)
@property
def kser1(self):
return ks.from_pandas(self.pser1)
@property
def kser2(self):
return ks.from_pandas(self.pser2)
@property
def kser3(self):
return ks.from_pandas(self.pser3)
def test_ranges(self):
self.assert_eq(
(ks.range(10) + ks.range(10)).sort_index(),
(
ks.DataFrame({"id": list(range(10))}) + ks.DataFrame({"id": list(range(10))})
).sort_index(),
)
def test_no_matched_index(self):
with self.assertRaisesRegex(ValueError, "Index names must be exactly matched"):
ks.DataFrame({"a": [1, 2, 3]}).set_index("a") + ks.DataFrame(
{"b": [1, 2, 3]}
).set_index("b")
def test_arithmetic(self):
self._test_arithmetic_frame(self.pdf1, self.pdf2, check_extension=False)
self._test_arithmetic_series(self.pser1, self.pser2, check_extension=False)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_extension_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Int64"), self.pdf2.astype("Int64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_extension_float_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Float64"), self.pdf2.astype("Float64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype("Float64"), self.pser2.astype("Float64"), check_extension=True
)
def _test_arithmetic_frame(self, pdf1, pdf2, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for dtype in actual.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b).sort_index(), (pdf1.a - pdf2.b).sort_index())
assert_eq((kdf1.a * kdf2.a).sort_index(), (pdf1.a * pdf2.a).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index()
)
else:
assert_eq((kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index())
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] - kdf2["x"]["b"]).sort_index(),
(pdf1[("x", "a")] - pdf2["x"]["b"]).sort_index(),
)
assert_eq(
(kdf1["x"]["a"] - kdf2[("x", "b")]).sort_index(),
(pdf1["x"]["a"] - pdf2[("x", "b")]).sort_index(),
)
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
def _test_arithmetic_series(self, pser1, pser2, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2).sort_index(), (pser1 + pser2).sort_index())
assert_eq((kser1 - kser2).sort_index(), (pser1 - pser2).sort_index())
assert_eq((kser1 * kser2).sort_index(), (pser1 * pser2).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
else:
assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
def test_arithmetic_chain(self):
self._test_arithmetic_chain_frame(self.pdf1, self.pdf2, self.pdf3, check_extension=False)
self._test_arithmetic_chain_series(
self.pser1, self.pser2, self.pser3, check_extension=False
)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_chain_extension_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Int64"),
self.pdf2.astype("Int64"),
self.pdf3.astype("Int64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
self.pser3.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_chain_extension_float_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Float64"),
self.pdf2.astype("Float64"),
self.pdf3.astype("Float64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype("Float64"),
self.pser2.astype("Float64"),
self.pser3.astype("Float64"),
check_extension=True,
)
def _test_arithmetic_chain_frame(self, pdf1, pdf2, pdf3, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = ks.from_pandas(pdf3)
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for column, dtype in zip(actual.columns, actual.dtypes):
if column in common_columns:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertFalse(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b - kdf3.c).sort_index(), (pdf1.a - pdf2.b - pdf3.c).sort_index())
assert_eq(
(kdf1.a * (kdf2.a * kdf3.c)).sort_index(), (pdf1.a * (pdf2.a * pdf3.c)).sort_index()
)
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
else:
assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
columns = pd.MultiIndex.from_tuples([("x", "b"), ("y", "c")])
kdf3.columns = columns
pdf3.columns = columns
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")] - kdf3[("y", "c")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")] - pdf3[("y", "c")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] * (kdf2[("x", "b")] * kdf3[("y", "c")])).sort_index(),
(pdf1[("x", "a")] * (pdf2[("x", "b")] * pdf3[("y", "c")])).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
def _test_arithmetic_chain_series(self, pser1, pser2, pser3, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
kser3 = ks.from_pandas(pser3)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2 - kser3).sort_index(), (pser1 + pser2 - pser3).sort_index())
assert_eq((kser1 * kser2 * kser3).sort_index(), (pser1 * pser2 * pser3).sort_index())
if check_extension and not extension_float_dtypes_available:
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(
(kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index()
)
else:
expected = pd.Series(
[249.0, np.nan, 0.0, 0.88, np.nan, np.nan, np.nan, np.nan, np.nan, -np.inf]
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex(
[
["cow", "falcon", "koala", "koalas", "lama"],
["length", "power", "speed", "weight"],
],
[
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
[0, 1, 2, 2, 3, 0, 0, 1, 2, 3, 0, 0, 3, 3, 0, 2, 3],
],
),
)
self.assert_eq((kser1 - kser2 / kser3).sort_index(), expected)
else:
assert_eq((kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index())
assert_eq((kser1 + kser2 * kser3).sort_index(), (pser1 + pser2 * pser3).sort_index())
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
def test_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[0, 30, 10, 20, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1[pdf2.A > -3].sort_index(), kdf1[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A[pdf2.A > -3].sort_index(), kdf1.A[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1)[pdf2.A > -3].sort_index(), (kdf1.A + 1)[kdf2.A > -3].sort_index()
)
def test_loc_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[20, 10, 30, 0, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.loc[pdf2.A > -3].sort_index(), kdf1.loc[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A.loc[pdf2.A > -3].sort_index(), kdf1.A.loc[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1).loc[pdf2.A > -3].sort_index(), (kdf1.A + 1).loc[kdf2.A > -3].sort_index()
)
def test_bitwise(self):
pser1 = pd.Series([True, False, True, False, np.nan, np.nan, True, False, np.nan])
pser2 = pd.Series([True, False, False, True, True, False, np.nan, np.nan, np.nan])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
pser1 = pd.Series([True, False, np.nan], index=list("ABC"))
pser2 = pd.Series([False, True, np.nan], index=list("DEF"))
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_bitwise_extension_dtype(self):
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=False)
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
pser1 = pd.Series(
[True, False, True, False, np.nan, np.nan, True, False, np.nan], dtype="boolean"
)
pser2 = pd.Series(
[True, False, False, True, True, False, np.nan, np.nan, np.nan], dtype="boolean"
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
pser1 = pd.Series([True, False, np.nan], index=list("ABC"), dtype="boolean")
pser2 = pd.Series([False, True, np.nan], index=list("DEF"), dtype="boolean")
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
# a pandas bug?
# assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
# assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
assert_eq(
(kser1 | kser2).sort_index(),
pd.Series([True, None, None, None, True, None], index=list("ABCDEF"), dtype="boolean"),
)
assert_eq(
(kser1 & kser2).sort_index(),
pd.Series(
[None, False, None, False, None, None], index=list("ABCDEF"), dtype="boolean"
),
)
def test_concat_column_axis(self):
pdf1 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf1.columns.names = ["AB"]
pdf2 = pd.DataFrame({"C": [1, 2, 3], "D": [4, 5, 6]}, index=[1, 3, 5])
pdf2.columns.names = ["CD"]
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = kdf1.copy()
kdf4 = kdf2.copy()
pdf3 = pdf1.copy()
pdf4 = pdf2.copy()
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")], names=["X", "AB"])
pdf3.columns = columns
kdf3.columns = columns
columns = pd.MultiIndex.from_tuples([("X", "C"), ("X", "D")], names=["Y", "CD"])
pdf4.columns = columns
kdf4.columns = columns
pdf5 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf6 = pd.DataFrame({"C": [1, 2, 3]}, index=[1, 3, 5])
kdf5 = ks.from_pandas(pdf5)
kdf6 = ks.from_pandas(pdf6)
ignore_indexes = [True, False]
joins = ["inner", "outer"]
objs = [
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
# TODO: ([kdf1, kdf2.C], [pdf1, pdf2.C]),
([kdf1.A, kdf2], [pdf1.A, pdf2]),
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
([kdf3[("X", "A")], kdf4[("X", "C")]], [pdf3[("X", "A")], pdf4[("X", "C")]]),
([kdf3, kdf4[("X", "C")]], [pdf3, pdf4[("X", "C")]]),
([kdf3[("X", "A")], kdf4], [pdf3[("X", "A")], pdf4]),
([kdf3, kdf4], [pdf3, pdf4]),
([kdf5, kdf6], [pdf5, pdf6]),
([kdf6, kdf5], [pdf6, pdf5]),
]
for ignore_index, join in product(ignore_indexes, joins):
for i, (kdfs, pdfs) in enumerate(objs):
with self.subTest(ignore_index=ignore_index, join=join, pdfs=pdfs, pair=i):
actual = ks.concat(kdfs, axis=1, ignore_index=ignore_index, join=join)
expected = pd.concat(pdfs, axis=1, ignore_index=ignore_index, join=join)
self.assert_eq(
repr(actual.sort_values(list(actual.columns)).reset_index(drop=True)),
repr(expected.sort_values(list(expected.columns)).reset_index(drop=True)),
)
def test_combine_first(self):
pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
with self.assertRaisesRegex(
ValueError, "`combine_first` only allows `Series` for parameter `other`"
):
kser1.combine_first(50)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# MultiIndex
midx1 = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
midx2 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# Series come from same DataFrame
pdf = pd.DataFrame(
{
"A": {"falcon": 330.0, "eagle": 160.0},
"B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0},
}
)
pser1 = pdf.A
pser2 = pdf.B
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Index different from inserting Series'
#
pdf = pd.DataFrame([1, 2, 3], index=[10, 20, 30])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Multi-index columns
#
pdf = pd.DataFrame({("x", "a"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
kdf.insert(0, "a", kser)
pdf.insert(0, "a", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf.insert(0, ("b", "c", ""), kser)
pdf.insert(0, ("b", "c", ""), pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_compare(self):
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
pser1 = pd.Series(["b", "c", np.nan, "g", np.nan])
pser2 = pd.Series(["a", "c", np.nan, np.nan, "h"])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
pser1.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
pser2.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
else:
kser1 = ks.Series(["b", "c", np.nan, "g", np.nan])
kser2 = ks.Series(["a", "c", np.nan, np.nan, "h"])
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
kser1 = ks.Series(
["b", "c", np.nan, "g", np.nan],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
["a", "c", np.nan, np.nan, "h"],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# Different Index
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series([1, 2, 3, 4, 5], index=pd.Index([1, 2, 3, 4, 5]),)
kser2 = ks.Series([2, 2, 3, 4, 1], index=pd.Index([5, 4, 3, 2, 1]),)
kser1.compare(kser2)
# Different MultiIndex
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series(
[1, 2, 3, 4, 5],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
[2, 2, 3, 4, 1],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "a"), ("x", "k"), ("q", "l")]
),
)
kser1.compare(kser2)
def test_different_columns(self):
kdf1 = self.kdf1
kdf4 = self.kdf4
pdf1 = self.pdf1
pdf4 = self.pdf4
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
pdf1.columns = columns
columns = pd.MultiIndex.from_tuples([("z", "e"), ("z", "f")])
kdf4.columns = columns
pdf4.columns = columns
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
def test_assignment_series(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.a
pdf["a"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["c"] = self.kdf2.a
pdf["c"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf.columns = columns
pdf.columns = columns
kdf[("y", "c")] = self.kdf2.a
pdf[("y", "c")] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
pdf = pd.DataFrame({"a": [1, 2, 3], "Koalas": [0, 1, 2]}).set_index("Koalas", drop=False)
kdf = ks.from_pandas(pdf)
kdf.index.name = None
kdf["NEW"] = ks.Series([100, 200, 300])
pdf.index.name = None
pdf["NEW"] = pd.Series([100, 200, 300])
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' does not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["b", "c"]] = self.kdf1
pdf[["b", "c"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' and 'd' do not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["c", "d"]] = self.kdf1
pdf[["c", "d"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf.columns = columns
pdf.columns = columns
kdf[[("y", "c"), ("z", "d")]] = self.kdf1
pdf[[("y", "c"), ("z", "d")]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf1 = ks.from_pandas(self.pdf1)
pdf1 = self.pdf1
kdf1.columns = columns
pdf1.columns = columns
kdf[["c", "d"]] = kdf1
pdf[["c", "d"]] = pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_series_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["a"] = self.kdf1.a
pdf["a"] = self.pdf1.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
kdf["d"] = self.kdf3.c
pdf["d"] = self.pdf3.c
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
kdf[["e", "f"]] = self.kdf3
pdf[["e", "f"]] = self.pdf3
kdf[["b", "c"]] = self.kdf2
pdf[["b", "c"]] = self.pdf2
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_arithmetic(self):
kdf5 = self.kdf5
kdf6 = self.kdf6
pdf5 = self.pdf5
pdf6 = self.pdf6
# Series
self.assert_eq((kdf5.c - kdf6.e).sort_index(), (pdf5.c - pdf6.e).sort_index())
self.assert_eq((kdf5["c"] / kdf6["e"]).sort_index(), (pdf5["c"] / pdf6["e"]).sort_index())
# DataFrame
self.assert_eq((kdf5 + kdf6).sort_index(), (pdf5 + pdf6).sort_index(), almost=True)
def test_multi_index_assignment_series(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["x"] = self.kdf6.e
pdf["x"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["e"] = self.kdf6.e
pdf["e"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["c"] = self.kdf6.e
pdf["c"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_assignment_frame(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["c"]] = self.kdf5
pdf[["c"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["x"]] = self.kdf5
pdf[["x"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf6)
pdf = self.pdf6
kdf[["x", "y"]] = self.kdf6
pdf[["x", "y"]] = self.pdf6
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_frame_loc_setitem(self):
pdf_orig = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf_orig = ks.DataFrame(pdf_orig)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
another_kdf = ks.DataFrame(pdf_orig)
kdf.loc[["viper", "sidewinder"], ["shield"]] = -another_kdf.max_speed
pdf.loc[["viper", "sidewinder"], ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -another_kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
def test_frame_iloc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf = ks.DataFrame(pdf)
another_kdf = ks.DataFrame(pdf)
kdf.iloc[[0, 1, 2], 1] = -another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = -pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: match the behavior with pandas 1.2 and uncomment the test below
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (2,1)",
# ):
# kdf.iloc[[1, 2], [1]] = -another_kdf.max_speed
kdf.iloc[[0, 1, 2], 1] = 10 * another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = 10 * pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: match the behavior with pandas 1.2 and uncomment the test below
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (1,)",
# ):
# kdf.iloc[[0], 1] = 10 * another_kdf.max_speed
def test_series_loc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
kser_another = ks.from_pandas(pser_another)
kser.loc[kser % 2 == 1] = -kser_another
pser.loc[pser % 2 == 1] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser_another
pser.loc[pser_another % 2 == 1] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[["viper", "sidewinder"]] = -kser_another
pser.loc[["viper", "sidewinder"]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = 10
pser.loc[pser_another % 2 == 1] = 10
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
def test_series_iloc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
pser1 = pser + 1
kser1 = kser + 1
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
kser_another = ks.from_pandas(pser_another)
kser.iloc[[0, 1, 2]] = -kser_another
pser.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# TODO: match the behavior with pandas 1.2 and uncomment the test below.
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser.iloc[[1, 2]] = -kser_another
kser.iloc[[0, 1, 2]] = 10 * kser_another
pser.iloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser.iloc[[0]] = 10 * kser_another
kser1.iloc[[0, 1, 2]] = -kser_another
pser1.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser1, pser1)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser1.iloc[[1, 2]] = -kser_another
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
piloc = pser.iloc
kiloc = kser.iloc
kiloc[[0, 1, 2]] = -kser_another
piloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# TODO: match the behavior with pandas 1.2 and uncomment the test below.
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kiloc[[1, 2]] = -kser_another
kiloc[[0, 1, 2]] = 10 * kser_another
piloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kiloc[[0]] = 10 * kser_another
def test_update(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [10, 20, 30]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
pser.update(pd.Series([4, 5, 6]))
kser.update(ks.Series([4, 5, 6]))
self.assert_eq(kser.sort_index(), pser.sort_index())
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_where(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), kdf1.where(kdf2 > 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 < -250), kdf1.where(kdf2 < -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), kdf1.where(kdf2 > 100).sort_index())
def test_mask(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), kdf1.mask(kdf2 < 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 > -250), kdf1.mask(kdf2 > -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), kdf1.mask(kdf2 < 100).sort_index())
def test_multi_index_column_assignment_frame(self):
pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")])
kdf = ks.DataFrame(pdf)
kdf["c"] = ks.Series([10, 20, 30, 20])
pdf["c"] = pd.Series([10, 20, 30, 20])
kdf[("d", "x")] = ks.Series([100, 200, 300, 200], name="1")
pdf[("d", "x")] = pd.Series([100, 200, 300, 200], name="1")
kdf[("d", "y")] = ks.Series([1000, 2000, 3000, 2000], name=("1", "2"))
pdf[("d", "y")] = pd.Series([1000, 2000, 3000, 2000], name=("1", "2"))
kdf["e"] = ks.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
pdf["e"] = pd.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
kdf[[("f", "x"), ("f", "y")]] = ks.DataFrame(
{"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
)
pdf[[("f", "x"), ("f", "y")]] = pd.DataFrame(
{"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
)
self.assert_eq(repr(kdf.sort_index()), repr(pdf))
with self.assertRaisesRegex(KeyError, "Key length \\(3\\) exceeds index depth \\(2\\)"):
kdf[("1", "2", "3")] = ks.Series([100, 200, 300, 200])
def test_series_dot(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
kser = ks.from_pandas(pser)
pser_other = pd.Series([90, 91, 85], index=[2, 4, 1])
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.dot(kser_other), pser.dot(pser_other))
kser_other = ks.Series([90, 91, 85], index=[1, 2, 4])
pser_other = pd.Series([90, 91, 85], index=[1, 2, 4])
self.assert_eq(kser.dot(kser_other), pser.dot(pser_other))
# length of index is different
kser_other = ks.Series([90, 91, 85, 100], index=[2, 4, 1, 0])
with self.assertRaisesRegex(ValueError, "matrices are not aligned"):
kser.dot(kser_other)
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
pser_other = pd.Series([-450, 20, 12, -30, -250, 15, -320, 100, 3], index=midx)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.dot(kser_other), pser.dot(pser_other))
pser = pd.Series([0, 1, 2, 3])
kser = ks.from_pandas(pser)
# DataFrame "other" without Index/MultiIndex as columns
pdf = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
# DataFrame "other" with Index as columns
pdf.columns = pd.Index(["x", "y"])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
pdf.columns = pd.Index(["x", "y"], name="cols_name")
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
pdf = pdf.reindex([1, 0, 2, 3])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
# DataFrame "other" with MultiIndex as columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
pdf.columns = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y")], names=["cols_name1", "cols_name2"]
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
kser = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}).b
pser = kser.to_pandas()
kdf = ks.DataFrame({"c": [7, 8, 9]})
pdf = kdf.to_pandas()
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
def test_frame_dot(self):
pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
kdf = ks.from_pandas(pdf)
pser = pd.Series([1, 1, 2, 1])
kser = ks.from_pandas(pser)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
# Index reorder
pser = pser.reindex([1, 0, 2, 3])
kser = ks.from_pandas(pser)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
# ser with name
pser.name = "ser"
kser = ks.from_pandas(pser)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
# df with MultiIndex as column (ser with MultiIndex)
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pser =
|
pd.Series([1, 1, 2, 1], index=pidx)
|
pandas.Series
|
#! /usr/bin/env python3
import os
import string
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from nltk import word_tokenize, pos_tag
from collections import Counter, defaultdict
from tqdm import tqdm
def visualize_class_balance(data_path):
train_fileid = os.listdir(data_path + '/sampled_train')
train_fileid = map(os.path.splitext, train_fileid)
train_fileid = [id_ for (id_, _) in train_fileid]
metadata = pd.read_csv(data_path + '/annotations_metadata.csv')
train_instances = metadata.loc[metadata['file_id'].isin(train_fileid)]
class_counts = train_instances['label'].value_counts(normalize=True)
percentage_strings = class_counts.round(4) * 100
percentage_strings = percentage_strings.astype('str') + '%'
plt.figure(figsize=(8, 8))
plt.pie(class_counts, labels=percentage_strings)
plt.legend(class_counts.index)
plt.show()
def visualize_tags(data_path):
metadata =
|
pd.read_csv(data_path + '/annotations_metadata.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import pandas as pd
import pandas.types.concat as _concat
import pandas.util.testing as tm
class TestConcatCompat(tm.TestCase):
def check_concat(self, to_concat, exp):
for klass in [pd.Index, pd.Series]:
to_concat_klass = [klass(c) for c in to_concat]
res = _concat.get_dtype_kinds(to_concat_klass)
self.assertEqual(res, set(exp))
def test_get_dtype_kinds(self):
to_concat = [['a'], [1, 2]]
self.check_concat(to_concat, ['i', 'object'])
to_concat = [[3, 4], [1, 2]]
self.check_concat(to_concat, ['i'])
to_concat = [[3, 4], [1, 2.1]]
self.check_concat(to_concat, ['i', 'f'])
def test_get_dtype_kinds_datetimelike(self):
to_concat = [pd.DatetimeIndex(['2011-01-01']),
pd.DatetimeIndex(['2011-01-02'])]
self.check_concat(to_concat, ['datetime'])
to_concat = [pd.TimedeltaIndex(['1 days']),
pd.TimedeltaIndex(['2 days'])]
self.check_concat(to_concat, ['timedelta'])
def test_get_dtype_kinds_datetimelike_object(self):
to_concat = [pd.DatetimeIndex(['2011-01-01']),
pd.DatetimeIndex(['2011-01-02'], tz='US/Eastern')]
self.check_concat(to_concat,
['datetime', 'datetime64[ns, US/Eastern]'])
to_concat = [pd.DatetimeIndex(['2011-01-01'], tz='Asia/Tokyo'),
pd.DatetimeIndex(['2011-01-02'], tz='US/Eastern')]
self.check_concat(to_concat,
['datetime64[ns, Asia/Tokyo]',
'datetime64[ns, US/Eastern]'])
# timedelta has single type
to_concat = [pd.TimedeltaIndex(['1 days']),
pd.TimedeltaIndex(['2 hours'])]
self.check_concat(to_concat, ['timedelta'])
to_concat = [pd.DatetimeIndex(['2011-01-01'], tz='Asia/Tokyo'),
pd.TimedeltaIndex(['1 days'])]
self.check_concat(to_concat,
['datetime64[ns, Asia/Tokyo]', 'timedelta'])
def test_get_dtype_kinds_period(self):
# because we don't have Period dtype (yet),
# Series results in object dtype
to_concat = [pd.PeriodIndex(['2011-01'], freq='M'),
pd.PeriodIndex(['2011-01'], freq='M')]
res = _concat.get_dtype_kinds(to_concat)
self.assertEqual(res, set(['period[M]']))
to_concat = [pd.Series([pd.Period('2011-01', freq='M')]),
pd.Series([pd.Period('2011-02', freq='M')])]
res = _concat.get_dtype_kinds(to_concat)
self.assertEqual(res, set(['object']))
to_concat = [pd.PeriodIndex(['2011-01'], freq='M'),
pd.PeriodIndex(['2011-01'], freq='D')]
res =
|
_concat.get_dtype_kinds(to_concat)
|
pandas.types.concat.get_dtype_kinds
|
from typing import Dict
from typing import Union
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.transforms import ResampleWithDistributionTransform
DistributionDict = Dict[str, pd.DataFrame]
@pytest.fixture
def daily_exog_ts() -> Dict[str, Union[TSDataset, DistributionDict]]:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_2",
"target": [1] + 23 * [0] + [1] + 23 * [0],
}
)
df = pd.concat([df1, df2], ignore_index=True)
df_exog1 = pd.DataFrame(
{
"timestamp":
|
pd.date_range(start="2020-01-05", freq="D", periods=3)
|
pandas.date_range
|
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import gensim.utils as gu
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import os
import argparse
def doWork(i, lyrics, genre):
if not i%1000:
print("song number", i)
#print("LYRICS: ", lyrics)
#print("GENRE", genre)
if not pd.isnull(lyrics):
tokens = gu.simple_preprocess(lyrics)
if tokens is not None and len(tokens) > 0:
return tokens, genre
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--outdir', action='store', help='Output directory', required=True)
parser.add_argument('-i', '--infile', action='store', help='Input CSV file with lyrics and labels', required=True)
parser.add_argument('--songs-per-genre', action='store', type=int, help='Number of songs per genre', required=True)
args = parser.parse_args()
print("reading lyrics csv")
chosen_genres = ["Rock","R&B","Pop","Metal","Jazz","Indie","Hip-Hop","Folk","Electronic","Country"]
input_df =
|
pd.read_csv(args.infile)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 14:52:35 2019
@author: KatieSi
"""
##############################################################################
### Import Packages
##############################################################################
import numpy as np
import pandas as pd
from datetime import datetime, timedelta, date
import random
##############################################################################
### Set Variables
##############################################################################
# Base Variable
ReportName= 'Water Inspection Prioritization Model - Inspection Allocation 1'
RunDate = str(date.today())
### Baseline File
InspectionFile = 'InspectionList2019-09-18.csv'
SegmentationFile = 'Segmentation2019-09-18.csv'
SegmentationNoteFile = 'SegmentationNote2019-09-18.csv'
### Allocation totals per fortnight
FirstRunCountF1 = 625
FirstRunCountF2 = 616
FortnightDate1 = '2019-09-09'
FortnightDate2 = '2019-09-23'
##############################################################################
### Import data
##############################################################################
# Upload baseline
InspectionList = pd.read_csv(
r"D:\\Implementation Support\\Python Scripts\\scripts\\Import\\" +
InspectionFile)
Segmentation = pd.read_csv(
r"D:\\Implementation Support\\Python Scripts\\scripts\\Import\\" +
SegmentationFile)
SegmentationNote = pd.read_csv(
r"D:\\Implementation Support\\Python Scripts\\scripts\\Import\\" +
SegmentationNoteFile)
##############################################################################
### Select First Run of Inspections
##############################################################################
### Remove unnecessary inspections
InspectionList = InspectionList[InspectionList['InspectionNecessity'] != 0]
### Remove Midseason Inspections
MidSeasonCount = Segmentation[Segmentation['InspectionID'].notnull()]
### Remove Number of Midseason Inspections from total
MidSeasonCount = len(MidSeasonCount[['ConsentNo']].drop_duplicates())
Fortnight1 = FirstRunCountF1-MidSeasonCount
Fortnight2 = FirstRunCountF2
### Reduce list to First Push Inspections
FirstInspections = InspectionList[(InspectionList['InspectionAssignment'] == 1 )]
### Choose Inspections for Fortnight 1
F1Inspections = FirstInspections.sample(n=Fortnight1, weights = 'TotalRisk', random_state = 1)
F1Inspections['Fortnight'] = 1
F1InspectionsList = F1Inspections[['ConsentNo','Fortnight']]
FirstInspections = pd.merge(FirstInspections, F1InspectionsList, on = 'ConsentNo', how = 'left')
FirstInspections = FirstInspections[(FirstInspections['Fortnight'] != 1)]
FirstInspections = FirstInspections.drop(['Fortnight'], axis=1)
### Choose Inspections for Fortnight 2
F2Inspections = FirstInspections.sample(n=Fortnight2, weights = 'TotalRisk', random_state = 1)
F2Inspections['Fortnight'] = 2
F2InspectionsList = F2Inspections[['ConsentNo','Fortnight']]
InspectionAllocations = pd.concat([
F1InspectionsList,
F2InspectionsList
])
InspectionAllocations = pd.merge(InspectionAllocations, InspectionList, on = 'ConsentNo', how = 'left')
Officers =
|
pd.read_csv(r"D:\\Implementation Support\\Python Scripts\\scripts\\Import\\Officers.csv")
|
pandas.read_csv
|
#/*##########################################################################
# Copyright (C) 2020-2021 The University of Lorraine - France
#
# This file is part of the PyRecon toolkit developed at the GeoRessources
# Laboratory of the University of Lorraine, France.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
# -*- coding: utf-8 -*-
from pkgs.SpectraPreProcessing import *
import numpy as np
import pandas as pd
import re
'''
Once the object is built : Obj = VNIR_SWIR(AdressFolder, Name)
Where :
> AdressFolder : The path to the folder containing the files.
> Name : the file name without the extension (ex : SR-6500_SN2029020_00626.sed --> Name = SR-6500_SN2029020_00626).
We can :
> Read headers from one file with the function "Read_Header" : df = Obj.Read_Header()
> Read headers from more files in one DataFrame with the function "Read_Headers" : df = Obj.Read_Headers()
We can leave the argument "Name" empty as follows: Name = " "
> Read the spectrum from one file with the function "Read_spectrum" : df = Obj.Read_spectrum()
> Read spectrums from more files in one DataFrame with the function "Read_spectrums" : df = Obj.Read_spectrums()
We can leave the argument "Name" empty as follows: Name = " "
> Save as .CSV : after having collected the data in "df" we can save it under a given name
"name = "DataFrame"" with the following command : Obj.Save_as_csv(df , name = "DataFrame")
NOTE :
* The folder must contain only the files.
* Please move the .csv file generated by "Obj.Save_as_csv" elsewhere and delete it from the current folder.
'''
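# A hypothetical usage sketch following the docstring above; the folder path and
# file name below are placeholders, not files shipped with this module:
#   obj = VNIR_SWIR("path/to/folder", "SR-6500_SN2029020_00626")
#   headers_df = obj.Read_Headers()
#   spectra_df = obj.Read_spectrums()
#   obj.Save_as_csv(spectra_df, name="DataFrame")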
class VNIR_SWIR(SpectraPreProcessing):
def __init__(self, AdressFolder, Name):
SpectraPreProcessing.__init__(self,AdressFolder)
self.Name = Name
def NameSpectrumColumns(self):
'''
Returns List of tuples containing the names of each column
Note : you can change the following argument "ColomnsName" for naming the columns.
'''
ColomnsName = ["Wavelength", "Reflection %"]
Name = [self.NameColumn(SampleName = self.Name, ColomnName = ["_",w]) for w in ColomnsName]
return Name
def Header_to_Dict(self,NumberLine):
'''
Returns the header lines as a list of dictionaries
'''
inputfile = open(self.FilePath(self.Name), 'r')
Liste = list()
for i in range(NumberLine):
inputstr=inputfile.readline()
inputspl=re.split("=",inputstr)
Word = inputspl[0].rstrip()
pos = Word.find(":")
Liste.append(dict({Word[None:pos] : Word[pos+1 ::] }))
inputfile.close()
return Liste
def Read_Header(self):
'''
Converts the header lines in the file to a data frame
Note : In the VNIR_SWIR case the header lines are the lines before the word "Data:"
'''
global df
try :
List = [self.Dict_to_Df(Element) for Element in self.Header_to_Dict(self.FindWord("Data:",self.Name)+1)]
# Add columns for the name : the name is also the name of the file "name.esp"
df = self.Add_columns(List[0], "Sample", self.Name)
# Join the laste two data frame in one data frame:
for i in range(len(List)-1):
df = self.Join_df(df , List[i+1])
except IndexError:
print("Please check that your file contains headers")
df = pd.DataFrame({'None' : [np.nan]})
return df
def Read_Headers(self):
'''
Returns DataFrame containing the header lines of all files existing in the folder
Note : this function concatenates the header lines from one or more files in the folder into one dataframe using Read_Header
'''
df = pd.DataFrame()
li = self.NameFiles()
for i in range(len(li)):
V_S = VNIR_SWIR(self.AdressFolder, li[i])
DF = V_S.Read_Header()
df = df.append(DF, ignore_index = True)
return df
def Read_spectrum(self):
'''
Returns DataFrame containing the spectrum
Note : > The column names are multi-level, built by the above function "NameSpectrumColumns"
> This function reads the spectrum whether headers exist or not.
'''
File = self.FilePath(self.Name)
Sep = self.Separator(Extension = self.FileExtension(self.Name))
ColumnsName = self.NameSpectrumColumns()
df = pd.read_table(File, header = self.FindWord("Data:", self.Name)+1, sep = Sep, names = ColumnsName)
return df
def Read_spectrums(self):
'''
Returns a DataFrame containing all spectrums in the folder
'''
Names = self.NameFiles()
DF = [VNIR_SWIR(self.AdressFolder, Names[i]).Read_spectrum() for i in range(len(Names))]
df =
|
pd.DataFrame({("_","_","Wavelength"):DF[0][DF[0].columns[0]]})
|
pandas.DataFrame
|
from math import pi, cos, sin, sqrt
# from random import random
import random
import math
from typing import Tuple
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.random.mtrand import rand
import statistics
# the commented-out code below fixes ("freezes") the randomness of the points and the neurons
# rand2 = np.random.RandomState(2)
def get_random_point(center: Tuple[float, float], radius: float) -> Tuple[float, float]:
shift_x, shift_y = center
# a = rand2.random() * 2 * pi
a = random.random() * 2 * pi
r = radius * sqrt(random.random())
# r = radius * sqrt(rand2.random())
return r * cos(a) + shift_x, r * sin(a) + shift_y
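# Note: drawing r as radius * sqrt(u), with u uniform in [0, 1), spreads the points
# uniformly over the disc's area instead of clustering them near the centre.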
# randomly generated points for a single circle
list_only_one_circle = [get_random_point((0, 0), 2) for x in range(200)]
df_only_one_circle = pd.DataFrame(list_only_one_circle)
# randomly generated points for two circles
list_first_circle = [get_random_point((-3, 0), 1) for x in range(100)]
list_second_circle = [get_random_point((3, 0), 1) for x in range(100)]
df_fist_circle = pd.DataFrame(list_first_circle)
df_second_circle = pd.DataFrame(list_second_circle)
df_two_circles_combined = df_fist_circle.append(df_second_circle)
# here we change the number of neurons (for the single circle)
neurons = [get_random_point((0, 0), 3) for x in range(20)]
df_neurons_only_one_circle =
|
pd.DataFrame(neurons)
|
pandas.DataFrame
|
import pandas as pd
from datetime import datetime, timedelta
import cbpro
# Import client and init dictionary of granularity
c = cbpro.PublicClient()
def _concat(dataframe, data):
return pd.concat([dataframe, data])
def _make_df(df):
columns = ['time', 'Low', 'High', 'Open', 'Close', 'volume']
return pd.DataFrame(df, columns=columns)
def getCBHistory(symbol: str, period: int, cycles: int = 5):
'''
Returns historical OHLCV price data from Coinbase.
Parameters:
symbol (str): The symbol of the cryptocurrency
period (int): Granularity of data in seconds.
Accepts one minute, five minutes, 15 minutes,
one hour, four hour and one day
min = 60
5min = 300
15min = 900
hour = 3600
4hour = 21600
day = 86400
cycles (int): data is returned in groups of 300 data points.
If you want more than 300 points of data you will need
to increase the number of cycles.
Default is 5 cycles.
Returns:
data_df (dataframe): A pandas dataframe
'''
# Initialize count
count = 1
# Set initial start and end time
timeEnd = datetime.now()
delta = timedelta(seconds = int(period))
timeStart = timeEnd - (300*delta)
# Iterate through cycles and return dataframe
# if cycles != 1:
while count <= cycles:
if count == 1:
start = timeStart.isoformat()
end = timeEnd.isoformat()
dataframe = c.get_product_historic_rates(f'{symbol.upper()}-USD', start, end, period)
dataframe = _make_df(dataframe)
df = dataframe
else:
# timeEnd = timeStart - (delta)
timeEnd = timeStart
timeStart = timeEnd - (300*delta)
end = timeEnd.isoformat()
start = timeStart.isoformat()
data = c.get_product_historic_rates(f'{symbol.upper()}-USD', start, end, period)
data = _make_df(data)
if data.empty:
break
df = _concat(dataframe, data)
dataframe = df
count += 1
# Check if symbol is listed and format dataframe
if df.empty:
return 'Error: That symbol is not listed on Coinbase'
else:
df['Date'] = pd.to_datetime(df['time'], unit='s')
data_df = df[['Date', 'Open', 'High', 'Low', 'Close']]
data_df = data_df[::-1].reset_index()
data_df.drop('index', axis=1, inplace=True)
data_df = data_df.set_index(['Date'])
return data_df
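# Usage sketch (illustrative assumptions only: 'BTC-USD' is listed, hourly
# granularity, two 300-candle cycles; running it needs live access to the
# Coinbase public API):
if __name__ == '__main__':
    example_df = getCBHistory('BTC', 3600, cycles=2)
    print(example_df.tail())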
def getLastRow(symbol: str, period: int):
'''
Returns most recent candle
Parameters:
symbol (str): The symbol of the cryptocurrency
period (int): Granularity of data in seconds.
Accepts one minute, five minutes, 15 minutes,
one hour, four hour and one day.
Min = 60,
5Min = 300,
15Min = 900,
Hour = 3600,
4Hour = 21600,
Day = 86400,
Returns:
data_df (dataframe): A pandas dataframe containing one row
'''
# Time formatting
delta = timedelta(seconds = int(period))
timeEnd = datetime.now() - delta
timeStart = timeEnd - (delta)
start = timeStart.isoformat()
end = timeEnd.isoformat()
# Row formatting
row = c.get_product_historic_rates(f'{symbol.upper()}-USD', start, end, period)
row = _make_df(row)
row['Date'] =
|
pd.to_datetime(row['time'], unit='s')
|
pandas.to_datetime
|
import os
import pandas as pd
import json
size_file = r'C:\Users\huker\Desktop\size.csv'
image_list = r'C:\Users\huker\Desktop\1GE02_img_list.csv'
json_file = r'C:\Users\huker\Desktop\1GE02_img_size.json'
size_df = pd.read_csv(size_file)
img_df =
|
pd.read_csv(image_list)
|
pandas.read_csv
|
################################################################################
# This module lemmatizes the text in the specified column of the input
# pandas.DataFrame. The module recognizes each input record as a unit of
# assessment content (i.e. a single passage section, an item stem,
# or an item option) and applies a serial number of 'AC_Doc_ID' to each
# output record for the following processing.
# Parameters df_ac: input pandas.DataFrame, it should have, at least, one
# column of text assessment content
# content_column: column name of text assessment content to be
# lemmatized
# lang = 'En' : Language option ('En' or 'Jp')
# Returns Result: pandas.DataFrame including the original columns of the input
# DataFrame plus lemmatized result columns
################################################################################
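# A hypothetical call, assuming a DataFrame `df_items` whose 'Item_Text' column
# holds the assessment content (both names are placeholders):
#   df_lemmatized = ac_lemmatizer(df_items, 'Item_Text', lang='En')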
def ac_lemmatizer(df_ac, content_column, lang = 'En'):
import pandas as pd
import numpy as np
if lang == 'Jp':
from janome.tokenizer import Tokenizer
tagger = Tokenizer()
else:
import nltk
wnl = nltk.WordNetLemmatizer()
import nltk.data
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
df_ac_buf = df_ac.copy()
list_cntnt = list(df_ac_buf[content_column])
list_cntnt_lemma = list_cntnt[:]
list_doc_id = list_cntnt[:]
df_lemma_all = pd.DataFrame()
if lang == 'Jp':
for i, x in enumerate(list_cntnt):
lemmas = []
sentences = x.splitlines()
for y in sentences:
for token in tagger.tokenize(y.strip()):
surface = token.surface
feature = token.part_of_speech
feature_list = feature.split(',')
if surface != u' ':
if token.base_form != u'*':
lemmas = lemmas + [token.base_form]
else:
lemmas = lemmas + [surface]
s = ' '.join(lemmas)
list_cntnt_lemma[i] = s
print(s)
df_lemma = pd.DataFrame({ 'Lemma' : lemmas })
df_doc = pd.DataFrame({ 'AC_Doc_ID' : np.array([i] * len(df_lemma)) })
df_lemma['AC_Doc_ID'] = df_doc['AC_Doc_ID']
df_lemma['Dummy'] = df_doc['AC_Doc_ID']
df_lemma_all = df_lemma_all.append(df_lemma)
list_doc_id[i] = i
else:
for i, x in enumerate(list_cntnt):
lemmas = []
sentences = sent_detector.tokenize(x.strip())
for y in sentences:
tokens = nltk.word_tokenize(y)
words = [w.lower() for w in tokens]
lemmas_v = [wnl.lemmatize(t, 'v') for t in words]
lemmas = lemmas + [wnl.lemmatize(t) for t in lemmas_v]
s = ' '.join(lemmas)
list_cntnt_lemma[i] = s
print(s)
df_lemma = pd.DataFrame({ 'Lemma' : lemmas })
df_doc = pd.DataFrame({ 'AC_Doc_ID' : np.array([i] * len(df_lemma)) })
df_lemma['AC_Doc_ID'] = df_doc['AC_Doc_ID']
df_lemma['Dummy'] = df_doc['AC_Doc_ID']
df_lemma_all = df_lemma_all.append(df_lemma)
list_doc_id[i] = i
df_doc_id = pd.DataFrame({ 'AC_Doc_ID' : list_doc_id })
df_ac_buf['AC_Doc_ID'] = df_doc_id['AC_Doc_ID']
df_cntnt_lemma = pd.DataFrame({ 'Cntnt_Lemma' : list_cntnt_lemma })
df_ac_buf['Cntnt_Lemma'] = df_cntnt_lemma['Cntnt_Lemma']
#Updated 1/16/2017 <EMAIL>
if df_lemma_all.shape[0] > 0:
#Updated 3/4/2017 <EMAIL>
pd_ver = list(map(int, pd.__version__.split('.')))
if (pd_ver[0] > 0) or (pd_ver[1] > 13):
df_crosstab = df_lemma_all.pivot_table(values='Dummy',
index='AC_Doc_ID', columns='Lemma', aggfunc = len)
else:
df_crosstab = df_lemma_all.pivot_table(values='Dummy',
rows='AC_Doc_ID', cols='Lemma', aggfunc = len)
df_crosstab['AC_Doc_ID'] = df_doc_id['AC_Doc_ID']
df_res =
|
pd.merge(df_ac_buf, df_crosstab, on='AC_Doc_ID')
|
pandas.merge
|
"""
Plot the CKS ages against the gyro ages.
Underpredicts the ages of M dwarfs / the M dwarfs rotate too rapidly?
Overpredicts the ages of hot stars / the hot stars rotate too slowly?
I should get the M dwarfs right and underpredict the hot stars in the van Saders
model.
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import teff_bv as tbv
plotpar = {'axes.labelsize': 18,
'text.fontsize': 10,
'legend.fontsize': 18,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True}
plt.rcParams.update(plotpar)
def gyro_age(par, period, bv):
bvc = a*(bv - c)**b
return (period/bvc)**(1./n) * 1e-3
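# The relation above is the usual gyrochronology form,
#   age = (P / (a * (B-V - c)**b))**(1/n) * 1e-3,
# presumably converting Myr to Gyr; note that a, b, c and n are expected to be
# defined in the surrounding scope (the `par` argument is not unpacked here).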
def uncertainty_hist(df):
plt.clf()
mean_uncert = .5*(df.iso_sage_err1.values/df.iso_sage.values -
df.iso_sage_err2.values/df.iso_sage.values)
plt.hist(mean_uncert, 30)
plt.axvline(np.median(mean_uncert), color="k")
print(np.median(mean_uncert))
plt.savefig("cks_uncertainty_hist")
if __name__ == "__main__":
my =
|
pd.read_csv("data/koi_periods_0712.csv")
|
pandas.read_csv
|
import pandas as pd
from pathlib import Path
from steinbock import io
from steinbock.export import graphs
class TestGraphsExport:
def test_convert_to_networkx(self):
neighbors = pd.DataFrame(
data={
"Object": [1, 2],
"Neighbor": [2, 1],
"Distance": [1.0, 1.0],
}
)
neighbors["Object"] = neighbors["Object"].astype(io.mask_dtype)
neighbors["Neighbor"] = neighbors["Neighbor"].astype(io.mask_dtype)
intensities = pd.DataFrame(
data={
"Channel 1": [1.0, 2.0],
"Channel 2": [100.0, 200.0],
},
index=
|
pd.Index([1, 2], name="Object", dtype=io.mask_dtype)
|
pandas.Index
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from keras.models import load_model
import utils
pnt = ['phi', 'theta', 'Lp']
pnp = ['phi_pred', 'theta_pred', 'Lp_pred']
def plot_corr_model(y, y_pred, name='plotcorr.png'):
df = utils.create_df(y, y_pred)
font_size = 22
labelx = [r'$\phi_{cep}$', r'$\theta$', r'$L_p$']
labely = [r'predict $\phi_{cep}$', r'predict $\theta$', r'predict $L_p$']
for i in range(len(pnt)):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7, 7), dpi=200)
ax.scatter(df[pnt[i]], df[pnp[i]], s=2)
red_line = [df[pnt[i]].min(), df[pnt[i]].max()]
ax.plot(red_line, red_line, color='red')
ax.set_xlabel(labelx[i], fontsize=font_size)
ax.set_ylabel(labely[i], fontsize=font_size)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.tight_layout()
plt.savefig('../picture/' + name[:-3] + str(i) + '.png')
# plt.clf()
plt.close()
def load_data(file):
dataset = h5py.File(file, 'r')
x_test = np.array(dataset['x_test'])
y_test = np.array(dataset['y_test'])
dataset.close()
return x_test, y_test
def get_mean_result(accuracy):
mean_mre_phi = []
mean_mre_theta = []
mean_mre_lp = []
mean_mre_av = []
for acc in accuracy:
mean_mre_phi.append(acc['phi']['MRE'])
mean_mre_theta.append(acc['theta']['MRE'])
mean_mre_lp.append(acc['Lp']['MRE'])
mean_mre_av.append(acc['average']['MRE'])
mean_r2_phi = []
mean_r2_theta = []
mean_r2_lp = []
mean_r2_av = []
for acc in accuracy:
mean_r2_phi.append(acc['phi']['R2'])
mean_r2_theta.append(acc['theta']['R2'])
mean_r2_lp.append(acc['Lp']['R2'])
mean_r2_av.append(acc['average']['R2'])
data = [[np.mean(mean_mre_phi), np.mean(mean_mre_theta), np.mean(mean_mre_lp), np.mean(mean_mre_av)],
[np.mean(mean_r2_phi), np.mean(mean_r2_theta), np.mean(mean_r2_lp), np.mean(mean_r2_av)]]
df_accuracy = pd.DataFrame(np.round(data, 3), columns=accuracy[0].columns, index=["MRE", "R2"])
return df_accuracy
if not os.path.exists('../picture/'):
os.mkdir('../picture/')
model_fcnn = []
model_pca = []
model_aug = []
data_fcnn = {}
data_pca = {}
data_aug = {}
accuracy_fcnn = []
accuracy_pca = []
accuracy_aug = []
fcnn_dir = '../result/fcnn/'
pca_dir = '../result/pca/'
aug_dir = '../result/aug/'
k = 10
y_min = np.array([0, 0, 0.08])
y_max = np.array([180, 75, 0.78])
for i in range(k):
model_fcnn.append(load_model(fcnn_dir + 'model' + str(i) +'.h5'))
model_pca.append(load_model(pca_dir + 'model' + str(i) +'.h5'))
model_aug.append(load_model(aug_dir + 'model' + str(i) +'.h5'))
x, y = load_data(fcnn_dir + 'data' + str(i) + '.h5')
data_fcnn['x'], data_fcnn['y'] = x, y
y_pred = model_fcnn[i].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
accuracy_fcnn.append(utils.metric(utils.create_df(y, y_pred)))
x, y = load_data(pca_dir + 'data' + str(i) + '.h5')
data_pca['x'], data_pca['y'] = x, y
y_pred = model_pca[i].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
accuracy_pca.append(utils.metric(utils.create_df(y, y_pred)))
x, y = load_data(aug_dir + 'data' + str(i) + '.h5')
data_aug['x'], data_aug['y'] = x, y
y_pred = model_aug[i].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
accuracy_aug.append(utils.metric(utils.create_df(y, y_pred)))
print('fcnn')
print(get_mean_result(accuracy_fcnn))
x, y = load_data(fcnn_dir + 'data' + str(0) + '.h5')
data_fcnn['x'], data_fcnn['y'] = x, y
y_pred = model_fcnn[0].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
plot_corr_model(y, y_pred, 'plot_corr_fcnn.png')
print('pca')
print(get_mean_result(accuracy_pca))
x, y = load_data(pca_dir + 'data' + str(0) + '.h5')
data_pca['x'], data_pca['y'] = x, y
y_pred = model_pca[0].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
plot_corr_model(y, y_pred, 'plot_corr_pca.png')
print('augmentation')
print(get_mean_result(accuracy_aug))
x, y = load_data(aug_dir + 'data' + str(0) + '.h5')
data_aug['x'], data_aug['y'] = x, y
y_pred = model_aug[0].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
plot_corr_model(y, y_pred, 'plot_corr_aug.png')
metric_fcnn = []
metric_pca = []
metric_aug = []
critarion = 'MRE'
for i in range(k):
metric_fcnn.append(accuracy_fcnn[i]['average'][critarion])
metric_pca.append(accuracy_pca[i]['average'][critarion])
metric_aug.append(accuracy_aug[i]['average'][critarion])
mae_fcnn = np.array(metric_fcnn).reshape(-1, 1)
mae_pca = np.array(metric_pca).reshape(-1, 1)
mae_aug = np.array(metric_aug).reshape(-1, 1)
data = np.concatenate([mae_fcnn, mae_pca, mae_aug], axis=1)
df_accuracy = pd.DataFrame(data, columns=['FCNN', 'PCA', "augmentation"])
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7, 7))
sns.boxplot(data=df_accuracy, ax=ax)
ax.set_ylabel('Mean relative error', fontsize=14)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(14)
plt.tight_layout()
plt.savefig('../picture/boxplot.png')
model_noise_fcnn = []
model_noise_pca = []
model_noise_aug = []
data_noise_fcnn = {}
data_noise_pca = {}
data_noise_aug = {}
accuracy_noise_fcnn = []
accuracy_noise_pca = []
accuracy_noise_aug = []
noise_fcnn_dir = '../result/noise_fcnn/'
noise_pca_dir = '../result/noise_pca/'
noise_aug_dir = '../result/noise_aug/'
k = 10
y_min = np.array([0, 0, 0.08])
y_max = np.array([180, 75, 0.78])
for i in range(k):
model_noise_fcnn.append(load_model(noise_fcnn_dir + 'model' + str(i) +'.h5'))
model_noise_pca.append(load_model(noise_pca_dir + 'model' + str(i) +'.h5'))
model_noise_aug.append(load_model(noise_aug_dir + 'model' + str(i) +'.h5'))
x, y = load_data(noise_fcnn_dir + 'data' + str(i) + '.h5')
data_noise_fcnn['x'], data_noise_fcnn['y'] = x, y
y_pred = model_noise_fcnn[i].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
accuracy_noise_fcnn.append(utils.metric(utils.create_df(y, y_pred)))
x, y = load_data(noise_pca_dir + 'data' + str(i) + '.h5')
data_noise_pca['x'], data_noise_pca['y'] = x, y
y_pred = model_noise_pca[i].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
accuracy_noise_pca.append(utils.metric(utils.create_df(y, y_pred)))
x, y = load_data(noise_aug_dir + 'data' + str(i) + '.h5')
data_noise_aug['x'], data_noise_aug['y'] = x, y
y_pred = model_noise_aug[i].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
accuracy_noise_aug.append(utils.metric(utils.create_df(y, y_pred)))
print('noise fcnn')
print(get_mean_result(accuracy_noise_fcnn))
x, y = load_data(noise_fcnn_dir + 'data' + str(0) + '.h5')
data_noise_fcnn['x'], data_noise_fcnn['y'] = x, y
y_pred = model_noise_fcnn[0].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
plot_corr_model(y, y_pred, 'plot_corr_noise_fcnn.png')
print('noise pca')
print(get_mean_result(accuracy_noise_pca))
x, y = load_data(noise_pca_dir + 'data' + str(0) + '.h5')
data_noise_pca['x'], data_noise_pca['y'] = x, y
y_pred = model_noise_pca[0].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
plot_corr_model(y, y_pred, 'plot_corr_noise_pca.png')
print('noise augmentation')
print(get_mean_result(accuracy_noise_aug))
x, y = load_data(noise_aug_dir + 'data' + str(0) + '.h5')
data_noise_aug['x'], data_noise_aug['y'] = x, y
y_pred = model_noise_aug[0].predict(x)
y_pred, y = utils.postprocessing(y_pred, y, y_min, y_max)
plot_corr_model(y, y_pred, 'plot_corr_noise_aug.png')
metric_noise_fcnn = []
metric_noise_pca = []
metric_noise_aug = []
critarion = 'MRE'
for i in range(k):
metric_noise_fcnn.append(accuracy_noise_fcnn[i]['average'][critarion])
metric_noise_pca.append(accuracy_noise_pca[i]['average'][critarion])
metric_noise_aug.append(accuracy_noise_aug[i]['average'][critarion])
mae_noise_fcnn = np.array(metric_noise_fcnn).reshape(-1, 1)
mae_noise_pca = np.array(metric_noise_pca).reshape(-1, 1)
mae_noise_aug = np.array(metric_noise_aug).reshape(-1, 1)
data = np.concatenate([mae_noise_fcnn, mae_noise_pca, mae_noise_aug], axis=1)
df_accuracy =
|
pd.DataFrame(data, columns=['FCNN', 'PCA', "augmentation"])
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# This compares Fe/H values as calculated by nSSPP and by us
# Parent notebook created 2021 July 19 by E.S.
# Updated 2021 Aug 9 to include S/N of spectra
import pickle
import pandas as pd
import numpy as np
import glob
import os
import re
import matplotlib.pyplot as plt
# directory of pickled Fe/H using our abcd calibration (note just first 1k lines of posterior!)
dir_pickled_feh_abcd = "/Users/bandari/Documents/git.repos/rrlyrae_metallicity/rrlyrae_metallicity/calib_application/" + \
"bin/pickled_info/escrow_us_abcdfghk_on_sdss"
# read in nSSPP Fe/H values
df_nsspp = pd.read_csv("./data/nSSPP82.out", names=["sdss","spectrum", "teff", "logg",
"feh_direct_nsspp", "feh_beers"], delim_whitespace=True)
# read in S/N
df_s2n =
|
pd.read_csv("./data/s2n_sdss_spec.csv")
|
pandas.read_csv
|
"""
ExperimentData module allows us to abstract the data format from the library
such that we can have multiple fetcher modules that only need import this
module, and none of our analysis modules need know anything about the fetchers.
In other words, this is the interface between data fetching and data analysis.
"""
import copy
import numpy as np
import pandas as pd
import warnings
from time import time
class ExperimentData(object):
"""Main class in ExperimentData module.
Attributes:
mandatory_metadata: metadata that needs to be provided (default: {'experiment', 'source'})
primary_indices: primary indices on which the analyses will be performed (default: ['entity', 'variant'])
optional_kpi_indices: optional indices for analysis (default: ['time_since_treatment'])
known_feature_metrics: metric names that are automatically considered as features
"""
# TODO: allow definition of the name of 'entity': would be nicer if the index
# itself maintained the name 'chash' or 'order_number' etc.
# TODO: explain the mandatory_metadata in the exception raised
# TODO: maybe move these to the __init__?
mandatory_metadata = {'experiment', 'source'}
primary_indices = ['entity', 'variant']
optional_kpi_indices = ['time_since_treatment']
known_feature_metrics = {
'age',
'zalando_age',
'customer_age',
'customer_zalando_age',
'gender', 'sex',
'feature',
'treatment_start_time',
'orders_existing',
'orders_prev_year',
'start_segment',
# 'business_customer', #these SHOULD be features, but because we have no historical data, their value will depend on when we retrieve the data - i.e. they can change after the experiment.
# 'corporate_customer',
# 'special_customer',
'existing_customer',
'exposed_customer',
'clv',
# I know this is somewhat controversial, but I want to insist on using PCII as KPI and CLV as a feature always meaning 'sum of PCII over lifetime up to treatment start'
}
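# A hypothetical construction sketch (column values are illustrative): with
# features='default', 'age' goes to the features frame because it appears in
# known_feature_metrics, 'orders' stays a KPI, and 'entity'/'variant' are kept in both.
#   metrics = pd.DataFrame({'entity': [1, 2], 'variant': ['A', 'B'],
#                           'age': [30, 40], 'orders': [3, 5]})
#   exp = ExperimentData(metrics, metadata={'experiment': 'exp1', 'source': 'test'})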
def __init__(self, metrics=None, metadata={}, features='default',
deepcopy=False):
"""
We want to be able to create results from just a single dataframe.
Args:
metrics: data frame that contains either KPI or feature
metadata: the metadata dict
features: either 'default', which searches the metrics data frame for predefined feature names
or list, which subsets the metrics data frame with the given column indices
or data frame, which is feature data frame itself and metrics is either KPI or None
or None
deepcopy: the internal data frames are, by default, shallow copies of the input
dataframes: this means the actual data arrays underlying the frames are
references to the input. In most use-cases, this is desired (reindexing
will not reindex the original etc.) but it may have some edge-case issues.
"""
self.metadata = metadata or {}
feature_indices = copy.deepcopy(self.primary_indices)
kpi_indices = copy.deepcopy(self.primary_indices)
if metrics is not None:
kpi_indices += [i for i in self.optional_kpi_indices if i in metrics.columns]
if metrics is None:
if not isinstance(features, pd.DataFrame):
raise ValueError('No metrics provided!')
else:
self.kpis = pd.DataFrame(columns=self.primary_indices)
self.features = features.copy(deep=deepcopy)
self.variant_names = set(np.unique(self.features.variant))
else:
if isinstance(features, pd.DataFrame):
self.kpis = metrics.copy(deep=deepcopy)
self.features = features.copy(deep=deepcopy)
self.variant_names = set(np.unique(self.features.variant))
elif isinstance(features, list):
if len(features) == 0:
self.kpis = metrics.copy(deep=deepcopy)
self.features = pd.DataFrame(columns=self.primary_indices)
self.variant_names = set(np.unique(self.kpis.variant))
else:
self.kpis = metrics.drop(metrics.columns[features], axis=1)
primary_idx = [metrics.columns.get_loc(x) for x in self.primary_indices]
feature_idx = primary_idx + features
self.features = metrics.iloc[:, feature_idx]
self.variant_names = set(np.unique(self.features.variant))
elif features == 'default':
# TODO: use the detect_features function
features_present = {m for m in metrics if
m.lower() in self.known_feature_metrics | set(feature_indices)}
kpis_present = {m for m in metrics if m.lower() not in self.known_feature_metrics}
self.features = metrics.loc[:, features_present]
self.kpis = metrics.loc[:, kpis_present]
self.variant_names = set(np.unique(self.features.variant))
elif features is None:
self.kpis = metrics.copy(deep=deepcopy)
self.features =
|
pd.DataFrame(columns=self.primary_indices)
|
pandas.DataFrame
|
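# --- Illustrative sketch only, standalone (not part of the modules above or below): it replays
# the 'default' feature-detection step documented in ExperimentData.__init__ on a toy metrics
# frame, to make the split into features and KPIs visible. The toy columns 'age' and 'revenue'
# are assumptions; the partition rule mirrors the class attributes above.
import pandas as pd

known_feature_metrics = {'age', 'gender', 'clv'}
primary_indices = ['entity', 'variant']
metrics = pd.DataFrame({
    'entity': [1, 2, 3],
    'variant': ['A', 'B', 'A'],
    'age': [34, 27, 45],           # known feature metric -> goes into features
    'revenue': [10.0, 12.5, 9.9],  # anything unknown -> treated as a KPI
})
feature_cols = [m for m in metrics if m.lower() in known_feature_metrics | set(primary_indices)]
kpi_cols = [m for m in metrics if m.lower() not in known_feature_metrics]
features, kpis = metrics.loc[:, feature_cols], metrics.loc[:, kpi_cols]
print(list(features.columns), list(kpis.columns))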
import pathlib
import typing
import pandas as pd
# these are initialized by calling function `initialize` with the appropriate parameters `dict` (you MUST do it)
authors_disambiguator = None
organizations_disambiguator = None
def initialize(parameters: dict) -> None:
"""
Initializes this module.
Parameters
----------
parameters : dictionary
Settings
"""
global authors_disambiguator, organizations_disambiguator
# authors disambiguator is initialized...
authors_disambiguator = AuthorsDisambiguator(**parameters['authors'])
# ...and so is the one for organizations
organizations_disambiguator = OrganizationsDisambiguator(**parameters['organizations'])
def projects_researchers(df: pd.DataFrame):
"""
Callback function for "researchers" table in "projects" database.
Parameters
----------
df : Pandas dataframe
Input data
Returns
-------
df: Pandas dataframe
Output data
"""
# names are lower-cased
df['NOMBRE'] = df['NOMBRE'].str.lower()
return df
def publications_authorship(df: pd.DataFrame):
"""
Callback function for "authorship" table in "publications" database.
Parameters
----------
df : Pandas dataframe
Input data
Returns
-------
df: Pandas dataframe
Output data
"""
df['fullname'] = df['initials'] + ' ' + df['surname']
# names are lower-cased
df['fullname'] = df['fullname'].str.lower()
# some nuisance characters are removed
df['affiliation'] = df['affiliation'].str.replace('\n', ' ', regex=False)
df['affiliation'] = df['affiliation'].str.replace('\\', ' ', regex=False)
return df
def patents_literature(df: pd.DataFrame):
"""
Callback function for relating patents literature with itself in "patents" database.
Parameters
----------
df : Pandas dataframe
Input data
Returns
-------
df: Pandas dataframe
Output data
"""
# the rows in which `cited_pat_publn_id` is 0 are filtered out
return df[df['cited_pat_publn_id'] != 0]
def patents_non_literature(df: pd.DataFrame):
"""
Callback function for relating patents literature with non-patents literature in "patents" database.
Parameters
----------
df : Pandas dataframe
Input data
Returns
-------
df: Pandas dataframe
Output data
"""
# the rows in which `npl_publn_id` is 0 are filtered out
return df[df['npl_publn_id'] != 0]
def patents_person(df: pd.DataFrame):
"""
Callback function for "person" table in "patents" database.
Parameters
----------
df : Pandas dataframe
Input data
Returns
-------
df: Pandas dataframe
Output data
"""
# names are lower-cased
df['person_name'] = df['person_name'].str.lower()
# rows in which the name is `na` are removed...
res = df.dropna(subset=['person_name'])
# ...and so are those in which the same field contains an empty string
return res[res['person_name'] != '']
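# Illustrative only (not part of the original module): a tiny, self-contained check of the
# `publications_authorship` callback above on a toy dataframe. The sample values are
# assumptions; only the column names come from the function itself.
def _example_publications_authorship() -> pd.DataFrame:
    toy = pd.DataFrame({
        'initials': ['J.'],
        'surname': ['Doe'],
        'affiliation': ['Dept. of Physics\nSome University\\Campus A'],
    })
    # expected outcome: fullname == 'j. doe', affiliation free of newlines and backslashes
    return publications_authorship(toy)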
class Disambiguator:
"""
Class to process disambiguation data.
"""
def __init__(self, disambiguation_map: typing.List[str], new_id: str) -> None:
"""
Initializer.
Parameters
----------
disambiguation_map : list
Path to the disambiguation map
new_id : str
The name of the new field/column/neo4j property to be created for storing the final *disambiguated* id
"""
# a `pathlib` object
self.file = pathlib.Path(*disambiguation_map)
# the disambiguation map is read from the specified file
self.map = pd.read_csv(self.file, sep=r'\s+')
self.new_id = new_id
# the *final* mapped-to id is obtained as the concatenation of the (output) "db" and "id" columns...
self.map[self.new_id] = self.map['std_db'].str.cat(self.map['std_id'], sep='_')
# ...and those are not needed anymore
self.map = self.map.drop(['std_db', 'std_id'], axis=1)
# prefix identifying the "patstats" entries in the disambiguation map
self.patents_prefix = 'epo'
# the subset of the dataframe corresponding to "patstats"
self.patents_subset = self.map[self.map['orig_db'] == self.patents_prefix]
# the column specifying the database is not needed anymore
self.patents_subset = self.patents_subset.drop(['orig_db'], axis=1)
# same for projects
self.projects_prefix = 'pn'
self.projects_subset = self.map[self.map['orig_db'] == self.projects_prefix]
self.projects_subset = self.projects_subset.drop(['orig_db'], axis=1)
# same for "Scopus"
self.scopus_prefix = 'scps'
self.scopus_subset = self.map[self.map['orig_db'] == self.scopus_prefix]
self.scopus_subset = self.scopus_subset.drop(['orig_db'], axis=1)
@staticmethod
def handle_duplicates(df):
"""
Get rid of duplicated (disambiguated) ids.
Parameters
----------
df : Pandas dataframe
Data with duplicates
Returns
-------
unmapped: Pandas dataframe
The rows of the input whose `source` column equals "left_only" (i.e. the unmapped rows), or the first row of the input dataframe if every row was mapped.
"""
# a (possibly empty) `DataFrame` containing the unmapped value
unmapped = df[df['source'] == 'left_only']
# if all the elements in the group have been mapped...
if unmapped.empty:
# the first one (for example) is returned
return df.head(1)
else:
return unmapped
def merge(self, mapping: pd.DataFrame, df: pd.DataFrame, field: str, prefix: str):
"""
Replace within the passed `DataFrame` the values of `field` that are present in the disambiguation map,
while at the same time ensuring that the new disambiguated values are unique.
Parameters
----------
mapping : Pandas dataframe
Disambiguation map
df : Pandas dataframe
Data to be disambiguated
field : str
Column to be used in `df`
prefix : str
Prefix to be added to a "new" identifier built from the already existing
(not present in the disambiguation map)
Returns
-------
merge: Pandas dataframe
Input data with the disambiguation map applied.
"""
# left join: only the rows in the data (as opposed to those in the disambiguation map) will be present
merge = df.merge(mapping, how='left', left_on=field, right_on='orig_id', indicator='source')
# if the value in new column `new_id` is `NaN`, then replace it with that in `field` with `prefix` prepended
merge[self.new_id] = merge[self.new_id].where(merge[self.new_id].notna(), prefix + '_' + merge[field])
# (all) the indexes of the rows which have duplicate values in `new_id`
i_duplicated = merge.duplicated(subset=self.new_id, keep=False)
# a sub-`DataFrame` with the rows which do *not* contain any duplicates
unique = merge[~i_duplicated]
# the rows which contain duplicates are grouped and *deduplicated* by applying the method `handle_duplicates`
# NOTE: the index of this `DataFrame` is a `MultiIndex`, but that's not a problem since index is not to be
# written to the csv file later on
deduplicated = merge[i_duplicated].groupby(self.new_id, group_keys=True).apply(self.handle_duplicates)
# `unique` and `deduplicated` rows are vertically stacked together
merge =
|
pd.concat((unique, deduplicated))
|
pandas.concat
|
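# --- Illustrative sketch only, standalone (not part of the module above): it condenses the
# left-merge + indicator + de-duplication pattern used in Disambiguator.merge on toy data, so
# the role of the 'source' indicator column is easier to follow. All values and the simplified
# "keep the first duplicate" rule are assumptions.
import pandas as pd

mapping = pd.DataFrame({'orig_id': ['a1', 'a2'], 'person_id': ['scps_1', 'scps_1']})
records = pd.DataFrame({'author': ['a1', 'a2', 'a3']})

merged = records.merge(mapping, how='left', left_on='author', right_on='orig_id', indicator='source')
# unmapped rows keep their original id, with a prefix marking the source database
merged['person_id'] = merged['person_id'].where(merged['person_id'].notna(), 'db_' + merged['author'])

dups = merged.duplicated(subset='person_id', keep=False)
deduplicated = merged[dups].groupby('person_id', group_keys=True).head(1)  # keep one row per duplicated id
result = pd.concat((merged[~dups], deduplicated))
print(result[['author', 'person_id', 'source']])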
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 23 19:41:56 2021
@author: u0117123
"""
#Import modules
import pandas as pd
import numpy as np
import sklearn
from sklearn.linear_model import LogisticRegression
#Input variables
Validation_Area="Tervuren"
#Referece objects with features path
refObjectPath = r'C:\Users\u0117123\Box Sync\FWO\WP1\Point-cloud-extractions\processing\5_Clustering_classification\Reference'
ClusteredObjectPath = r'C:\Users\u0117123\Box Sync\FWO\WP1\Point-cloud-extractions\processing\5_Clustering_classification\3SA\r10\RF_all'
#%% LOGISTIC REGRESSION MODEL
### STEP 1 ### IMPORT DATA
data_density_loop_all = pd.read_csv(refObjectPath + r"\data_density_loop_Reference.csv", sep=";", index_col=(0))
data_density_loop = data_density_loop_all.loc[data_density_loop_all['location'] != Validation_Area]
data_density_loop['height7_1'] = data_density_loop['height7']/data_density_loop['height1']
data_density_loop['height7_2'] = data_density_loop['height7']/data_density_loop['height2']
data_density_loop['height5_1'] = data_density_loop['height5']/data_density_loop['height1']
data_density_loop['height10_2'] = data_density_loop['height10']/data_density_loop['height2']
data_density_loop['height10_1'] = data_density_loop['height10']/data_density_loop['height1']
columns_x = ["min_z", "max_z", "min_slope_rel", "max_slope_rel", "area",
"m_z_chm","m_nr_returns", "3D_dens","height7_1", "height5_1",
"height10_2", "height10_1", "height7_2"]
data_density_loop_x = data_density_loop[columns_x] #independent variables
data_density_loop_ground_p_density = data_density_loop[["ground_p_density"]]
data_density_loop_y = data_density_loop[["Type"]] #Response variable
#Convert response variable to binary values (shrub = 1; tree = 0)
shrub = ["shrub"]
data_density_loop_y["y"] = np.where(data_density_loop_y["Type"].isin(shrub), "1", "0")
data_density_loop_y = data_density_loop_y.drop(['Type'], axis=1)
# convert dataframe response variable to matrix
conv_arr = data_density_loop_y.values
y_array = conv_arr.ravel()
#%%## STEP 2 ### Check for correlations
import matplotlib.pyplot as plt
import seaborn as sns
# Create correlation matrix & selecting upper triangle
cor_matrix = data_density_loop_x.corr().abs()
plt.figure(figsize = (20,10)) # Size of the figure
sns.heatmap(data_density_loop_x.corr().abs(),annot = True)
plt.show()
upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape),k=1).astype(bool))  # np.bool was removed in NumPy 1.24; use the built-in bool
#print(upper_tri)
# Dropping the columns with correlation > 95%
to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > 0.95)] #height5_1, height10_1
#print(); print(to_drop)
data_density_loop_x_dropCorr = data_density_loop_x.drop(to_drop, axis=1)
#print(); print(data_density_loop_x_dropCorr.head())
#%%## STEP 3 ### Cross validation loop
#merge independent variables and dependent variable
data_density_loop_xy_dropCorr = pd.concat([data_density_loop_x_dropCorr,data_density_loop_y], axis=1)
data_density_loop_xy_dropCorr = data_density_loop_xy_dropCorr.reset_index(drop=True)
#split in 10 parts
data_density_loop_xy_dropCorr_shuffled = data_density_loop_xy_dropCorr.sample(frac=1, random_state=1) #shuffle dataframe
data_density_loop_xy_dropCorr_shuffled_List = np.array_split(data_density_loop_xy_dropCorr_shuffled, 10)
#Empty dataframes
rfe_features_append = []
sp_features_append = []
accuracy_append = []
#for loop cross validation
for x in range(10):
trainList = []
for y in range(10):
if y == x :
testdf = data_density_loop_xy_dropCorr_shuffled_List[y]
else:
trainList.append(data_density_loop_xy_dropCorr_shuffled_List[y])
traindf = pd.concat(trainList)
#independent variables and response variable
X_train = traindf.drop(columns=['y'])
y_train = traindf['y']
X_test = testdf.drop(columns=['y'])
y_test = testdf['y']
### STEP 3.1 ### Create scaler
from sklearn import preprocessing
import numpy as np
scaler = preprocessing.StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_train_scaled = pd.DataFrame(data = X_train_scaled, columns=X_train.columns)
X_test_scaled = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(data = X_test_scaled, columns=X_test.columns)
### STEP 3.2 ### Feature selection
### Step 3.2.1 Recursive Feature Elimination
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
rfe = RFE(logreg, n_features_to_select = 5) # running RFE with 5 variables as output
rfe = rfe.fit(X_train_scaled, y_train)
#create training and testing dataframe with selected features
col_rfe = X_train_scaled.columns[rfe.support_]
X_train_scaled_rfe = X_train_scaled[col_rfe]
X_test_scaled_rfe = X_test_scaled[col_rfe]
#create dataframe with selected features per fold
rfe_features_columns = ["fold", "features"]
rfe_features = pd.DataFrame(columns = rfe_features_columns)
rfe_features["features"] = X_train_scaled_rfe.columns
rfe_features["fold"] = x
rfe_features_append.append(rfe_features)
### STEP 3.2.2 Select Percentile (ANOVA F-value, retain the features with the 70% highest scores)
from sklearn.feature_selection import SelectPercentile, SelectKBest, f_classif
sp = SelectPercentile(f_classif, percentile=70).fit(X_train_scaled, y_train)
index_selfeat = (sp.get_support(indices=True)).tolist()
X_train_scaled_sp = X_train_scaled.iloc[:,index_selfeat]
X_test_scaled_sp = X_test_scaled.iloc[:,index_selfeat]
#create dataframe with selected features per fold
sp_features_columns = ["fold", "features"]
sp_features = pd.DataFrame(columns = sp_features_columns)
sp_features["features"] = X_train_scaled_sp.columns
sp_features["fold"] = x
sp_features_append.append(sp_features)
### STEP 4 ### Build models using all or selected features
### STEP 4.1 Full model
logreg_Full = LogisticRegression(random_state=0).fit(X_train_scaled, y_train)
# print('Logistic Regression score for training set: %f' % logreg_Full.score(X_train_scaled, y_train))
y_pred_full = logreg_Full.predict(X_test_scaled)
score_full = logreg_Full.score(X_test_scaled, y_test) # Use score method to get accuracy of model
### STEP 4.2 Recursive Feature Elimination
logreg_RFE = LogisticRegression(random_state=0).fit(X_train_scaled_rfe, y_train)
# print('Logistic Regression score for training set: %f' % logreg_RFE.score(X_train_scaled_rfe, y_train))
y_pred_rfe = logreg_RFE.predict(X_test_scaled_rfe)
score_rfe = logreg_RFE.score(X_test_scaled_rfe, y_test) # Use score method to get accuracy of model
### STEP 4.3 Select Percentile
logreg_SP = LogisticRegression(random_state=0).fit(X_train_scaled_sp, y_train)
# print('Logistic Regression score for training set: %f' % logreg_SP.score(X_train_scaled_sp, y_train))
y_pred_sp = logreg_SP.predict(X_test_scaled_sp)
score_sp = logreg_SP.score(X_test_scaled_sp, y_test) # Use score method to get accuracy of model
#create dataframe with scores per fold
accuracy_columns = ["fold", "accuracy_full", "accuracy_rfe", "accuracy_sp"]
new_row = {'fold': x, 'accuracy_full': score_full, 'accuracy_rfe': score_rfe, 'accuracy_sp': score_sp}
# DataFrame.append was removed in pandas 2.0; build the one-row frame for this fold directly
accuracy = pd.DataFrame([new_row], columns=accuracy_columns)
accuracy_append.append(accuracy)
rfe_features_append = pd.concat(rfe_features_append)
sp_features_append =
|
pd.concat(sp_features_append)
|
pandas.concat
|
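# --- Illustrative sketch only, standalone: the correlation-based feature dropping used in
# STEP 2 above, replayed on a toy frame. Note the use of the built-in `bool` (np.bool was
# removed in NumPy 1.24). Column names and values are made up.
import numpy as np
import pandas as pd

X = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0],
                  'b': [2.1, 4.0, 6.2, 7.9],   # almost perfectly correlated with 'a'
                  'c': [4.0, 1.0, 3.5, 2.0]})
cor = X.corr().abs()
upper = cor.where(np.triu(np.ones(cor.shape), k=1).astype(bool))
to_drop = [col for col in upper.columns if (upper[col] > 0.95).any()]
X_reduced = X.drop(columns=to_drop)
print(to_drop, list(X_reduced.columns))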
from pytest import raises as pytest_raises
from src.dict_search.dict_search import DictSearch
from src.dict_search import exceptions
from .data import data, complex_data
def test_search_dict_precondition():
with pytest_raises(exceptions.PreconditionError):
list(DictSearch().dict_search(data, 1))
def test_operator_char():
operator_str = "!"
values = DictSearch(operator_str=operator_str).dict_search(
[
{
"$in": 1,
},
{
"$in": 0,
},
],
{"$in": 1},
)
assert len([val for val in values]) == 1
def test_mixed_type_data():
values = DictSearch().dict_search(
[{"demo": 1}, "not_a_dict", 123, {"demo": 2}],
{"demo": {"$gte": 1}},
)
assert len(list(values)) == 2
def test_mixed_type_field():
values = DictSearch().dict_search(data, {"special": False})
assert len([val for val in values]) == 5
def test_wrong_type_comparison():
values = DictSearch().dict_search(data, {"fy": {"$lt": "r"}})
assert len([val for val in values]) == 0
def test_simple_field():
values = DictSearch().dict_search(data, {"fy": 2011})
assert len([val for val in values]) == 3
def test_nested_field():
values = DictSearch().dict_search(data, {"assets": {"curr": {"a": 0}}})
assert len([val for val in values]) == 5
def test_multiple_fields():
values = DictSearch().dict_search(
data, {"assets": {"curr": {"a": 0}, "non_cur": 4586}, "liab": {"non_cur": {"a": 2447}}}
)
results = [val for val in values]
assert results[0]["name"] == "mdb"
assert len(results) == 1
def test_malformed_high_level_operator():
values = DictSearch().dict_search(
[{"assets": "a"}, {"assets": 2}, {"assets": [1, 32]}], {"$and": [1, {"assets": "a"}], "missing": [1, 2]}
)
results = [val for val in values]
assert len(results) == 0
def test_expected_exception():
import pandas as pd
values = list(DictSearch(expected_exceptions=ValueError).dict_search(
{"df":
|
pd.DataFrame()
|
pandas.DataFrame
|
# %%
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
# %%
#tabla = pd.DataFrame({"Patente":[], "Tipo":[], "Marca":[], "Modelo":[], "RUT":[], "Nro. Motor":[], "Año":[], "Nombre a Rutificador":[]})
tablacompleta =
|
pd.DataFrame()
|
pandas.DataFrame
|
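# --- Illustrative sketch only, standalone: one way the empty accumulator `tablacompleta`
# above is typically grown page by page. The HTML layout, the helper name `agregar_pagina`
# and the absence of real headers are assumptions; nothing here comes from the original
# scraper beyond the pd.concat accumulation pattern.
import pandas as pd
from bs4 import BeautifulSoup

def agregar_pagina(tablacompleta: pd.DataFrame, html: str) -> pd.DataFrame:
    soup = BeautifulSoup(html, "html.parser")
    filas = []
    for tr in soup.select("table tr")[1:]:  # skip the header row
        celdas = [td.get_text(strip=True) for td in tr.find_all("td")]
        if celdas:
            filas.append(celdas)
    # concat keeps working even while the accumulator is still empty
    return pd.concat([tablacompleta, pd.DataFrame(filas)], ignore_index=True)

html = "<table><tr><th>Patente</th></tr><tr><td>ABCD12</td></tr></table>"
print(agregar_pagina(pd.DataFrame(), html))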
# general
import logging
import json
import os
import random
import math
from collections import defaultdict, Counter
import glob
import shutil, io, base64, abc
# general package
from natsort import natsorted
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
import regex as re
import h5py
# image
import skimage
from skimage import measure as sk_measure
from adjustText import adjust_text
# processing
import ctypes
import subprocess
import dill as pickle
#vis
import dabest
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
#methods
import umap
import hdbscan
import diffxpy.api as de
import anndata
from scipy import ndimage, stats
from scipy.spatial.distance import squareform, pdist
import scipy.cluster as spc
from scipy.cluster.vq import kmeans2
from .imzml import IMZMLExtract
from .plotting import Plotter
#web/html
import jinja2
# applications
import progressbar
def makeProgressBar():
return progressbar.ProgressBar(widgets=[
progressbar.Bar(), ' ', progressbar.Percentage(), ' ', progressbar.AdaptiveETA()
])
class SpectraRegion():
pass
class RegionClusterer(metaclass=abc.ABCMeta):
def __init__(self, region:SpectraRegion) -> None:
self.region = region
self.logger = None
self.__set_logger()
def __set_logger(self):
self.logger = logging.getLogger(self.methodname())
self.logger.setLevel(logging.INFO)
if not self.logger.hasHandlers():
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
self.logger.addHandler(consoleHandler)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
consoleHandler.setFormatter(formatter)
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'fit') and callable(subclass.fit) and
hasattr(subclass, 'transform') and callable(subclass.transform) and
hasattr(subclass, 'fit_transform') and callable(subclass.fit_transform) and
hasattr(subclass, 'segmentation') and callable(subclass.segmentation) and
hasattr(subclass, 'region')
)
def methodname(self):
"""Brief description of the specific clusterer
"""
return self.__class__.__name__
@abc.abstractmethod
def fit(self, num_target_clusters:int, max_iterations:int=100, verbose:bool=False):
"""[summary]
Args:
num_target_clusters ([type]): [description]
max_iterations (int, optional): [description]. Defaults to 100.
verbose (bool, optional): Verbose output. Defaults to False.
Raises:
NotImplementedError: [description]
"""
raise NotImplementedError
@abc.abstractmethod
def transform(self, num_target_clusters: int, verbose:bool=False) -> np.array:
"""
Returns the final segmentation
Args:
num_target_clusters (int): number of target clusters
verbose (bool, optional): Verbose output. Defaults to False.
Raises:
NotImplementedError: (abstract class)
Returns:
np.array: segmentation
"""
raise NotImplementedError
@abc.abstractmethod
def segmentation(self) -> np.array:
"""Returns the final segmentation for given region
Raises:
NotImplementedError: [description]
Returns:
np.array: segmentation
"""
raise NotImplementedError
def fit_transform(self, num_target_clusters: int, verbose:bool=False) -> np.array:
"""[summary]
Args:
num_target_clusters (int): number of target clusters
verbose (bool, optional): Verbose output. Defaults to False.
Returns:
np.array: segmentation
"""
self.fit(num_target_clusters=num_target_clusters, verbose=verbose)
return self.transform(num_target_clusters=num_target_clusters, verbose=verbose)
def plot_segments(self, highlight=None, file=None):
"""Plots the segmented array of the current SpectraRegion object.
Args:
highlight (list/tuple/set/int, optional): If the highlight clusters are specified, those will be assigned a cluster id 2. Otherwise 1. Background stays 0. Defaults to None.
"""
showcopy = np.copy(self.segmentation())
if highlight != None:
if not isinstance(highlight, (list, tuple, set)):
highlight = [highlight]
for i in range(0, showcopy.shape[0]):
for j in range(0, showcopy.shape[1]):
if showcopy[i,j] != 0:
if showcopy[i,j] in highlight:
showcopy[i,j] = 2
elif showcopy[i,j] != 0:
showcopy[i,j] = 1
fig, _ = plt.subplots()
Plotter.plot_array_scatter(fig, showcopy, discrete_legend=True)
if not highlight is None and len(highlight) > 0:
plt.title("Highlighted (yellow) clusters: {}".format(", ".join([str(x) for x in highlight])), y=1.08)
if not file is None:
plt.savefig(file, bbox_inches="tight")
plt.show()
plt.close()
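# --- Illustrative sketch only (not part of the original module): a minimal RegionClusterer
# implementation that satisfies the abstract interface above by clustering the flattened
# spectra with scipy's kmeans2 (already imported). The class name and the +1 label shift
# (so that 0 keeps its "unassigned" meaning) are assumptions.
class ExampleKMeansClusterer(RegionClusterer):
    def __init__(self, region: SpectraRegion) -> None:
        super().__init__(region)
        self._segmented = None

    def fit(self, num_target_clusters: int, max_iterations: int = 100, verbose: bool = False):
        spectra = self.region.region_array
        flat = spectra.reshape(-1, spectra.shape[2])
        _, labels = kmeans2(flat, k=num_target_clusters, iter=max_iterations, minit='++')
        # reshape the flat labels back onto the 2D pixel grid
        self._segmented = labels.reshape(spectra.shape[0], spectra.shape[1]) + 1

    def transform(self, num_target_clusters: int, verbose: bool = False) -> np.array:
        return self._segmented

    def segmentation(self) -> np.array:
        return self._segmented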
class SpectraRegion():
"""
SpectraRegion class for any analysis of imzML spectra regions
"""
@classmethod
def from_pickle(cls, path):
"""Loads a SpectraRegion from pickle file.
Args:
path (str): Path to pickle file to load spectra region from.
Returns:
SpectraRegion: SpectraRegion object from pickle.
"""
obj = None
with open(path, "rb") as fin:
obj = pickle.load(fin)
return obj
def to_pickle(self, path):
"""Pickles the current SpectraRegion object.
Args:
path (str): Path to save the pickle file in.
"""
with open(path, "wb") as fout:
pickle.dump(self, fout)
def to_spatial_anndata(self, grouping="segmented") -> anndata.AnnData:
assert grouping in self.meta
availableGroups = np.unique(self.meta[grouping])
sampleVec = []
clusterVec = []
coordinates = []
exprData = pd.DataFrame()
masses = [("mass_" + str(x)).replace(".", "_") for x in self.idx2mass]
for clus in availableGroups:
positions = np.where(self.meta[grouping] == clus, )
self.logger.info("Collecting cluster: {}".format(clus))
bar = makeProgressBar()
for pxl in bar(positions):
pxl_name = "{}__{}".format(str(len(sampleVec)), "_".join([str(x) for x in pxl]))
sampleVec.append(pxl_name)
clusterVec.append(clus)
coordinates.append( pxl )
exprData[pxl_name] = self.region_array[pxl[0], pxl[1], :]
self.logger.info("DE DataFrame ready. Shape {}".format(exprData.shape))
#from squidpy:
# adata = AnnData(counts, obsm={"spatial": coordinates})
pData = pd.DataFrame()
pData["cluster"] = clusterVec
# columns: genes # var: featurenames
# rows : cells/pixels # obs: rows = cell/pixel information
deData = anndata.AnnData(
X=exprData.values.transpose(),
var=pd.DataFrame(index=masses),
obs=pData,
obsm={"spatial": coordinates}
)
return deData
def ctypesCloseLibrary(self):
"""Unloads the C++ library
"""
dlclose_func = ctypes.CDLL(None).dlclose
dlclose_func.argtypes = [ctypes.c_void_p]
dlclose_func.restype = ctypes.c_int
dlclose_func(self.lib._handle)
def loadLib(self):
"""Prepares everything for the usage of the C++ library
"""
baseFolder = str(os.path.dirname(os.path.realpath(__file__)))
libfile = (glob.glob(os.path.join(baseFolder, "libPIMZ*.so")) + glob.glob(os.path.join(baseFolder, "../build/lib*/pIMZ/", "libPIMZ*.so")))[0]
self.lib = ctypes.cdll.LoadLibrary(libfile)
self.lib.StatisticalRegionMerging_New.argtypes = [ctypes.c_uint32, ctypes.POINTER(ctypes.c_float), ctypes.c_uint8]
self.lib.StatisticalRegionMerging_New.restype = ctypes.c_void_p
self.lib.SRM_calc_similarity.argtypes = [ctypes.c_void_p, ctypes.c_uint32, ctypes.c_uint32, ctypes.POINTER(ctypes.c_float)]
self.lib.SRM_calc_similarity.restype = ctypes.POINTER(ctypes.c_float)
self.lib.StatisticalRegionMerging_mode_dot.argtypes = [ctypes.c_void_p]
self.lib.StatisticalRegionMerging_mode_dot.restype = None
self.lib.StatisticalRegionMerging_mode_eucl.argtypes = [ctypes.c_void_p]
self.lib.StatisticalRegionMerging_mode_eucl.restype = None
def __init__(self, region_array, idx2mass, name=None):
"""Initializes a SpectraRegion object with the following attributes:
- lib: C++ library
- logger (logging.Logger): Reference to the Logger object.
- name (str): Name of the region. Defaults to None.
- region_array (numpy.array): Array of spectra.
- idx2mass (numpy.array): m/z values.
- spectra_similarity (numpy.array): Pairwise similarity matrix. Initialized with None.
- dist_pixel (numpy.array): Pairwise coordinate distance matrix (2-norm). Initialized with None.
- idx2pixel (dict): Dictionary of enumerated pixels to their coordinates.
- pixel2idx (dict): Inverted idx2pixel dict. Dictionary of coordinates mapped to pixel numbers. Initialized with None.
- elem_matrix (array): A list of spectra whose positional id corresponds to the pixel number. Shape (n_samples, n_features). Initialized with None.
- dimred_elem_matrix (array): Embedding of the elem_matrix in low-dimensional space. Shape (n_samples, n_components). Initialized with None.
- dimred_labels (list): A list of HDBSCAN labels. Initialized with None.
- segmented (numpy.array): Segmented region_array which contains cluster ids.
- consensus (dict): A dictionary of cluster ids mapped to their respective consensus spectra. Initialized with None.
- consensus_method (str): Name of consensus method: "avg" or "median". Initialized with None.
- consensus_similarity_matrix (array): Pairwise similarity matrix between consensus spectra. Initialized with None.
- de_results_all (dict): Methods mapped to their differential analysis results (as pd.DataFrame). Initialized with an empty defaultdict.
Args:
region_array (numpy.array): Array of spectra defining one region.
idx2mass (numpy.array): m/z values for given spectra.
name (str, optional): Name of this region (required if you want to do a comparative analysis). Defaults to None.
"""
assert(not region_array is None)
assert(not idx2mass is None)
assert(len(region_array[0,0,:]) == len(idx2mass))
self.lib = None
self.loadLib()
self.logger = None
self.__setlogger()
self.name = None
self.region_array = region_array
self.idx2mass = idx2mass
self.spectra_similarity = None
self.dist_pixel = None
self.idx2pixel = {}
self.pixel2idx = {}
self.elem_matrix = None
self.dimred_elem_matrix = None
self.dimred_labels = None
self.consensus = None
self.consensus_method = None
self.consensus_similarity_matrix = None
self.meta = {}
self.de_results_all = defaultdict(lambda: dict())
self.df_results_all = defaultdict(lambda: dict())
for i in range(self.region_array.shape[0]*self.region_array.shape[1]):
x,y = divmod(i, self.region_array.shape[1])
self.idx2pixel[i] = (x,y)
self.pixel2idx[(x,y)] = i
def __setlogger(self):
"""Sets up logging facilities for SpectraRegion.
"""
self.logger = logging.getLogger('SpectraRegion')
if len(self.logger.handlers) == 0:
self.logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
self.logger.addHandler(consoleHandler)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
consoleHandler.setFormatter(formatter)
self.logger.info("Added new Stream Handler")
def __getstate__(self):
"""Returns all data necessary to reconstruct the current state.
Returns:
dict: all data in one dict
"""
return {
"name": self.name,
"region_array": self.region_array,
"idx2mass": self.idx2mass,
"spectra_similarity": self.spectra_similarity,
"dist_pixel": self.dist_pixel,
"idx2pixel": self.pixel2idx,
"elem_matrix": self.elem_matrix,
"dimred_elem_matrix": self.dimred_elem_matrix,
"dimred_labels": self.dimred_labels,
"consensus": self.consensus,
"consensus_method": self.consensus_method,
"consensus_similarity_matrix": self.consensus_similarity_matrix,
"de_results_all": self.de_results_all,
"df_results_all": self.df_results_all,
"meta": self.meta
}
def __setstate__(self, state):
"""Reconstructs the current state from a dictionary with all data, and sets up idx2pixel.
Args:
state (dict of data): data required to reconstruct state
"""
self.__dict__.update(state)
self.logger = None
self.__setlogger()
self.idx2pixel = {}
self.pixel2idx = {}
for i in range(self.region_array.shape[0]*self.region_array.shape[1]):
x,y = divmod(i, self.region_array.shape[1])
self.idx2pixel[i] = (x,y)
self.pixel2idx[(x,y)] = i
@property
def segmented(self):
return self.meta["segmented"]
@segmented.setter
def segmented(self, value):
assert type(value) == np.ndarray
assert value.shape == (self.region_array.shape[0], self.region_array.shape[1])
self.meta["segmented"] = value
def rotate_region(self):
self.region_array = np.rot90(self.region_array)
for x in self.meta:
self.meta[x] = np.rot90(self.meta[x])
def flip_region(self, dir="hor"):
assert dir in ["hor", "ver"]
flipCall = None
if dir == "hor":
flipCall = lambda x: np.flip(x, axis=1)
elif dir == "ver":
flipCall = lambda x: np.flip(x, axis=0)
self.region_array = flipCall(self.region_array)
for x in self.meta:
self.meta[x] = flipCall(self.meta[x])
def plot_array(self, fig, arr, discrete_legend=True):
"""Plots an array of values (e.g. segment IDs) into the given figure and adds a discrete legend.
Args:
fig (matplotlib.pyplot.figure): Figure to plot to.
arr (array): array to visualize
discrete_legend (bool, optional): Plots a discrete legend for array values. Defaults to True.
Returns:
matplotlib.pyplot.figure: Figure with the plotted array
"""
if discrete_legend:
valid_vals = sorted(np.unique(arr))
normArray = np.zeros(arr.shape)
val_lookup = {}
positions = []
for uIdx, uVal in enumerate(valid_vals):
normArray[arr == uVal] = uIdx
val_lookup[uIdx] = uVal
positions.append(uIdx)
heatmap = plt.matshow(normArray, cmap=plt.cm.get_cmap('viridis', len(valid_vals)), fignum=fig.number)
# calculate the POSITION of the tick labels
#positions = np.linspace(0, len(valid_vals), len(valid_vals))
def formatter_func(x, pos):
'The two args are the value and tick position'
val = val_lookup[x]
return val
formatter = plt.FuncFormatter(formatter_func)
# We must be sure to specify the ticks matching our target names
plt.colorbar(heatmap, ticks=positions, format=formatter, spacing='proportional')
else:
heatmap = plt.matshow(arr, cmap=plt.cm.get_cmap('viridis'), fignum=fig.number)
plt.colorbar(heatmap)
return fig
def to_aorta3d(self, folder, prefix, regionID, protWeights = None, nodf=False, pathPrefix = None, ctpred=None, kw2segment=None):
"""Extracts available data and prepares files for the 3D representation:
- .clustering.png: Picture of the segmented region.
- .matrix.npy: Matrix of the segmented region.
- .tsv: Marker Proteins Analysis findings. (Optional)
- .info: Configuration file.
Args:
folder (str): Desired output folder.
prefix (str): Desired name of the output files.
regionID (int): Id of the desired region in the .imzML file.
protWeights (ProteinWeights, optional): ProteinWeights object for translation of masses to protein name. Defaults to None.
nodf (bool, optional): If set to True, do not perform differential analysis. Defaults to False.
pathPrefix (str, optional): Desired path prefix for DE data files. Defaults to None.
ctpred (str, optional): Path to .tsv file with cluster-cell type mapping. Defaults to None.
kw2segment (dict, optional): Dictionary keyword => segment; assigns keyword to all listed segments.
"""
cluster2celltype = None# { str(x): str(x) for x in np.unique(self.segmented)}
if ctpred != None:
with open(ctpred, 'r') as fin:
cluster2celltype = {}
for line in fin:
line = line.strip().split("\t")
clusterID = line[0]
clusterType = line[1]
cluster2celltype[clusterID] = clusterType
for x in cluster2celltype:
self.logger.info("Cell-type assigned: {} -> {}".format(x, cluster2celltype[x]))
# segments images
cluster2coords = self.getCoordsForSegmented()
os.makedirs(folder, exist_ok=True)
segmentsPath = os.path.join(folder, prefix + "." + str(regionID) + ".clustering.png")
self.logger.info("Segment Image: {}".format(segmentsPath))
cmap = plt.cm.viridis
norm = plt.Normalize(vmin=self.segmented.min(), vmax=self.segmented.max())
image = cmap(norm(self.segmented))
plt.imsave(segmentsPath, image)
# segment matrix
matrixPath = os.path.abspath(os.path.join(folder, prefix + "." + str(regionID) + ".matrix.npy"))
self.logger.info("Segment Matrix: {}".format(matrixPath))
with open(matrixPath, "wb") as fout:
np.save(fout, self.segmented)
# hdf5 file with intensities
hdf5Path = os.path.abspath(os.path.join(folder, prefix + "." + str(regionID) + ".hdf5"))
with h5py.File(hdf5Path, "w") as data_file:
grp = data_file.create_group("intensities")
for mzIdx in range(0, self.region_array.shape[2]):
mzValue = self.idx2mass[mzIdx]
dset = grp.create_dataset( str(mzIdx) , data=self.region_array[:,:, mzIdx])
dset.attrs["mz"] = mzValue
data_file.close()
cluster2deData = {}
# write DE data
if protWeights != None:
self.logger.info("Starting Marker Proteins Analysis")
if not nodf:
markerGenes = self.find_all_markers(protWeights, use_methods=["ttest"], replaceExisting=False, includeBackground=True)
for cluster in cluster2coords:
outputname = prefix + "." + str(regionID) + "." + str(cluster) +".tsv"
if not nodf:
outfile = os.path.join(folder, outputname)
subdf = markerGenes["ttest"][markerGenes["ttest"]["clusterID"] == str(cluster)]
subdf.to_csv(outfile, sep="\t", index=True)
if pathPrefix != None:
outputname = os.path.join(pathPrefix, outputname)
cluster2deData[cluster] = outputname
# write info
regionInfos = {}
for cluster in cluster2coords:
clusterType = "aorta" if cluster != 0 else "background"
regionInfo = {
"type_det": [clusterType],
"coordinates": [[x[1], x[0]] for x in cluster2coords[cluster]],
}
if cluster2celltype != None:
if str(cluster) in cluster2celltype:
regionInfo["type_det"].append( cluster2celltype[str(cluster)] )
else:
self.logger.info("No cell type info for cluster: '{}'".format(cluster))
if kw2segment != None:
for kw in kw2segment:
if cluster in kw2segment[kw] or str(cluster) in kw2segment[kw]:
regionInfo["type_det"].append( kw )
if cluster in cluster2deData:
regionInfo["de_data"] = cluster2deData[cluster]
regionInfos[str(cluster)] = regionInfo
infoDict = {}
infoDict = {
"region": regionID,
"path_upgma": segmentsPath,
"info": regionInfos,
"segment_file": matrixPath,
"hdf5_file": hdf5Path
}
jsElems = json.dumps([infoDict])
# write config_file
with open(os.path.join(folder, prefix + "." + str(regionID) + ".info"), 'w') as fout:
print(jsElems, file=fout)
def judge_de_masses(self, filter_func):
"""Adds or edits the de_judge element of a differential analysis result dictionary of the given SpectraRegion object by applying the desired function.
Args:
filter_func (function): A function that is applied to every entry of a differential analysis result of every available method.
"""
for test in self.df_results_all:
for comp in self.df_results_all[test]:
testDF = self.df_results_all[test][comp]
self.logger.info("Judging results from {} and comparison {}".format(test, comp))
dfRes = [False] * len(testDF)
for index, row in testDF.iterrows():
res = filter_func(self, row)
dfRes[index] = res
if "de_judge" in testDF.columns.values.tolist():
self.logger.info("Removing existing judge in comp: {}".format(comp))
del testDF["de_judge"]
pos = testDF.columns.values.tolist().index("gene_mass")+1
testDF.insert(loc = pos,
column = 'de_judge',
value = dfRes)
self.logger.info("Storing results from {} and comparison {} (position {})".format(test, comp,pos))
self.df_results_all[test][comp] = testDF
def idx_for_mass(self, mass):
"""Returns the closest index for a specific mass.
Args:
mass (float): mass to look up index for.
Returns:
int: index in m/z array for mass (or closest mass if not exactly found).
"""
emass, eidx = self._get_exmass_for_mass(mass)
return eidx
def get_mass_from_index(self, idx):
return self.idx2mass[idx]
def _get_exmass_for_mass(self, mass, threshold=None):
"""Returns the closest mass and index in .imzML file for a specific mass.
TODO make this really performant!
Args:
mass (float): mass to look up index for.
threshold (float, optional): Maximal distance from mass to contained m/z. Defaults to None.
Returns:
float, int: mass and index of closest contained m/z for mass.
"""
dist2mass = float('inf')
curMass = -1
curIdx = -1
for xidx,x in enumerate(self.idx2mass):
dist = abs(x-mass)
if dist < dist2mass and (threshold==None or dist < threshold):
dist2mass = dist
curMass = x
curIdx = xidx
return curMass, curIdx
def _fivenumber(self, valuelist, addfuncs=None):
"""Creates five number statistics for values in valuelist.
Args:
valuelist (list/tuple/numpy.array (1D)): List of values to use for statistics.
addfuncs (list/tuple/numpy.array): A collection of functions that is applied to the valuelist. Defaults to None.
Returns:
tuple: len, len>0, min, 25-quantile, 50-quantile, 75-quantile, max, (valuelist after application of given function(s) if given)
"""
min_ = np.min(valuelist)
max_ = np.max(valuelist)
(quan25_, quan50_, quan75_) = np.quantile(valuelist, [0.25, 0.5, 0.75])
addRes = []
if addfuncs != None:
addRes = [fx(valuelist) for fx in addfuncs]
return tuple([len(valuelist), len([x for x in valuelist if x > 0]), min_, quan25_, quan50_, quan75_, max_] + addRes)
def detect_highly_variable_masses(self, topn=2000, bins=50, return_mz=False, meanThreshold=0.05):
"""Detects HV (highly variable) masses and reduces the spectra array accordingly.
Args:
topn (int, optional): Top HV indices. Defaults to 2000.
bins (int, optional): Number of bins for sorting based on average expression. Defaults to 50.
return_mz (bool, optional): Whether to return m/z values instead of ids. Defaults to False.
meanThreshold (float, optional): Threshold applied to every element in the bin. Defaults to 0.05.
Returns:
list: list of HV masses
"""
hvIndices = IMZMLExtract.detect_hv_masses(self.region_array, topn=topn, bins=bins, meanThreshold=meanThreshold)
if return_mz:
return [self.idx2mass[x] for x in hvIndices]
return hvIndices
def plot_intensity_distribution(self, mass):
"""Provides five number statistics, mean, variance and plots a histogram of mass intensity.
Args:
mass (float): mass to look up index for.
"""
bestExMassForMass, bestExMassIdx = self._get_exmass_for_mass(mass)
allMassIntensities = []
for i in range(self.region_array.shape[0]):
for j in range(self.region_array.shape[1]):
allMassIntensities.append(self.region_array[i,j,bestExMassIdx])
print("Five Number stats + mean, var", self._fivenumber(allMassIntensities, addfuncs=[np.mean, lambda x: np.var(x)/np.mean(x)]))
plt.hist(allMassIntensities, bins=len(allMassIntensities))
plt.title("Mass intensity histogram (m/z = {})".format(round(bestExMassForMass, 3)))
plt.show()
def mass_dotplot(self, masses, scale=True, meta_group="segmented", pw=None, title="{mz}", exprThreshold=0.2, return_df=False):
"""Filters the region_region to the given masses and returns the matrix with summed
representation of the gained spectra.
Args:
masses (array): List of masses or protein names (requires pw set).
pw (ProteinWeights, optional): Used to translate protein names to actual masses. Defaults to None, which assumes the elements in masses are numeric.
Returns:
pandas.DataFrame: The per-mass, per-group summary used for the plot (only returned if return_df is True).
"""
if not isinstance(masses, (list, tuple, set, np.ndarray)):
masses = [masses]
useMasses = []
for x in masses:
if type(x) == str:
massprots = pw.get_masses_for_protein(x)
useMasses += list(massprots)
else:
useMasses.append( self._get_exmass_for_mass(x) )
useMasses = list(set(useMasses))
massIndices = np.array([x[1] for x in useMasses])
massValues = [x[0] for x in useMasses]
# prepare data
subsetRA = self.region_array[:,:, massIndices]
# get groups
grouping = self.meta[meta_group]
all_groups = np.unique(grouping)
plotValues = defaultdict(list)
for mi, mass in enumerate(massValues):
for grp in all_groups:
positions = np.where(grouping == grp, )
allValues = subsetRA[:,:,massIndices[mi]][positions]
massAvgExpr = np.mean(allValues)
massPercExpr = sum([1 for x in allValues if x > exprThreshold]) / len(allValues)
plotValues["mass"].append(mass)
plotValues["group"].append(grp)
plotValues["avg_expr"].append(massAvgExpr)
plotValues["perc_expr"].append(massPercExpr)
df = pd.DataFrame.from_dict(plotValues)
plotExpr = "avg_expr"
if scale:
df["avg_expr_orig"] = df["avg_expr"]
zscore = lambda x: (x - x.mean()) / x.std()
df["avg_expr_scaled"] = df.groupby(['group'])['avg_expr'].transform(zscore)
plotExpr = "avg_expr_scaled"
#print(df.sort_values("group"))
fig,_ = plt.subplots()
Plotter.plot_df_dots(fig, df, "group", "mass", plotExpr, "perc_expr", title=title.format(mz=";".join([str(round(x, 3)) if not type(x) in [str] else x for x in masses])))
plt.show()
plt.close()
if return_df:
return df
def __str__(self):
deres = ["{}: {}".format(x, ",\n ".join([str(y) for y in self.de_results_all[x]])) for x in self.de_results_all]
return """SpectraRegion Object
Grid size : {}x{}
Features : {}
Segmentation : {}
DE Results :\n{}
Test
""".format( self.region_array.shape[0],
self.region_array.shape[1],
len(self.idx2mass),
self.segmented is not None,
"\n".join(deres)
)
def __repr__(self) -> str:
return self.__str__()
def mass_heatmap(self, masses, log=False, min_cut_off=None, max_cut_off=None, plot=True, verbose=True, pw=None, title="{mz}", file=None):
"""Filters the region_region to the given masses and returns the matrix with summed
representation of the gained spectra.
Args:
masses (array): List of masses or protein names (requires pw set).
log (bool, optional): Whether to take logarithm of the output matrix. Defaults to False.
min_cut_off (int/float, optional): Lower limit of values in the output matrix. Smaller values will be replaced with min_cut_off. Defaults to None.
max_cut_off (int/float, optional): Upper limit of values in the output matrix. Greater values will be replaced with max_cut_off. Defaults to None.
plot (bool, optional): Whether to plot the output matrix. Defaults to True.
verbose (bool, optional): Whether to add information to the logger. Defaults to True.
pw (ProteinWeights, optional): Used to translate protein names to actual masses. Defaults to None, which assumes the elements in masses are numeric.
Returns:
numpy.array: Each element is a sum of intensities at given masses.
"""
if not isinstance(masses, (list, tuple, set)):
masses = [masses]
useMasses = []
for x in masses:
if type(x) == str:
massprots = pw.get_masses_for_protein(x)
useMasses += list(massprots)
else:
useMasses.append(x)
image = np.zeros((self.region_array.shape[0], self.region_array.shape[1]))
for mass in useMasses:
bestExMassForMass, bestExMassIdx = self._get_exmass_for_mass(mass)
if verbose:
self.logger.info("Processing Mass {} with best existing mass {}".format(mass, bestExMassForMass))
for i in range(self.region_array.shape[0]):
for j in range(self.region_array.shape[1]):
image[i,j] += self.region_array[i,j,bestExMassIdx]
if log:
image = np.log(image)
if min_cut_off != None:
image[image <= min_cut_off] = min_cut_off
if max_cut_off != None:
image[image >= max_cut_off] = max_cut_off
if plot:
heatmap = plt.matshow(image)
plt.colorbar(heatmap)
plt.gca().xaxis.set_ticks_position('bottom')
plt.title(title.format(mz=";".join([str(round(x, 3)) if not type(x) in [str] else x for x in masses])))
if not file is None:
plt.savefig(file, bbox_inches="tight")
plt.show()
plt.close()
return image
def calc_similarity(self, inputarray):
"""Returns cosine similarity matrix which is claculated with help of C++ libarary.
Args:
inputarray (numpy.array): Array of spectra.
Returns:
numpy.array: Pairwise similarity matrix.
"""
# load image
dims = 1
if len(inputarray.shape) > 2:
dims = inputarray.shape[2]
self.logger.info("dimensions inputarray: {}".format(dims))
qs = []
qArr = (ctypes.c_float * len(qs))(*qs)
self.logger.info("Creating C++ obj")
self.logger.info("{} {}".format(dims, inputarray.shape))
# self.obj = lib.StatisticalRegionMerging_New(dims, qArr, 3)
# print(inputarray.shape)
# testArray = np.array([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]],[[13,14,15],[16,17,18]],[[19,20,21],[22,23,24]],[[25,26,27],[28,29,30]],[[31,32,33],[34,35,36]]], dtype=np.float32)
# print(testArray.shape)
# image_p = testArray.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
# retValues = lib.SRM_test_matrix(self.obj, testArray.shape[0], testArray.shape[1], image_p)
# exit()
self.logger.info("dimensions {}".format(dims))
self.logger.info("input dimensions {}".format(inputarray.shape))
self.obj = self.lib.StatisticalRegionMerging_New(dims, qArr, len(qs))
self.logger.info("Switching to dot mode")
self.lib.StatisticalRegionMerging_mode_dot(self.obj)
#inputarray = inputarray.astype(np.float32)
inputarray = np.ascontiguousarray(inputarray, np.float32)
image_p = inputarray.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
self.logger.info("Starting calc similarity c++")
retValues = self.lib.SRM_calc_similarity(self.obj, inputarray.shape[0], inputarray.shape[1], image_p)
outclust = np.ctypeslib.as_array(retValues, shape=(inputarray.shape[0] * inputarray.shape[1], inputarray.shape[0] * inputarray.shape[1]))
self.logger.info("outclust dimensions {}".format(outclust.shape))
return outclust
def calculate_similarity(self, mode="spectra", features=[], neighbors = 1):
"""Returns similarity matrix.
Args:
mode (str, optional): Must be "spectra", "spectra_dist", "spectra_log" or "spectra_log_dist". Defaults to "spectra".\n
- "spectra": Raw similarity matrix.\n
- "spectra_dist": Raw similarity matrix, with the pixel-distance matrix added elementwise at a 5% rate.\n
- "spectra_log": Takes a logarithm and normalizes the similarity matrix by dividing by the maximum values.\n
- "spectra_log_dist": Takes a logarithm, normalizes the similarity matrix by dividing by the maximum values and elementwise adds the distance matrix with 5% rate to the similarity matrix.\n
features (list, optional): A list of desired masses. Defaults to [] meaning all masses.
neighbors (int, optional): Number of neighboring masses to each feature to be included. Defaults to 1.
Returns:
numpy.array: Spectra similarity matrix
"""
assert(mode in ["spectra", "spectra_dist", "spectra_log", "spectra_log_dist"])
if len(features) > 0:
for neighbor in range(neighbors):
features = features + [i + neighbor for i in features] + [i - neighbor for i in features]
features = np.unique(features)
featureIndex = [self._get_exmass_for_mass(x) for x in features]
featureIndex = [y for (x,y) in featureIndex if y != None]
featureIndex = sorted(np.unique(featureIndex))
regArray = np.zeros((self.region_array.shape[0], self.region_array.shape[1], len(featureIndex)))
for i in range(self.region_array.shape[0]):
for j in range(self.region_array.shape[1]):
extracted = [self.region_array[i,j,:][k] for k in tuple(featureIndex)]
regArray[i,j,:] = extracted
else:
regArray = np.array(self.region_array, copy=True)
self.spectra_similarity = self.calc_similarity(regArray)
if mode in ["spectra_log", "spectra_log_dist"]:
self.logger.info("Calculating spectra similarity")
self.spectra_similarity = np.log(self.spectra_similarity + 1)
self.spectra_similarity = self.spectra_similarity / np.max(self.spectra_similarity)
self.logger.info("Calculating spectra similarity done")
if mode in ["spectra_log_dist", "spectra_dist"]:
if self.dist_pixel is None or self.dist_pixel.shape != self.spectra_similarity.shape:
self.dist_pixel = np.zeros((self.spectra_similarity.shape[0], self.spectra_similarity.shape[1]))
self.logger.info("Calculating dist pixel map")
for x in range(0, self.spectra_similarity.shape[0]):
coordIx, coordIy = self.idx2pixel[x]# divmod(x, self.region_array.shape[1])
for y in range(0, self.spectra_similarity.shape[1]):
coordJx, coordJy = self.idx2pixel[y] # divmod(x, self.region_array.shape[1])
self.dist_pixel[x,y] = np.linalg.norm((coordIx-coordJx, coordIy-coordJy))
self.dist_pixel = self.dist_pixel / np.max(self.dist_pixel)
self.logger.info("Calculating dist pixel map done")
self.spectra_similarity = 0.95 * self.spectra_similarity + 0.05 * self.dist_pixel
return self.spectra_similarity
def __segment__upgma(self, number_of_regions):
"""Forms flat clusters with UPGMA clustering method (see scipy.cluster.hierarchy.linkage method='average' for more information) on the similarity matrix.
Args:
number_of_regions (int): Number of desired clusters.
Returns:
numpy.ndarray: An array where each element is the flat cluster number to which original observation belongs.
"""
ssim = 1-self.spectra_similarity
ssim[range(ssim.shape[0]), range(ssim.shape[1])] = 0
Z = spc.hierarchy.linkage(squareform(ssim), method='average', metric='cosine')
c = spc.hierarchy.fcluster(Z, t=number_of_regions, criterion='maxclust')
return c
def __segment__centroid(self, number_of_regions):
"""Forms flat clusters with centroid clustering method (see scipy.cluster.hierarchy.linkage for more information to the method) on the similarity matrix.
Args:
number_of_regions (int): Number of desired clusters.
Returns:
numpy.ndarray: An array where each element is the flat cluster number to which original observation belongs.
"""
ssim = 1-self.spectra_similarity
ssim[range(ssim.shape[0]), range(ssim.shape[1])] = 0
Z = spc.hierarchy.linkage(squareform(ssim), method='centroid', metric='cosine')
c = spc.hierarchy.fcluster(Z, t=number_of_regions, criterion='maxclust')
return c
def __segment__median(self, number_of_regions):
"""Forms flat clusters with median clustering method (see scipy.cluster.hierarchy.linkage for more information to the method) on the similarity matrix.
Args:
number_of_regions (int): Number of desired clusters.
Returns:
numpy.ndarray: An array where each element is the flat cluster number to which original observation belongs.
"""
ssim = 1-self.spectra_similarity
ssim[range(ssim.shape[0]), range(ssim.shape[1])] = 0
Z = spc.hierarchy.linkage(squareform(ssim), method='median', metric='cosine')
c = spc.hierarchy.fcluster(Z, t=number_of_regions, criterion='maxclust')
return c
def __segment__wpgma(self, number_of_regions):
"""Performs WPGMA linkage (see scipy.cluster.hierarchy.weighted for more information to the method) on the similarity matrix.
Args:
number_of_regions (int): Number of desired clusters.
Returns:
numpy.ndarray: An array where each element is the flat cluster number to which original observation belongs.
"""
ssim = 1-self.spectra_similarity
ssim[range(ssim.shape[0]), range(ssim.shape[1])] = 0
Z = spc.hierarchy.weighted(squareform(ssim))
c = spc.hierarchy.fcluster(Z, t=number_of_regions, criterion='maxclust')
return c
def __segment__ward(self, number_of_regions):
"""Performs Ward’s linkage (see scipy.cluster.hierarchy.ward for more information to the method) on the similarity matrix.
Args:
number_of_regions (int): Number of desired clusters.
Returns:
numpy.ndarray: An array where each element is the flat cluster number to which original observation belongs.
"""
ssim = 1-self.spectra_similarity
ssim[range(ssim.shape[0]), range(ssim.shape[1])] = 0
Z = spc.hierarchy.ward(squareform(ssim))
c = spc.hierarchy.fcluster(Z, t=number_of_regions, criterion='maxclust')
return c
def prepare_elem_matrix(self, sparse=False, dims=None):
"""Updates Embedding of the elem_matrix in low-dimensional space (.dimred_elem_matrix) and returns a list of spectra with positional id correspond to the pixel number (.elem_matrix) and a mapping of an id to respective coordinate in .elem_matrix
Args:
sparse (bool, optional): Whether to use compressed sparse row matrix by scipy.sparse.csr_matrix for matrices intialisation. Defaults to False.
dims (int/list, optional): The desired amount of intensity values that will be taken into account performing dimension reduction. Defaults to None, meaning all intensities are considered.
Returns:
array, dict: A list of spectra with positional id correspond to the pixel number. Shape (n_samples, n_features), mapping of an id to respective coordinate tuple
"""
ndims = self.region_array.shape[2]
if not dims is None:
if type(dims) == int:
ndims = dims
else:
ndims = len(dims)
if not sparse:
self.dimred_elem_matrix = np.zeros((self.region_array.shape[0]*self.region_array.shape[1], self.region_array.shape[2]))
elem_matrix = np.zeros((self.region_array.shape[0]*self.region_array.shape[1], ndims))
else:
self.dimred_elem_matrix = csr_matrix((self.region_array.shape[0]*self.region_array.shape[1], self.region_array.shape[2]))
elem_matrix = csr_matrix((self.region_array.shape[0]*self.region_array.shape[1], ndims))
print("Elem Matrix", elem_matrix.shape)
"""
----------> spectra ids
|
|
| m/z values
|
v
"""
idx2ij = {}
for i in range(0, self.region_array.shape[0]):
for j in range(0, self.region_array.shape[1]):
idx = i * self.region_array.shape[1] + j
if not dims is None:
elem_matrix[idx, :] = self.region_array[i,j,dims]
else:
elem_matrix[idx, :] = self.region_array[i,j,:]
idx2ij[idx] = (i,j)
return elem_matrix, idx2ij
def __segment__umap_ward(self, number_of_regions, densmap=False, dims=None, n_neighbors=10):
"""Performs UMAP dimension reduction on region array followed by Euclidean pairwise distance calculation in order to do Ward's linkage.
Args:
number_of_regions (int): Number of desired clusters.
densmap (bool, optional): Whether to use densMAP (density-preserving visualization tool based on UMAP). Defaults to False.
dims (int/list, optional): The desired amount of intensity values that will be taken into account performing dimension reduction. Defaults to None, meaning all intensities are considered.
n_neighbors (int, optional): The size of the local neighborhood (in terms of number of neighboring sample points) used for manifold approximation. For more information check UMAP documentation. Defaults to 10.
Returns:
numpy.ndarray: An array where each element is the flat cluster number to which original observation belongs.
"""
self.elem_matrix, idx2ij = self.prepare_elem_matrix(dims=dims)
self.logger.info("UMAP reduction")
self.dimred_elem_matrix = umap.UMAP(
densmap=densmap,
n_neighbors=n_neighbors,
min_dist=0.0,
n_components=2,
random_state=42,
).fit_transform(self.elem_matrix)
self.logger.info("Ward reduction"),
print(self.dimred_elem_matrix.shape)
pwdist = pdist(self.dimred_elem_matrix, metric="euclidean")
print(pwdist.shape)
Z = spc.hierarchy.ward(pwdist)
self.dimred_labels = spc.hierarchy.fcluster(Z, t=number_of_regions, criterion='maxclust')
return self.dimred_labels
def __segment__umap_hdbscan(self, number_of_regions, densmap=False, dims=None, n_neighbors=10, min_cluster_size=20, num_samples=10000):
"""Performs UMAP dimension reduction on region array followed by the HDBSCAN clustering.
Args:
number_of_regions (int): Number of desired clusters.
densmap (bool, optional): Whether to use densMAP (density-preserving visualization tool based on UMAP). Defaults to False.
dims (int/list, optional): The desired amount of intensity values that will be taken into account performing dimension reduction. Defaults to None, meaning all intensities are considered.
n_neighbors (int, optional): The size of the local neighborhood (in terms of number of neighboring sample points) used for manifold approximation. For more information check UMAP documentation. Defaults to 10.
min_cluster_size (int, optional): The minimum size of HDBSCAN clusters. Defaults to 20.
num_samples (int, optional): Number of intensity values that will be used during HDBSCAN clustering. Defaults to 10000.
Returns:
list: A list of HDBSCAN labels.
"""
self.elem_matrix, idx2ij = self.prepare_elem_matrix(dims=dims)
self.logger.info("UMAP reduction")
self.dimred_elem_matrix = umap.UMAP(
densmap=densmap,
n_neighbors=n_neighbors,
min_dist=0.0,
n_components=2,
random_state=42,
).fit_transform(self.elem_matrix)
self.redo_hdbscan_on_dimred(number_of_regions, min_cluster_size, num_samples)
return self.dimred_labels
def __segment__kmeans(self, number_of_regions):
"""Forms flat clusters with k-means++ clustering method (see scipy.cluster.vq.kmeans2 for more information) on the spectra array.
Args:
number_of_regions (int): Number of desired clusters.
Returns:
numpy.ndarray: An array where each element is the flat cluster number to which original observation belongs.
"""
all_spectra = self.region_array.reshape(-1, self.region_array.shape[2])
centroid, label = kmeans2(all_spectra, k=number_of_regions, iter=10, minit='++')
if 0 in label:
label += 1
return label
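# Illustrative sketch (not part of the class API): the k-means++ step above, written as a
# standalone snippet; `region_array` stands for any (x, y, m/z) intensity cube.
#   import numpy as np
#   from scipy.cluster.vq import kmeans2
#   all_spectra = region_array.reshape(-1, region_array.shape[2])      # (pixels, channels)
#   centroid, label = kmeans2(all_spectra, k=5, iter=10, minit='++')   # labels in 0..k-1
#   label = label + 1 if 0 in label else label                         # keep 0 free for "unassigned"
#   segmented = label.reshape(region_array.shape[:2])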
def redo_hdbscan_on_dimred(self, number_of_regions, min_cluster_size=15, num_samples=10000, set_segmented=True):
"""Performs HDBSCAN clustering (Hierarchical Density-Based Spatial Clustering of Applications with Noise) with the additional UMAP dimension reduction in order to achieve the desired number of clusters.
Args:
number_of_regions (int): Number of desired clusters.
min_cluster_size (int, optional): The minimum size of HDBSCAN clusters. Defaults to 15.
num_samples (int, optional): Number of intensity values that will be used during HDBSCAN clustering. Defaults to 10000.
set_segmented (bool, optional): Whether to update the segmented array of the current object. Defaults to True.
"""
self.logger.info("HDBSCAN reduction")
if num_samples > self.dimred_elem_matrix.shape[0]:
num_samples = self.dimred_elem_matrix.shape[0]
self.logger.info("HDBSCAN reduction num_samples reset: {}".format(num_samples))
if num_samples == -1 or self.dimred_elem_matrix.shape[0] < num_samples:
selIndices = [x for x in range(0, self.dimred_elem_matrix.shape[0])]
else:
selIndices = random.sample([x for x in range(0, self.dimred_elem_matrix.shape[0])], num_samples)
dr_matrix = self.dimred_elem_matrix[selIndices, :]
self.logger.info("HDBSCAN Clusterer with matrix {}".format(dr_matrix.shape))
clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, prediction_data=True).fit(dr_matrix)
clusterer.generate_prediction_data()
self.logger.info("HDBSCAN Soft Clusters with matrix {}".format(self.dimred_elem_matrix.shape))
soft_clusters = hdbscan.prediction.membership_vector(clusterer, self.dimred_elem_matrix)
self.logger.info("HDBSCAN Soft Clusters as output matrix {}".format(soft_clusters.shape))
self.logger.info("HDBSCAN Soft Clusters: {}".format(soft_clusters.shape))
print(soft_clusters)
self.logger.info("HDBSCAN Labeling")
self.dimred_labels = np.array([np.argmax(x) for x in soft_clusters])+1 # +1 avoids 0
if len(np.unique(self.dimred_labels)) > number_of_regions:
self.logger.info("Cluster Reduction for UMAP Result")
self.segmented = np.reshape(self.dimred_labels, (self.region_array.shape[0], self.region_array.shape[1]))
self.reduce_clusters(number_of_regions)
self.dimred_labels = np.reshape(self.segmented, (self.region_array.shape[0] * self.region_array.shape[1],))
self.dimred_labels = list(self.dimred_labels)
if set_segmented:
image_UPGMA = np.zeros(self.region_array.shape, dtype=np.int16)
image_UPGMA = image_UPGMA[:,:,0]
# cluster 0 has special meaning: not assigned !
assert(not 0 in self.dimred_labels)
for i in range(0, image_UPGMA.shape[0]):
for j in range(0, image_UPGMA.shape[1]):
image_UPGMA[i,j] = self.dimred_labels[self.pixel2idx[(i,j)]]
self.segmented = image_UPGMA
def reduce_clusters(self, number_of_clusters):
"""Reducing the number of clusters in segmented array by "reclustering" after the Ward's clustering on pairwise similarity matrix between consensus spectra.
Args:
number_of_clusters (int): Number of desired clusters.
"""
self.logger.info("Cluster Reduction")
_ = self.consensus_spectra()
self.consensus_similarity()
Z = spc.hierarchy.ward(self.consensus_similarity_matrix)
c = spc.hierarchy.fcluster(Z, t=number_of_clusters, criterion='maxclust')
dimred_labels = np.reshape(self.segmented, (self.region_array.shape[0] * self.region_array.shape[1],))
origlabels = np.array(dimred_labels, copy=True)
for cidx, cval in enumerate(c):
dimred_labels[origlabels == (cidx+1)] = cval
self.segmented = np.reshape(dimred_labels, (self.region_array.shape[0], self.region_array.shape[1]))
self.consensus = None
self.consensus_similarity_matrix = None
def vis_umap(self, legend=True, marker_size=(2.0, 10.0)):
"""Visualises a scatterplot of the UMAP/densMAP assigned pixels.
Args:
legend (bool, optional): Whether to include the legend to the plot. Defaults to True.
marker_size (tuple, optional): Tuple of preferred marker sizes for unassigned points (marker_size[0]) and label-specific points (marker_size[1]). Defaults to (2.0, 10.0).
"""
assert(not self.dimred_elem_matrix is None)
assert(not self.dimred_labels is None)
nplabels = np.array(self.dimred_labels)
plt.figure()
clustered = (nplabels >= 0)
self.logger.info("Pixels : {}".format(self.dimred_elem_matrix.shape[0]))
self.logger.info("Unassigned: {}".format(self.dimred_elem_matrix[~clustered, ].shape[0]))
plt.scatter(self.dimred_elem_matrix[~clustered, 0],
self.dimred_elem_matrix[~clustered, 1],
color=(1, 0,0),
label="Unassigned",
s=marker_size[0])
uniqueClusters = sorted(set([x for x in nplabels if x >= 0]))
for cidx, clusterID in enumerate(uniqueClusters):
cmap=plt.cm.get_cmap('viridis', len(uniqueClusters))
clusterColor = cmap(cidx / len(uniqueClusters))
plt.scatter(self.dimred_elem_matrix[nplabels == clusterID, 0],
self.dimred_elem_matrix[nplabels == clusterID, 1],
color=clusterColor,
label=str(clusterID),
s=marker_size[1])
if legend:
plt.legend(loc="upper left", bbox_to_anchor=(1.05, 1))
plt.show()
plt.close()
def plot_tic(self, min_cut_off=None, max_cut_off=None, masses=None, hist=False, plot_log=False):
"""Displays a matrix where each pixel is the sum of intensity values over all m/z summed in the corresponding pixel in region_array.
Args:
min_cut_off (int/float, optional): Minimum allowed value. Smaller values will be replaced with min_cut_off value. Defaults to None.
max_cut_off (int/float, optional): Maximum allowed value. Greater values will be replaced with max_cut_off value. Defaults to None.
masses (numpy.array/list, optional): A list of masses to which each spectrum will be reduced. Defaults to None, meaning all masses are considered.
hist (bool, optional): Whether to plot a cumulative histogram of the summed values. Defaults to False.
plot_log (bool, optional): Whether to log-transform the resulting matrix. Defaults to False.
Returns:
numpy.array: A matrix with summed intensities of each spectrum.
"""
assert(not self.region_array is None)
showcopy = np.zeros((self.region_array.shape[0], self.region_array.shape[1]))
massIndices = [x for x in range(self.region_array.shape[2])]
if masses is not None:
massIndices = []
for mass in masses:
mx, idx = self._get_exmass_for_mass(mass)
massIndices.append(idx)
massIndices = sorted(massIndices)
allCounts = []
for i in range(0, showcopy.shape[0]):
for j in range(0, showcopy.shape[1]):
pixelcount = np.sum(self.region_array[i,j, massIndices])
showcopy[i,j] = pixelcount
allCounts.append(pixelcount)
if min_cut_off != None:
showcopy[showcopy <= min_cut_off] = min_cut_off
if max_cut_off != None:
showcopy[showcopy >= max_cut_off] = max_cut_off
if plot_log:
showcopy = np.log(showcopy)
fig, _ = plt.subplots()
Plotter.plot_array_scatter(fig, showcopy, discrete_legend=False)
plt.show()
plt.close()
if hist:
fig = plt.figure()
plt.hist(allCounts, bins=len(allCounts), cumulative=True, histtype="step")
plt.show()
plt.close()
return showcopy
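# Usage sketch (assumes `region` is an initialised SpectraRegion instance; the variable
# name and the mass values are illustrative only):
#   tic_matrix = region.plot_tic(masses=[741.53, 788.54], plot_log=True, hist=True)
#   # `tic_matrix` holds the per-pixel summed intensities that were plotted above.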
def plot_volcano(self, method, comparison, title, outfile=None, topn=30, masses=None, gene_names=None, only_selected=False):
"""Plots a volcano plot representing the differential analysis results of the current object.
Args:
method (str): Test method for differential expression analysis. "empire", "ttest" or "rank".
comparison (tuple): A tuple of two tuples, each consisting of the cluster ids compared.
title (str): Title of the resulting plot.
outfile (str, optional): The path where to save the resulting plot. Defaults to None.
topn (int, optional): Number of the most significantly up/down regulated genes. Defaults to 30.
masses (list, optional): A collection of floats that represent the desired masses to be labeled. Defaults to None.
gene_names (list, optional): A collection of strings that represent the desired gene names to be labeled. Defaults to None.
only_selected (bool, optional): Whether to plot all results and highlight the selected masses/genes (=False) or plot only the selected masses/genes (=True). Defaults to False.
"""
dataframe = pd.merge(self.df_results_all[method][comparison], self.de_results_all[method][comparison], left_on=['gene_ident'],right_on=['gene'])
genes = ['{:.4f}'.format(x) for x in list(dataframe['gene_mass'])]
if masses:
if only_selected:
dataframe = dataframe.loc[dataframe['gene_mass'].isin(masses)]
genes = ['{:.4f}'.format(x) for x in list(dataframe['gene_mass'])]
if gene_names:
if only_selected:
dataframe = dataframe.loc[dataframe['gene_x'].isin(gene_names)]
genes = list(dataframe['gene_x'])
fc = list(dataframe['log2fc'])
pval = list(dataframe['pval'])
FcPvalGene = [(fc[i], pval[i], genes[i]) for i in range(len(genes))]
if topn>0:
SpectraRegion._plot_volcano(FcPvalGene, title, outfile, showGeneCount=topn, showGene=gene_names)
else:
SpectraRegion._plot_volcano(FcPvalGene, title, outfile, showGeneCount=len(genes), showGene=gene_names)
def _plot_volcano(FcPvalGene, title, outfile=None, showGeneCount=30, showGene=None):
"""Fucntion that performs plotting of the volcano plot for plot_volcano() function.
Args:
FcPvalGene (list): List of tuples (fold change, p-value, identification mass or name)
title (str): Title of the plot.
outfile (str, optional): The path where to save the resulting plot. Defaults to None.
showGeneCount (int, optional): Number of the most significantly up/down regulated genes. Defaults to 30.
showGene (list, optional): A collection of strings that represent the desired gene names to be labeled. Defaults to None.
"""
color1 = "#883656" #"#BA507A"
color1_nosig = "#BA507A"
color1_nosig_less = "#d087a4"
color2 = "#4d6841"
color2_nosig = "#70975E"
color2_nosig_less = "#99b78b"
colors = {"down": (color1, color1_nosig, color1_nosig_less), "up": (color2, color2_nosig,color2_nosig_less)}
with plt.style.context("default"):
plt.figure(figsize=(16,10))
FcPvalGene = sorted(FcPvalGene, key=lambda x: x[1])
xydots = [(x[0], -np.log10(x[1])) for x in FcPvalGene]
maxally = max([x[1] for x in xydots if not np.isinf(x[1])])
xydots = [(x, y if y <= maxally else maxally) for x,y in xydots]
dotgene = [x[2] for x in FcPvalGene]
pvalThresh = -np.log10(0.05)
showGeneCount_pos = showGeneCount
showGeneCount_neg = showGeneCount
showGenes = []
for x in FcPvalGene:
gene = x[2]
geneFC = x[0]
if showGene:
if gene in showGene and showGeneCount_neg > 0:
showGenes.append(gene)
showGeneCount_neg -= 1
if gene in showGene and showGeneCount_pos > 0:
showGenes.append(gene)
showGeneCount_pos -= 1
else:
if geneFC < 0 and showGeneCount_neg > 0:
showGenes.append(gene)
showGeneCount_neg -= 1
if geneFC > 0 and showGeneCount_pos > 0:
showGenes.append(gene)
showGeneCount_pos -= 1
texts = []
sel_down_xy = []
nosig_down_xy = []
nosigless_down_xy = []
sel_up_xy = []
nosig_up_xy = []
nosigless_up_xy = []
upregCount = 0
downregCount = 0
upregSigCount = 0
downregSigCount = 0
unregCount = 0
for gi, (x,y) in enumerate(xydots):
if x < 0:
if y < pvalThresh or abs(x) < 1:
downregCount += 1
else:
downregSigCount += 1
elif x > 0:
if y < pvalThresh or abs(x) < 1:
upregCount += 1
else:
upregSigCount += 1
elif x == 0:
unregCount += 1
if dotgene[gi] in showGenes:
if x < 0:
sel_down_xy.append((x,y))
else:
sel_up_xy.append((x,y))
texts.append(plt.text(x * (1 + 0.01), y * (1 + 0.01) , dotgene[gi], fontsize=12, bbox=dict(boxstyle='round', facecolor='white', alpha=0.7)))
else:
if x < 0:
if y < pvalThresh or abs(x) < 1:
nosigless_down_xy.append((x,y))
else:
nosig_down_xy.append((x,y))
else:
if y < pvalThresh or abs(x) < 1:
nosigless_up_xy.append((x,y))
else:
nosig_up_xy.append((x,y))
#print(len(sel_xy), "of", len(genes))
ymax = max([y for x,y in xydots])
xmin = min([x for x,y in xydots])
xmax = max([x for x,y in xydots])
plt.plot([x[0] for x in nosigless_up_xy], [x[1] for x in nosigless_up_xy], '.', color=colors["up"][2])
plt.plot([x[0] for x in nosigless_down_xy], [x[1] for x in nosigless_down_xy], '.', color=colors["down"][2])
plt.plot([x[0] for x in nosig_up_xy], [x[1] for x in nosig_up_xy], 'o', color=colors["up"][1])
plt.plot([x[0] for x in nosig_down_xy], [x[1] for x in nosig_down_xy], 'o', color=colors["down"][1])
plt.plot([x[0] for x in sel_up_xy], [x[1] for x in sel_up_xy], 'o', color=colors["up"][0])
plt.plot([x[0] for x in sel_down_xy], [x[1] for x in sel_down_xy], 'o', color=colors["down"][0])
if plt.xlim()[0]<-0.5:
plt.hlines(y=pvalThresh, xmin=plt.xlim()[0], xmax=-0.5, linestyle="dotted")
if plt.xlim()[1]>0.5:
plt.hlines(y=pvalThresh, xmin=0.5, xmax=plt.xlim()[1], linestyle="dotted")
yMaxLim = plt.ylim()[1]
plt.vlines(x=0.5, ymin=pvalThresh, ymax=yMaxLim, linestyle="dotted")
plt.vlines(x=-0.5, ymin=pvalThresh, ymax=yMaxLim, linestyle="dotted")
adjust_text(texts, force_points=0.2, force_text=0.2, expand_points=(2, 2), expand_text=(1, 1), arrowprops=dict(arrowstyle="-", color='black', lw=0.5))
# texts.append(plt.text(x * (1 + 0.01), y * (1 + 0.01) , dotgene[gi], fontsize=12))
plt.title(title, fontsize = 40)
plt.xlabel("logFC", fontsize = 32)
plt.ylabel("Neg. Log. Adj. P-Value", fontsize = 32)
plt.xticks(fontsize=14)
infoText = "Total Genes: {}; Up-Reg. (sig.): {} ({}); Down-Reg. (sig.): {} ({}); Un-Reg.: {}".format(
upregCount+downregCount+upregSigCount+downregSigCount+unregCount,
upregCount, upregSigCount,
downregCount, downregSigCount,
unregCount
)
plt.figtext(0.5, 0.01, infoText, wrap=True, horizontalalignment='center', fontsize=14)
if outfile:
print(outfile)
plt.savefig(outfile, bbox_inches="tight")
def set_null_spectra(self, condition):
"""Goes thought the region array and sets the intensity values to zero if the condition is met.
Args:
condition (function): Condition of canceling out an intensity value.
"""
#bar = progressbar.Bar()
for i in range(0, self.region_array.shape[0]):#bar(range(0, self.region_array.shape[0])):
for j in range(0, self.region_array.shape[1]):
if condition(self.region_array[i,j, :]):
self.region_array[i,j,:] = 0
def plot_segments(self, coloring="segmented", highlight=None, file=None):
"""Plots the segmented array of the current SpectraRegion object.
Args:
coloring (str, optional): Key in self.meta whose array is plotted. Defaults to "segmented".
highlight (list/tuple/set/int, optional): If highlight clusters are specified, those are assigned cluster id 2 and all other non-background clusters id 1; the background stays 0. Defaults to None.
file (str, optional): Path to save the resulting plot to. Defaults to None, meaning the plot is only shown.
"""
assert(coloring in self.meta and not self.meta[coloring] is None)
showcopy = np.copy(self.meta[coloring])
if highlight != None:
if not isinstance(highlight, (list, tuple, set)):
highlight = [highlight]
for i in range(0, showcopy.shape[0]):
for j in range(0, showcopy.shape[1]):
if showcopy[i,j] != 0:
if showcopy[i,j] in highlight:
showcopy[i,j] = 2
elif showcopy[i,j] != 0:
showcopy[i,j] = 1
fig, _ = plt.subplots()
Plotter.plot_array_scatter(fig, showcopy, discrete_legend=True)
if not highlight is None and len(highlight) > 0:
plt.title("Highlighted (yellow) clusters: {}".format(", ".join([str(x) for x in highlight])), y=1.08)
if not file is None:
plt.savefig(file, bbox_inches="tight")
plt.show()
plt.close()
def list_segment_counts(self):
"""Prints the size of each cluster in segmenetd array.
"""
regionCounts = Counter()
for i in range(0, self.segmented.shape[0]):
for j in range(0, self.segmented.shape[1]):
regionCounts[ self.segmented[i,j] ] += 1
for region in natsorted([x for x in regionCounts]):
print(region, ": ", regionCounts[region])
def segment_clusterer(self, clusterer:RegionClusterer, verbose=False):
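"""Applies the given RegionClusterer to this region and stores its segmentation in self.segmented."""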
segmentation = clusterer.segmentation()
# segmentation has same dimensions as original array
assert (segmentation.shape == (self.region_array.shape[0], self.region_array.shape[1]))
self.segmented = segmentation
def segment(self, method="UPGMA", dims=None, number_of_regions=10, n_neighbors=10, min_cluster_size=20, num_samples=1000):
"""Performs clustering on similarity matrix.
Args:
method (str, optional): Clustering method: "UPGMA", "WPGMA", "WARD", "KMEANS", "UMAP_DBSCAN", "CENTROID", "MEDIAN", "UMAP_WARD", "DENSMAP_WARD" or "DENSMAP_DBSCAN". Defaults to "UPGMA".\n
- "UPGMA": Unweighted pair group method with arithmetic mean.\n
- "WPGMA": Weighted pair group method with arithmetic mean.\n
- "WARD": Ward variance minimization algorithm.\n
- "KMEANS": k-means++ clustering.\n
- "UMAP_DBSCAN": Uniform Manifold Approximation and Projection for Dimension Reduction (UMAP) followed by Density-Based Spatial Clustering of Applications with Noise (DBSCAN).\n
- "DENSMAP_DBSCAN": densMAP performs an optimization of the low dimensional representation followed by Density-Based Spatial Clustering of Applications with Noise (DBSCAN).\n
- "CENTROID": Unweighted pair group method with centroids (UPGMC).\n
- "MEDIAN": Weighted pair group method with centroids (WPGMC).\n
- "UMAP_WARD": Uniform Manifold Approximation and Projection for Dimension Reduction (UMAP) followed by Ward variance minimization algorithm (WARD).\n
- "DENSMAP_WARD": densMAP performs an optimization of the low dimensional representation followed by Ward variance minimization algorithm (WARD).\n
dims (int/list, optional): Indices of the intensity dimensions (m/z channels) to use for dimension reduction. Defaults to None, meaning all intensities are considered.
number_of_regions (int, optional): Number of desired clusters. Defaults to 10.
n_neighbors (int, optional): The size of the local neighborhood (in terms of number of neighboring sample points) used for manifold approximation. For more information check UMAP documentation. Defaults to 10.
min_cluster_size (int, optional): The minimum size of HDBSCAN clusters. Defaults to 20.
num_samples (int, optional): Number of intensity values that will be used during HDBSCAN clustering. Defaults to 1000.
Returns:
numpy.array: An array with cluster ids as elements.
"""
assert(method in ["UPGMA", "WPGMA", "WARD", "KMEANS", "UMAP_DBSCAN", "CENTROID", "MEDIAN", "UMAP_WARD", "DENSMAP_DBSCAN", "DENSMAP_WARD"])
if method in ["UPGMA", "WPGMA", "WARD", "CENTROID", "MEDIAN"]:
assert(not self.spectra_similarity is None)
self.logger.info("Calculating clusters")
c = None
if method == "UPGMA":
c = self.__segment__upgma(number_of_regions)
elif method == "WPGMA":
c = self.__segment__wpgma(number_of_regions)
elif method == "CENTROID":
c = self.__segment__centroid(number_of_regions)
elif method == "MEDIAN":
c = self.__segment__median(number_of_regions)
elif method == "WARD":
c = self.__segment__ward(number_of_regions)
elif method == "UMAP_DBSCAN":
c = self.__segment__umap_hdbscan(number_of_regions, dims=dims, n_neighbors=n_neighbors, min_cluster_size=min_cluster_size, num_samples=num_samples)
elif method == "UMAP_WARD":
c = self.__segment__umap_ward(number_of_regions, dims=dims, n_neighbors=n_neighbors)
elif method == "DENSMAP_DBSCAN":
c = self.__segment__umap_hdbscan(number_of_regions, densmap=True, dims=dims, n_neighbors=n_neighbors)
elif method == "DENSMAP_WARD":
c = self.__segment__umap_ward(number_of_regions, densmap=True, dims=dims, n_neighbors=n_neighbors)
elif method == "KMEANS":
c = self.__segment__kmeans(number_of_regions)
self.logger.info("Calculating clusters done")
image_UPGMA = np.zeros(self.region_array.shape, dtype=np.int16)
image_UPGMA = image_UPGMA[:,:,0]
# cluster 0 has special meaning: not assigned !
assert(not 0 in c)
for i in range(0, image_UPGMA.shape[0]):
for j in range(0, image_UPGMA.shape[1]):
image_UPGMA[i,j] = c[self.pixel2idx[(i,j)]]
self.segmented = image_UPGMA
self.logger.info("Calculating clusters saved")
return self.segmented
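# Usage sketch (illustrative; `region` is an initialised SpectraRegion instance). For the
# hierarchical methods (UPGMA/WPGMA/WARD/CENTROID/MEDIAN) the spectra_similarity attribute
# must already be populated; the UMAP/densMAP variants work directly on region_array:
#   labels = region.segment(method="UMAP_WARD", number_of_regions=8, n_neighbors=15)
#   region.plot_segments()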
def manual_segmentation(self, image_path):
"""Plots the labeled array according to the given segmentation image.
Args:
image_path (str/numpy.array): Either path to the image file or the numpy array of the image.
"""
if isinstance(image_path, np.ndarray):
self.logger.info("Received Image as Matrix")
img = image_path
else:
img = skimage.io.imread(image_path)
labeledArr, num_ids = ndimage.label(img, structure=np.ones((3,3)))
plt.matshow(labeledArr)
plt.show()
def set_background(self, clusterIDs):
"""Sets all given cluster ids to 0, meaning the background.
Args:
clusterIDs (tuple/list/set/int): Cluster id(s) that form the background (cluster id 0).
"""
if not type(clusterIDs) in [tuple, list, set]:
clusterIDs = [clusterIDs]
for clusterID in clusterIDs:
self.segmented[ self.segmented == clusterID ] = 0
def filter_clusters(self, method='remove_singleton', bg_x=4, bg_y=4, minIslandSize=10):
"""Filters the segmented array.
Args:
method (str, optional): Possible methods: "remove_singleton", "most_similar_singleton", "merge_background", "remove_islands", "gauss". Defaults to 'remove_singleton'.\n
- "remove_singleton": If there are clusters that include only one pixel, they will be made a part of the background.\n
- "most_similar_singleton": If there are clusters that include only one pixel, they will be compared to consensus spectra of all cluster and then added to the cluster with the lowest distance.\n
- "merge_background": Collects cluster ids at the borders and assigns all findings with background id 0.\n
- "remove_islands": Removes all pixel groups that include less then minimum allowed elements.\n
- "gauss": In case there is only two distinguishable cluster id in 3x3 area around the cluster will be assigned the most frequent cluster id of those two.\n
bg_x (int, optional): The x border limits whithin the clusters whould be assigned to background. Defaults to 4.
bg_y (int, optional): The y border limits whithin the clusters whould be assigned to background. Defaults to 4.
minIslandSize (int, optional): How many pixels an island is allowed to have. Defaults to 10.
Returns:
numpy.array: Array with reduced number of cluster ids.
"""
assert(method in ["remove_singleton", "most_similar_singleton", "merge_background", "remove_islands", "gauss"])
cluster2coords = self.getCoordsForSegmented()
if method == "gauss":
result = np.zeros(self.segmented.shape)
for i in range(result.shape[0]):
for j in range(result.shape[1]):
neighbours = list()
if i-1>=0:
neighbours.append(self.segmented[i-1][j])
if j-1>=0:
neighbours.append(self.segmented[i][j-1])
if i-1>=0 and j-1>=0:
neighbours.append(self.segmented[i-1][j-1])
if i+1<result.shape[0]:
neighbours.append(self.segmented[i+1][j])
if j+1<result.shape[1]:
neighbours.append(self.segmented[i][j+1])
if i+1<result.shape[0] and j+1<result.shape[1]:
neighbours.append(self.segmented[i+1][j+1])
d = {x:neighbours.count(x) for x in neighbours}
key, freq = d.keys(), d.values()
keys = np.asarray(list(key))
freqs = np.asarray(list(freq))
if len(np.unique(keys))<=2:
result[i,j] = keys[np.argmax(freqs)]
else:
result[i,j] = self.segmented[i,j]
self.segmented = result
elif method == "remove_islands":
exarray = self.segmented.copy()
exarray[exarray >= 1] = 1
labeledArr, num_ids = ndimage.label(exarray, structure=np.ones((3,3)))
for i in range(0, num_ids+1):
labelCells = np.count_nonzero(labeledArr == i)
if labelCells <= minIslandSize:
self.segmented[labeledArr == i] = 0
elif method == "remove_singleton":
for clusterID in cluster2coords:
if clusterID == 0:
continue # unassigned cluster - ignore it
clusCoords = cluster2coords[clusterID]
if len(clusCoords) == 1:
self.segmented[self.segmented == clusterID] = 0
elif method == "most_similar_singleton":
assert(self.consensus != None)
for clusterID in cluster2coords:
if clusterID == 0:
continue # unassigned cluster - ignore it
clusCoords = cluster2coords[clusterID]
if len(clusCoords) == 1:
cons2sim = {}
for cid in self.consensus:
sim = self.__calc_direct_similarity(self.region_array[clusCoords[0]], self.consensus[cid])
cons2sim[cid] = sim
mostSimClus = sorted([(x, cons2sim[x]) for x in cons2sim], key=lambda x: x[1], reverse=True)[0][0]
self.segmented[self.segmented == clusterID] = mostSimClus
elif method == "merge_background":
# which clusters are in 3x3 border boxes and not in 10x10 middle box?
borderSegments = set()
xdim = bg_x
ydim = bg_y
for i in range(0, min(xdim, self.segmented.shape[0])):
for j in range(0, min(ydim, self.segmented.shape[1])):
clusterID = self.segmented[i, j]
borderSegments.add(clusterID)
for i in range(max(0, self.segmented.shape[0]-xdim), self.segmented.shape[0]):
for j in range(max(0, self.segmented.shape[1]-ydim), self.segmented.shape[1]):
clusterID = self.segmented[i, j]
borderSegments.add(clusterID)
for i in range(max(0, self.segmented.shape[0]-xdim), self.segmented.shape[0]):
for j in range(0, min(ydim, self.segmented.shape[1])):
clusterID = self.segmented[i, j]
borderSegments.add(clusterID)
for i in range(0, min(xdim, self.segmented.shape[0])):
for j in range(max(0, self.segmented.shape[1]-ydim), self.segmented.shape[1]):
clusterID = self.segmented[i, j]
borderSegments.add(clusterID)
self.logger.info("Assigning clusters to background: {}".format(borderSegments))
for x in borderSegments:
self.segmented[self.segmented == x] = 0
return self.segmented
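# Usage sketch (illustrative parameter values): drop connected components of at most
# 10 pixels, then move everything touching a 4-pixel border into the background:
#   region.filter_clusters(method="remove_islands", minIslandSize=10)
#   region.filter_clusters(method="merge_background", bg_x=4, bg_y=4)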
def __cons_spectra__avg(self, cluster2coords, array):
"""Constructs an average spectrum for each cluster id.
Args:
cluster2coords (dict): A dictionary of cluster ids mapped to the corresponding coordinates.
array (numpy.array): Array of spectra.
Returns:
dict: A dictionary with cluster ids mapped to the respective average spectrum.
"""
if array is None:
array = self.region_array
cons_spectra = {}
for clusID in cluster2coords:
spectraCoords = cluster2coords[clusID]
if len(spectraCoords) == 1:
coord = spectraCoords[0]
# get spectrum, return spectrum
avgSpectrum = array[coord[0], coord[1]]
else:
avgSpectrum = np.zeros((1, array.shape[2]))
for coord in spectraCoords:
avgSpectrum += array[coord[0], coord[1]]
avgSpectrum = avgSpectrum / len(spectraCoords)
cons_spectra[clusID] = avgSpectrum[0]
return cons_spectra
def getCoordsForSegmented(self):
"""Returns a dictionary of cluster ids mapped to the corresponding coordinates.
Returns:
dict: Each cluster ids mapped to the corresponding coordinates.
"""
cluster2coords = defaultdict(list)
for i in range(0, self.segmented.shape[0]):
for j in range(0, self.segmented.shape[1]):
clusterID = int(self.segmented[i, j])
#if clusterID == 0:
# continue # unassigned cluster
cluster2coords[clusterID].append((i,j))
return cluster2coords
def _get_median_spectrum(self, region_array):
"""Calculates the median spectrum from all spectra in region_array.
Args:
region_array (numpy.array): Array of spectra.
Returns:
numpy.array: An array where each element is a median value of all spectra at each specific m/z index.
"""
median_profile = np.array([0.0] * region_array.shape[2])
for i in range(0, region_array.shape[2]):
median_profile[i] = np.median(region_array[:,:,i])
medProfAbove = [x for x in median_profile if x > 0]
if len(medProfAbove) == 0:
self.logger.info("Mostly Zero Median Profile!")
startedLog = 0.0
else:
startedLog = np.quantile(medProfAbove, [0.05])[0]
if startedLog == 0:
startedLog = 0.001
self.logger.info("Started Log Value: {}".format(startedLog))
median_profile += startedLog
return median_profile
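# Note (equivalence sketch): the per-channel loop above computes the same values as the
# vectorised call below; the `startedLog` offset is then added exactly as above:
#   median_profile = np.median(region_array, axis=(0, 1))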
def __cons_spectra__median(self, cluster2coords, array=None):
"""Constructs an median spectrum for each cluster id.
Args:
cluster2coords (dict): A dictionary of cluster ids mapped to the corresponding coordinates.
array (numpy.array, optional): Array of spectra. Defaults to None, that means using the region_array of the object.
Returns:
dict: A dictionary with cluster ids mapped to the respective median spectrum.
"""
if array is None:
array = self.region_array
cons_spectra = {}
for clusID in cluster2coords:
spectraCoords = cluster2coords[clusID]
if len(spectraCoords) == 1:
coord = spectraCoords[0]
# get spectrum, return spectrum
medianSpectrum = array[coord[0], coord[1], :]
else:
clusterSpectra = np.zeros((1, len(spectraCoords), array.shape[2]))
for cIdx, coord in enumerate(spectraCoords):
clusterSpectra[0, cIdx, :] = array[coord[0], coord[1], :]
medianSpectrum = self._get_median_spectrum(clusterSpectra)
cons_spectra[clusID] = medianSpectrum
return cons_spectra
def consensus_spectra(self, method="avg", set_consensus=True, array=None):
"""Constructs a consensus spectrum for each cluster id by using the specified method.
Args:
method (str, optional): Method that is supposed to be used for consensus spectra calculation. Either "avg" (average) or "median". Defaults to "avg".
set_consensus (bool, optional): Whether to set the calculated consensus and the respective method as object attributes. Defaults to True.
array (numpy.array, optional): Array of spectra. Defaults to None, that means using the region_array of the object.
Returns:
dict: A dictionary with cluster ids mapped to the respective consensus spectrum.
"""
if array is None:
array = self.region_array
else:
pass#print("Using array argument")
assert(not self.segmented is None)
assert(method in ["avg", "median"])
self.logger.info("Calculating consensus spectra")
cluster2coords = self.getCoordsForSegmented()
cons_spectra = None
if method == "avg":
cons_spectra = self.__cons_spectra__avg(cluster2coords, array=array)
elif method == "median":
cons_spectra = self.__cons_spectra__median(cluster2coords, array=array)
if set_consensus:
self.logger.info("Setting consensus spectra")
self.consensus = cons_spectra
self.consensus_method = method
self.logger.info("Calculating consensus spectra done")
return cons_spectra
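# Usage sketch (after a segmentation has been computed; the variable name is illustrative):
#   cons = region.consensus_spectra(method="median")
#   # `cons` maps each cluster id to a 1-D consensus spectrum of length region_array.shape[2]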
def plot_boxplot(self, masses):
"""Plots seaborn.boxplot depicting the range of intensity values of each desired mass within each cluster.
Args:
masses (float/list/tuple/set): A desired mass or collection of masses.
"""
assert(not self.segmented is None)
if not isinstance(masses, (list, tuple, set)):
masses = [masses]
cluster2coords = self.getCoordsForSegmented()
image = np.zeros((self.region_array.shape[0], self.region_array.shape[1]))
for mass in masses:
bestExMassForMass, bestExMassIdx = self._get_exmass_for_mass(mass)
self.logger.info("Processing Mass {} with best existing mass {}".format(mass, bestExMassForMass))
clusterIntensities = defaultdict(list)
for clusterid in cluster2coords:
for coord in cluster2coords[clusterid]:
intValue = self.region_array[coord[0], coord[1], bestExMassIdx]
clusterIntensities[clusterid].append(intValue)
clusterVec = []
intensityVec = []
massVec = []
specIdxVec = []
for x in clusterIntensities:
elems = clusterIntensities[x]
specIdxVec += [i for i in range(0, len(elems))]
clusterVec += ["Cluster " + str(x)] * len(elems)
intensityVec += elems
massVec += [mass] * len(elems)
dfObj = pd.DataFrame({"mass": massVec, "specidx": specIdxVec, "cluster": clusterVec, "intensity": intensityVec})
sns.boxplot(data=dfObj, x="cluster", y="intensity")
plt.title("Intensities per cluster for {}m/z".format(",".join([str(x) for x in masses])))
plt.xticks(rotation=90)
plt.show()
plt.close()
def mass_dabest(self, masses, background=0):
"""Plots seaborn.boxplot depicting the range of intensity values of each desired mass within each cluster. Additionally, plots mean difference effect sizes with help of the DABEST package. The given cluster id is considered a control group.
Args:
masses (float/list/tuple/set): A desired mass or collection of masses.
background (int, optional): Cluster id of the background. Defaults to 0.
"""
assert(not self.segmented is None)
if not isinstance(masses, (list, tuple, set)):
masses = [masses]
cluster2coords = self.getCoordsForSegmented()
assert(background in cluster2coords)
image = np.zeros((self.region_array.shape[0], self.region_array.shape[1]))
for mass in masses:
bestExMassForMass, bestExMassIdx = self._get_exmass_for_mass(mass)
self.logger.info("Processing Mass {} with best existing mass {}".format(mass, bestExMassForMass))
clusterIntensities = defaultdict(list)
for clusterid in cluster2coords:
for coord in cluster2coords[clusterid]:
intValue = self.region_array[coord[0], coord[1], bestExMassIdx]
clusterIntensities[clusterid].append(intValue)
clusterVec = []
intensityVec = []
massVec = []
specIdxVec = []
for x in clusterIntensities:
elems = clusterIntensities[x]
specIdxVec += [i for i in range(0, len(elems))]
clusterVec += ["Cluster " + str(x)] * len(elems)
intensityVec += elems
massVec += [mass] * len(elems)
dfObj = pd.DataFrame({"mass": massVec, "specidx": specIdxVec, "cluster": clusterVec, "intensity": intensityVec})
sns.boxplot(data=dfObj, x="cluster", y="intensity")
plt.title("Intensities per cluster for {}m/z".format(",".join([str(x) for x in masses])))
plt.xticks(rotation=90)
plt.show()
plt.close()
dfobj_db = dfObj.pivot(index="specidx", columns='cluster', values='intensity')
allClusterIDs = natsorted([x for x in set(clusterVec) if not " {}".format(background) in x])
multi_groups = dabest.load(dfobj_db, idx=tuple(["Cluster {}".format(background)]+allClusterIDs))
dabestFig = multi_groups.mean_diff.plot()
dabestFig.suptitle("DABEST intensities per cluster for {}m/z".format(",".join([str(x) for x in masses])))
def plot_inter_consensus_similarity(self, clusters=None):
"""Plots seaborn.boxplot depicting the cosine similarity distributions by comparison of spectra belonging to specified cluster ids to all available clusters.
Args:
clusters (numpy.array/list, optional): A list of desired cluster ids. Defaults to None, meaning to include all available clusters.
"""
cluster2coords = self.getCoordsForSegmented()
clusterLabels = sorted([x for x in cluster2coords])
self.logger.info("Found clusterLabels {}".format(clusterLabels))
if clusters is None:
clusters = sorted([x for x in cluster2coords])
for cluster in clusters:
self.logger.info("Processing cluster {}".format(cluster))
ownSpectra = [ self.region_array[xy[0], xy[1], :] for xy in cluster2coords[cluster] ]
clusterSimilarities = {}
for clusterLabel in clusterLabels:
allSpectra = [ self.region_array[xy[0], xy[1], :] for xy in cluster2coords[clusterLabel] ]
clusterSims = []
for i in range(0, len(ownSpectra)):
for j in range(0, len(allSpectra)):
clusterSims.append( self.__get_spectra_similarity(ownSpectra[i], allSpectra[j]) )
clusterSimilarities[clusterLabel] = clusterSims
clusterVec = []
similarityVec = []
for x in clusterSimilarities:
elems = clusterSimilarities[x]
clusterVec += [x] * len(elems)
similarityVec += elems
dfObj = pd.DataFrame({"cluster": clusterVec, "similarity": similarityVec})
sns.boxplot(data=dfObj, x="cluster", y="similarity")
plt.show()
plt.close()
def plot_consensus_similarity(self, mode="heatmap"):
"""Plots the similarity matrix either represented as a heatmap of similarity matrix or as seaborn.boxplot depicting similarity distributions of similarity values within the clusters.
Args:
mode (str, optional): Either "heatmap" or "spectra". Defaults to "heatmap".
"""
assert(not self.consensus_similarity_matrix is None)
assert(mode in ["heatmap", "spectra"])
if mode == "heatmap":
allLabels = [''] + sorted([x for x in self.consensus])
heatmap = plt.matshow(self.consensus_similarity_matrix)
plt.gca().set_xticklabels( allLabels )
plt.gca().set_yticklabels( allLabels )
plt.colorbar(heatmap)
plt.show()
plt.close()
elif mode == "spectra":
cluster2coords = self.getCoordsForSegmented()
clusterLabels = sorted([x for x in cluster2coords])
self.logger.info("Found clusterLabels {}".format(clusterLabels))
clusterSimilarities = {}
for clusterLabel in clusterLabels:
self.logger.info("Processing clusterLabel {}".format(clusterLabel))
clusterSims = []
useCoords = cluster2coords[clusterLabel]
for i in range(0, len(useCoords)):
for j in range(i+1, len(useCoords)):
iIdx = self.pixel2idx[useCoords[i]]
jIdx = self.pixel2idx[useCoords[j]]
sim = self.spectra_similarity[iIdx, jIdx]
clusterSims.append(sim)
clusterSimilarities[clusterLabel] = clusterSims
#allSpectra = [ self.region_array[xy[0], xy[1], :] for xy in cluster2coords[clusterLabel]]
#bar = progressbar.ProgressBar()
#clusterSims = []
#for i in bar(range(0, len(allSpectra))):
# for j in range(i+1, len(allSpectra)):
# clusterSims.append( self.__get_spectra_similarity(allSpectra[i], allSpectra[j]) )
#clusterSimilarities[clusterLabel] = clusterSims
clusterVec = []
similarityVec = []
for x in clusterSimilarities:
elems = clusterSimilarities[x]
clusterVec += [x] * len(elems)
similarityVec += elems
dfObj = pd.DataFrame({"cluster": clusterVec, "similarity": similarityVec})
sns.boxplot(data=dfObj, x="cluster", y="similarity")
plt.show()
plt.close()
def __get_spectra_similarity(self, vA, vB):
"""Calculates cosine similarity between two vectors of the same length.
Args:
vA (numpy.array/list): First vector.
vB (numpy.array/list): Second vector.
Returns:
float: cosine similarity.
"""
return np.dot(vA, vB) / (np.sqrt(np.dot(vA,vA)) * np.sqrt(np.dot(vB,vB)))
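# Worked example of the cosine similarity used above (values are illustrative):
#   vA = np.array([1.0, 0.0]); vB = np.array([1.0, 1.0])
#   np.dot(vA, vB) / (np.sqrt(np.dot(vA, vA)) * np.sqrt(np.dot(vB, vB)))   # = 1/sqrt(2) ~ 0.707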
def consensus_similarity( self ):
"""
Updates consensus_similarity_matrix attribute of SpectraRegion object. The updated matrix consists of similarity values between the spectra in the consensus dictionary.
"""
assert(not self.consensus is None)
allLabels = sorted([x for x in self.consensus])
specLength = len(self.consensus[allLabels[0]])
# bring consensus into correct form
consMatrix = np.zeros((len(allLabels), specLength))
for lidx, label in enumerate(allLabels):
consMatrix[lidx, :] = self.consensus[label]
self.consensus_similarity_matrix = np.zeros((len(allLabels), len(allLabels)))
for i in range(len(allLabels)):
vA = self.consensus[allLabels[i]]
for j in range(i, len(allLabels)):
vB = self.consensus[allLabels[j]]
simValue = self.__get_spectra_similarity(vA, vB)
self.consensus_similarity_matrix[i, j] = simValue
self.consensus_similarity_matrix[j, i] = simValue
def __get_expression(self, massValue, segments, mode="avg"):
"""Gives an expression (intensity values) overview of the given mass in the region.
Args:
massValue (float): A desired mass.
segments (numpy.array/list/tuple/set/int): Desired cluster id(s).
mode (numpy.array/list/tuple/set/str, optional): Whether to calculate the average and/or median value of the found expression values. Defaults to "avg".
Returns:
tuple: The value(s) calculated with the specified mode(s), the number of expression values found, and the number of expression values that differ from 0.
"""
assert(massValue is not None)
assert(segments is not None)
if not isinstance(mode, (list, tuple, set)):
mode = [mode]
if not isinstance(segments, (list, tuple, set)):
segments = [segments]
assert(all([x in ["avg", "median"] for x in mode]))
cluster2coords = self.getCoordsForSegmented()
assert(all([y in cluster2coords for y in segments]))
# best matching mass value - accounts for rounding differences, etc.
massValue, massIndex = self._get_exmass_for_mass(massValue)
allExprValues = []
for segment in segments:
segmentPixels = cluster2coords[segment]
for pixel in segmentPixels:
exprValue = self.region_array[pixel[0], pixel[1], massIndex]
allExprValues.append(exprValue)
num, anum = len(allExprValues), len([x for x in allExprValues if x > 0])
resElem = []
for modElem in mode:
if modElem == "avg":
resElem.append( np.mean(allExprValues) )
elif modElem == "median":
resElem.append( np.median(allExprValues) )
return tuple(resElem), num, anum
def get_spectra_matrix(self, segments):
"""Returns a matrix with all spectra in .imzML file that correspond to the given segments.
Args:
segments (numpy.array/list): A list of desired cluster ids.
Returns:
numpy.array: An array where each element is spectrum that was previously found to be part of one of the given clusters given in segments parameter.
"""
cluster2coords = self.getCoordsForSegmented()
relPixels = []
for x in segments:
relPixels += cluster2coords.get(x, [])
spectraMatrix = np.zeros((len(relPixels), len(self.idx2mass)))
for pidx, px in enumerate(relPixels):
spectraMatrix[pidx, :] = self.region_array[px[0], px[1], :]
return spectraMatrix
def get_expression_from_matrix(self, matrix, massValue, segments, mode="avg"):
"""Gives an expression (intensity values) overview of the given matrix.
Args:
matrix (numpy.array): A matrix from which the intensity values will be extracted.
massValue (float): A desired mass.
segments (numpy.array/list/tuple/set/int): Desired cluster id(s).
mode (numpy.array/list/tuple/set/str, optional): Whether to calculate the average and/or median value of the found expression values. "avg" (average) and/or "median". Defaults to "avg".
Returns:
tuple: The value(s) calculated with the specified mode(s), the number of expression values found, and the number of expression values that differ from 0.
"""
assert(massValue is not None)
assert(segments is not None)
if not isinstance(mode, (list, tuple, set)):
mode = [mode]
if not isinstance(segments, (list, tuple, set)):
segments = [segments]
assert(all([x in ["avg", "median"] for x in mode]))
# best matching mass value - accounts for rounding differences, etc.
massValue, massIndex = self._get_exmass_for_mass(massValue)
allExprValues = list(matrix[:, massIndex])
num, anum = len(allExprValues), len([x for x in allExprValues if x > 0])
resElem = []
for modElem in mode:
if modElem == "avg":
resElem.append( np.mean(allExprValues) )
elif modElem == "median":
resElem.append( np.median(allExprValues) )
return tuple(resElem), num, anum
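# Usage sketch (cluster ids and the mass value are illustrative): average expression of a
# mass within clusters 1 and 2, computed on their pooled spectra matrix:
#   mat = region.get_spectra_matrix(segments=[1, 2])
#   (avg,), n_total, n_nonzero = region.get_expression_from_matrix(mat, 741.53, [1, 2], mode="avg")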
def __check_neighbour(self, mat, x, y, background):
"""Decides whether the given pixel suppose to be a part of the background cluster parameter.
Args:
mat (numpy.array): CLustered image with cluster ids as elements.
x (int): x-Coordinate.
y (int): y-Coordinate.
background (int): Cluster id of the cluster to be compared to.
Returns:
bool: Whether the pixel is neighbor of the cluster in the background parameter.
"""
if x < mat.shape[0]-1 and mat[x+1][y] in background:
return True
elif x > 1 and mat[x-1][y] in background:
return True
elif y < mat.shape[1]-1 and mat[x][y+1] in background:
return True
elif y > 1 and mat[x][y-1] in background:
return True
elif x < mat.shape[0]-1 and y < mat.shape[1]-1 and mat[x+1][y+1] in background:
return True
elif x > 1 and y > 1 and mat[x-1][y-1] in background:
return True
elif x < mat.shape[0]-1 and y > 1 and mat[x+1][y-1] in background:
return True
elif y < mat.shape[1]-1 and x > 1 and mat[x-1][y+1] in background:
return True
else:
return False
def cartoonize(self, background, aorta, plaque, blur=False):
"""Simplifies the clustered image.
Args:
background (list/numpy.array): A list of cluster ids that belong to the background.
aorta (list/numpy.array): A list of cluster ids that belong to the aorta.
plaque (list/numpy.array): A list of cluster ids that belong to the plaque.
blur (bool, optional): Whether to apply a multidimensional uniform filter to blur the image. Defaults to False.
Returns:
numpy.array: Simplified image with three clusters.
"""
assert(not self.segmented is None)
img = np.copy(self.segmented)
cartoon_img = np.zeros((img.shape))
for i in range(img.shape[0]):
for j in range(img.shape[1]):
if img[i,j] in background:
cartoon_img[i,j] = 0
elif img[i,j] in aorta or self.__check_neighbour(img, i, j, background):
cartoon_img[i,j] = 1
else:
if not self.__check_neighbour(img, i, j, background) and self.__check_neighbour(cartoon_img, i, j, aorta) or img[i][j] in plaque:
cartoon_img[i,j] = 2
else:
cartoon_img[i,j] = 0
if blur:
cartoon_img = ndimage.uniform_filter(cartoon_img, size=4)
return cartoon_img
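# Usage sketch (the cluster-id groups are illustrative and data-dependent):
#   simple = region.cartoonize(background=[0, 1], aorta=[2, 3], plaque=[4], blur=False)
#   # without blurring the returned image contains only the values 0 (background), 1 (aorta), 2 (plaque)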
def __merge_clusters(self, matrix, clusters):
"""Combines the given clusters to one cluster with the id of the first element in the clusters list.
Args:
matrix (numpy.array): Segmented array with cluster ids as elements.
clusters (list/numpy.array): Cluster ids to merge.
Returns:
numpy.array: Updated segmented array.
"""
merged = np.copy(matrix)
for cluster in clusters:
merged[merged == cluster] = clusters[0]
return merged
def cartoonize2(self, imze, background, aorta, plaque, ignore_background=True, blur=False):
"""Simplifies the segmented array by comparing median spectra of the given cluster groups to the whole spectra region.
Args:
imze (IMZMLExtract): IMZMLExtract object.
background (list/numpy.array): A list of cluster ids that belong to the background.
aorta (list/numpy.array): A list of cluster ids that belong to the aorta.
plaque (list/numpy.array): A list of cluster ids that belong to the plaque.
ignore_background (bool, optional): Whether to consider only aorta and plaque median spectra by "reclustering". Defaults to True.
blur (bool, optional): Whether to apply a multidimensional uniform filter to blur the image. Defaults to False.
Returns:
numpy.array: Simplified segmented array with only three clusters.
"""
assert(not self.segmented is None)
merged = self.segmented
if len(background)>1:
merged = self.__merge_clusters(merged, background)
if len(aorta)>1:
merged = self.__merge_clusters(merged, aorta)
if len(plaque)>1:
merged = self.__merge_clusters(merged, plaque)
background = background[0]
aorta = aorta[0]
plaque = plaque[0]
tmp = self.segmented
self.segmented = merged
cons = self.consensus_spectra(method="median", set_consensus=False)
self.segmented = tmp
sim_background = np.zeros(self.segmented.shape)
sim_aorta = np.zeros(self.segmented.shape)
sim_plaque = np.zeros(self.segmented.shape)
for i in range(sim_background.shape[0]):
for j in range(sim_background.shape[1]):
spectra = self.region_array[i][j]
sim_background[i][j] = imze.compare_sequence(spectra, cons[background])
sim_aorta[i][j] = imze.compare_sequence(spectra, cons[aorta])
sim_plaque[i][j] = imze.compare_sequence(spectra, cons[plaque])
sim_max = np.zeros((sim_background.shape[0], sim_background.shape[1]))
for i in range(sim_background.shape[0]):
for j in range(sim_background.shape[1]):
if ignore_background and self.segmented[i][j] == background:
sim_max[i,j] = self.segmented[i][j]
else:
sim_max[i,j] = np.argmax([sim_background[i][j], sim_aorta[i][j], sim_plaque[i][j]])
if blur:
sim_max = ndimage.uniform_filter(sim_max, size=4)
return sim_max
def get_surroundings(self, mat, x, y):
"""Determines the cluster ids and their frequencies of the 3x3 surroundings of the given pixel.
Args:
mat (numpy.array): The matrix where the surrounding pixels will be computed.
x (int): x-Coordinate of the desired pixel.
y (int): y-Coordinate of the desired pixel.
Returns:
collections.Counter: Cluster ids and the respective frequencies in 3x3 window from the given pixel coordinates.
"""
res = list()
if x < mat.shape[0]-1:
res.append(mat[x+1][y])
if x > 1:
res.append(mat[x-1][y])
if y < mat.shape[1]-1:
res.append(mat[x][y+1])
if y > 1:
res.append(mat[x][y-1])
if x < mat.shape[0]-1 and y < mat.shape[1]-1:
res.append(mat[x+1][y+1])
if x > 1 and y > 1:
res.append(mat[x-1][y-1])
if x < mat.shape[0]-1 and y > 1:
res.append(mat[x+1][y-1])
if y < mat.shape[1]-1 and x > 1:
res.append(mat[x-1][y+1])
return Counter(res)
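# Example: if the 3x3 neighbourhood of (x, y) contains five pixels of cluster 1 and three
# of cluster 2, the call returns Counter({1: 5, 2: 3}); the centre pixel itself is not counted.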
def add_cellwall(self, mat, between1, between2, threshold = 2):
"""Adds the cluster for the cell wall at those pixels that have significant number of between1 and between2 assigned pixels.
Args:
mat (numpy.array): A segmented array with a clustered image where the cell wall cluster should be added.
between1 (int): First cluster id of the selected cluster pair. Between those the new cluster will be added.
between2 (int): Second cluster id of the selected cluster pair. Between those the new cluster will be added.
threshold (int, optional): A pixel is considered a cell wall component if it has more than this many neighbors of between1 and more than this many neighbors of between2. Defaults to 2.
Returns:
numpy.array: Updated segmented array in which the cell wall is assigned cluster id 1 and all other original cluster ids are shifted up by one (the original background id 0 stays 0).
"""
new_mat = np.copy(mat)
new_mat = new_mat+1
new_mat[new_mat==1] = 0
for i in range(new_mat.shape[0]):
for j in range(new_mat.shape[1]):
s = self.get_surroundings(mat, i, j)
if s[between1] > threshold and s[between2] > threshold:
new_mat[i][j] = 1
return new_mat
def plot_wireframe(self, imze, background, aorta, plaque, norm=False):
"""Plots the background, aorta, and plaque pixelwise probabilities.
Args:
imze (IMZMLExtract): IMZMLExtract object.
background (list/numpy.array): A list of cluster ids that belong to the background.
aorta (list/numpy.array): A list of cluster ids that belong to the aorta.
plaque (list/numpy.array): A list of cluster ids that belong to the plaque.
norm (bool, optional): Whether to take the square root of the similarity maps to compress the value range. Defaults to False.
Returns:
numpy.array, numpy.array, numpy.array: Three arrays of probabilities for background, aorta, and plaque.
"""
out = self.cartoonize(background, aorta, plaque, blur=False)
tmp = self.segmented
self.segmented = out
cons = self.consensus_spectra(method="median", set_consensus=False)
self.segmented = tmp
sim_background = np.zeros(out.shape)
sim_aorta = np.zeros(out.shape)
sim_plaque = np.zeros(out.shape)
for i in range(out.shape[0]):
for j in range(out.shape[1]):
spectra = self.region_array[i][j]
sim_background[i][j] = imze.compare_sequence(spectra, cons[0])
sim_aorta[i][j] = imze.compare_sequence(spectra, cons[1])
sim_plaque[i][j] = imze.compare_sequence(spectra, cons[2])
if norm:
sim_background = np.sqrt(sim_background)
sim_aorta = np.sqrt(sim_aorta)
sim_plaque = np.sqrt(sim_plaque)
(X, Y) = np.meshgrid(np.arange(self.segmented.shape[1]), np.arange(self.segmented.shape[0]))
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111, projection='3d')
# Plot a basic wireframe.
ax.plot_wireframe(X, Y, sim_background, color='green', label='Background')
ax.plot_wireframe(X, Y, sim_aorta, color='red', label='Aorta')
ax.plot_wireframe(X, Y, sim_plaque, label='Plaque')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('P')
ax.legend()
plt.show()
return sim_background, sim_aorta, sim_plaque
def _makeHTMLStringFilterTable(self, expDF):
"""Transform given pandas dataframe into HTML output.
Args:
expDF (pd.DataFrame): Values for output.
Returns:
htmlHead, htmlBody (str): HTML code for head and body.
"""
headpart = """
"""
bodypart = """
{% if title %}
{{title}}
{% endif %}
<button id="csvButton" type="button">Save current table!</button>
<table id="{{html_element_id}}" class="display" cellspacing="0" width="100%">
<thead>
<tr>
{% for column in columns %}
<th>{{column}}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for key,row in rows.iterrows() %}
<tr>
{% for column in columns %}
<td>{{ row[column] }}</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
<tfoot>
<tr>
{% for column in columns %}
<th>{{column}}</th>
{% endfor %}
</tr>
</tfoot>
</table>
<script src="tablefilter/tablefilter.js"></script>
<script data-config>
var filtersConfig = {
base_path: 'tablefilter/',
alternate_rows: true,
rows_counter: true,
btn_reset: true,
loader: true,
status_bar: true,
mark_active_columns: true,
highlight_keywords: true,
sticky_headers: true,
col_types: [{{coltypes}}],
custom_options: {
cols:[],
texts: [],
values: [],
sorts: []
},
col_widths: [],
extensions:[{ name: 'sort' }]
};
var tf = new TableFilter("{{html_element_id}}", filtersConfig);
tf.init();
function download_csv(csv, filename) {
var csvFile;
var downloadLink;
// CSV FILE
csvFile = new Blob([csv], {type: "text/csv"});
// Download link
downloadLink = document.createElement("a");
// File name
downloadLink.download = filename;
// We have to create a link to the file
downloadLink.href = window.URL.createObjectURL(csvFile);
// Make sure that the link is not displayed
downloadLink.style.display = "none";
// Add the link to your DOM
document.body.appendChild(downloadLink);
// Trigger the download
downloadLink.click();
}
function isHidden(el) {
var style = window.getComputedStyle(el);
return ((style.display === 'none') || (style.visibility === 'hidden'))
}
function export_table_to_csv(html, filename) {
var csv = [];
var rows = document.querySelectorAll("table tr");
for (var i = 0; i < rows.length; i++) {
var row = [], cols = rows[i].querySelectorAll("td, th");
if (!isHidden(rows[i]))
{
for (var j = 0; j < cols.length; j++)
{
colText = ""+cols[j].innerText;
colText = colText.replace(/(\\r\\n|\\n|\\r)/gm, ';')
row.push(colText);
}
if (row.length > 0)
{
csv.push(row.join("\\t"));
}
}
}
// Download CSV
download_csv(csv.join("\\n"), filename);
}
document.addEventListener('readystatechange', event => {
if (event.target.readyState === "interactive") { //same as: document.addEventListener("DOMContentLoaded"... // same as jQuery.ready
console.log("Ready state");
document.getElementById("csvButton").addEventListener("click", function () {
var html = document.getElementById("{{html_element_id}}").outerHTML;
export_table_to_csv(html, "table.tsv");
});
}
if (event.target.readyState === "complete") {
console.log("Now external resources are loaded too, like css,src etc... ");
document.getElementById("csvButton").addEventListener("click", function () {
var html = document.getElementById("{{html_element_id}}").outerHTML;
export_table_to_csv(html, "table.tsv");
});
}
});
</script>
"""
jsCols = []
columnNames = expDF.columns.values.tolist()
for cname in columnNames:
if expDF[cname].dtypes in [int, float]:
jsCols.append("\"number\"")
else:
jsCols.append("\"string\"")
vHeader = [str(x) for x in columnNames]
#print()
self.logger.info("Got Columns: {}".format([x for x in zip(vHeader, jsCols)]))
html_element_id = "dftable"
jinjaTemplate = jinja2.Template(bodypart)
output = jinjaTemplate.render(rows=expDF, columns=vHeader, title="",
html_element_id=html_element_id, coltypes=", ".join(jsCols))
return (headpart, output)
def get_mask(self, regions):
"""Returns updated segmented shaped matrix with the desired region coordinates replaced with ones.
Args:
regions (list/tuple/set/int): A desired region or collection of cluster ids.
Returns:
numpy.array: An updated matrix with all specified cluster ids replaced with cluster id equals 1.
"""
if not isinstance(regions, (list, tuple, set)):
regions = [regions]
outmask = np.zeros(self.segmented.shape)
for region in regions:
outmask[self.segmented == region] = 1
return outmask
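# Usage sketch (cluster ids are illustrative): binary mask of clusters 3 and 4, e.g. for the
# contour overlays drawn in export_deres():
#   mask = region.get_mask(regions=[3, 4])   # 1 inside the selected clusters, 0 elsewhere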
def export_deres(self, method, resKey, outpath, title="DE Result"):
"""This methods writes out a HTMl-formatted table for all found DE results.
Args:
method (str): Method to export result for
resKey (tuple): List of regions to look for result for
outpath (str): outpath of HTML table. Required js-sources are copied into the same folder.
title (str, optional): Title for result table
"""
expDF = self.df_results_all[method][resKey].copy(deep=True)
mass2image = {}
requiredMasses = set(self.df_results_all[method][resKey]["gene_mass"].values)
self.logger.info("Fetching Mass Heatmaps for all {} required masses".format(len(requiredMasses)))
fgMask = self.get_mask(regions=resKey[0])
bgMask = self.get_mask(regions=resKey[1])
for mass in set(requiredMasses):
mass_data = self.mass_heatmap(mass, plot=False, verbose=False)
heatmap = plt.matshow(mass_data, fignum=100)
plt.colorbar(heatmap)
# Find contours at a constant value of 0.5
contours = sk_measure.find_contours(bgMask, 0.5)
# Display the image and plot all contours found
for n, contour in enumerate(contours):
plt.plot(contour[:, 1], contour[:, 0], linewidth=2, color="blue")
# Find contours at a constant value of 0.5
contours = sk_measure.find_contours(fgMask, 0.5)
# Display the image and plot all contours found
for n, contour in enumerate(contours):
plt.plot(contour[:, 1], contour[:, 0], linewidth=2, color="green")
pic_IObytes = io.BytesIO()
plt.savefig(pic_IObytes, format='png')
pic_IObytes.seek(0)
pic_hash = base64.b64encode(pic_IObytes.read()).decode()
plt.close(100)
imgStr = "<img src='data:image/png;base64,{}' alt='Red dot' />".format(pic_hash)
mass2image[mass] = imgStr
massImgValues = [mass2image.get(mass, "") for mass in expDF["gene_mass"].values]
pos = expDF.columns.values.tolist().index("gene_mass")+1
self.logger.info("Adding Mass Heatmap at pos {} of {} with {} entries".format(pos, len(expDF.columns.values.tolist()), len(massImgValues)))
expDF.insert(loc = pos,
column = 'Mass Heatmap',
value = massImgValues)
(headpart, bodypart) = self._makeHTMLStringFilterTable(expDF)
#
# Plot segments
#
#
valid_vals = np.unique(self.segmented)
heatmap = plt.matshow(self.segmented, cmap=plt.cm.get_cmap('viridis', len(valid_vals)), fignum=100)
min_ = min(valid_vals)
max_ = max(valid_vals)
positions = np.linspace(min_, max_, len(valid_vals))
val_lookup = dict(zip(positions, valid_vals))
def formatter_func(x, pos):
'The two args are the value and tick position'
val = val_lookup[x]
return val
formatter = plt.FuncFormatter(formatter_func)
# We must be sure to specify the ticks matching our target names
plt.colorbar(heatmap, ticks=positions, format=formatter, spacing='proportional')
pic_IObytes = io.BytesIO()
plt.savefig(pic_IObytes, format='png')
pic_IObytes.seek(0)
pic_hash = base64.b64encode(pic_IObytes.read()).decode()
plt.close(100)
imgStrSegments = "<img src='data:image/png;base64,{}' alt='Red dot' />".format(pic_hash)
#
# Plot segment highlights
#
#
showcopy = np.copy(self.segmented)
for i in range(0, showcopy.shape[0]):
for j in range(0, showcopy.shape[1]):
if showcopy[i,j] != 0:
if showcopy[i,j] in resKey[0]:
showcopy[i,j] = 2
elif showcopy[i,j] != 0:
showcopy[i,j] = 1
valid_vals = np.unique(showcopy)
heatmap = plt.matshow(showcopy, cmap=plt.cm.get_cmap('viridis', len(valid_vals)), fignum=100)
min_ = min(valid_vals)
max_ = max(valid_vals)
positions = np.linspace(min_, max_, len(valid_vals))
val_lookup = dict(zip(positions, valid_vals))
def formatter_func(x, pos):
'The two args are the value and tick position'
val = val_lookup[x]
return val
formatter = plt.FuncFormatter(formatter_func)
# We must be sure to specify the ticks matching our target names
plt.colorbar(heatmap, ticks=positions, format=formatter, spacing='proportional')
pic_IObytes = io.BytesIO()
plt.savefig(pic_IObytes, format='png')
pic_IObytes.seek(0)
pic_hash = base64.b64encode(pic_IObytes.read()).decode()
plt.close(100)
imgStrSegmentHighlight = "<img src='data:image/png;base64,{}' alt='Red dot' />".format(pic_hash)
bodypart = "<p>{}<p><p>{}<p>\n{}".format(imgStrSegments, imgStrSegmentHighlight, bodypart)
if title != None:
bodypart = "<h1>"+title+"</h1>" + bodypart
htmlfile="<html>\n<head>\n" + headpart + "</head>\n<body>\n" + bodypart + "</body>\n</html>"
with open(outpath, 'w') as outHtml:
outHtml.write(htmlfile)
def copyFolders(root_src_dir, root_target_dir):
for src_dir, dirs, files in os.walk(root_src_dir):
dst_dir = src_dir.replace(root_src_dir, root_target_dir)
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if os.path.exists(dst_file):
os.remove(dst_file)
shutil.copy(src_file, dst_dir)
sourceDir = os.path.dirname(__file__) + "/tablefilter"
targetDir = os.path.dirname(outpath) + "/tablefilter"
self.logger.info("copy tablefilter files from {} to {}".format(sourceDir, targetDir))
copyFolders(sourceDir, targetDir)
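# Hypothetical usage sketch (the object name `spec`, the method "ttest" and the
# region key ((3,), (0,)) are illustrative assumptions, not taken from this code):
#   spec.export_deres("ttest", ((3,), (0,)), "de_cluster3_vs_bg.html",
#                     title="Cluster 3 vs background")
# This writes the filterable HTML table and copies the required `tablefilter`
# js-sources next to it, so the file can be opened directly in a browser.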
def deres_to_df(self, method, resKey, protWeights, mz_dist=3, mz_best=False, keepOnlyProteins=True, inverse_fc=False, max_adj_pval=0.05, min_log2fc=0.5):
"""Transforms differetial expression (de) result in de_results_all dictionary of the SpectraRegion object into a DataFrame.
Args:
method (str): Test method for differential expression. "empire", "ttest" or "rank".
resKey (tuple): List of regions where to look for the result.
protWeights (ProteinWeights): ProteinWeights object for translation of masses to protein name.
mz_dist (float/int, optional): Allowed offset for protein lookup of needed masses. Defaults to 3.
mz_best (bool, optional): Whether to consider only the closest found protein within mz_dist (with the least absolute mass difference). Defaults to False.
keepOnlyProteins (bool, optional): If True, differential masses without protein name will be removed. Defaults to True.
inverse_fc (bool, optional): If True, the DE result logFC will be inverted (negated). Defaults to False.
max_adj_pval (float, optional): Threshold for maximum adjusted p-value that will be used for filtering of the de results. Defaults to 0.05.
min_log2fc (float, optional): Threshold for minimum log2fc that will be used for filtering of the de results. Defaults to 0.5.
Returns:
pandas.DataFrame: DataFrame of the differential expression (DE) result.
"""
clusterVec = []
geneIdentVec = []
massVec = []
foundProtVec = []
lfcVec = []
qvalVec = []
detMassVec = []
avgExpressionVec = []
medianExpressionVec = []
totalSpectraVec = []
measuredSpectraVec = []
avgExpressionBGVec = []
medianExpressionBGVec = []
totalSpectraBGVec = []
measuredSpectraBGVec = []
deResDF = self.de_results_all[method][resKey]
ttr = deResDF.copy(deep=True)#self.de_results[resKey]
self.logger.info("DE result for case {} with {} results".format(resKey, ttr.shape))
#ttr = deRes.summary()
log2fcCol = "log2fc"
massCol = "gene"
adjPvalCol = "qval"
ttrColNames = list(ttr.columns.values)
self.logger.info("DF column names {}".format(ttrColNames))
if log2fcCol in ttrColNames and massCol in ttrColNames and adjPvalCol in ttrColNames:
dfColType = "diffxpy"
else:
#id numFeatures pval abs.log2FC log2FC fdr SDcorr fc.pval fc.fdr nonde.fcwidth fcCI.90.start fcCI.90.end fcCI.95.start fcCI.95.end fcCI.99.start fcCI.99.end
if "id" in ttrColNames and "log2FC" in ttrColNames and "fc.fdr" in ttrColNames:
log2fcCol = "log2FC"
massCol = "id"
adjPvalCol = "fc.fdr"
dfColType = "empire"
if inverse_fc:
self.logger.info("DE result logFC inversed")
ttr[log2fcCol] = -ttr[log2fcCol]
fttr = ttr[ttr[adjPvalCol].lt(max_adj_pval) & ttr[log2fcCol].abs().gt(min_log2fc)]
self.logger.info("DE result for case {} with {} results (filtered)".format(resKey, fttr.shape))
targetSpectraMatrix = self.get_spectra_matrix(resKey[0])
bgSpectraMatrix = self.get_spectra_matrix(resKey[1])
self.logger.info("Created matrices with shape {} and {} (target, bg)".format(targetSpectraMatrix.shape, bgSpectraMatrix.shape))
for row in fttr.iterrows():
geneIDent = row[1][massCol]
ag = geneIDent.split("_")
massValue = float("{}.{}".format(ag[1], ag[2]))
foundProt = []
if protWeights != None:
foundProt = protWeights.get_protein_from_mass(massValue)
if mz_best and len(foundProt) > 0:
foundProt = [foundProt[0]]
if keepOnlyProteins and len(foundProt) == 0:
continue
lfc = row[1][log2fcCol]
qval = row[1][adjPvalCol]
expT, totalSpectra, measuredSpecta = self.get_expression_from_matrix(targetSpectraMatrix, massValue, resKey[0], ["avg", "median"])
exprBG, totalSpectraBG, measuredSpectaBG = self.get_expression_from_matrix(bgSpectraMatrix, massValue, resKey[0], ["avg", "median"])
avgExpr, medianExpr = expT
avgExprBG, medianExprBG = exprBG
if len(foundProt) > 0:
for protMassTuple in foundProt:
prot,protMass = protMassTuple
clusterVec.append("_".join([str(x) for x in resKey[0]]))
geneIdentVec.append(geneIDent)
massVec.append(massValue)
foundProtVec.append(prot)
detMassVec.append(protMass)
lfcVec.append(lfc)
qvalVec.append(qval)
avgExpressionVec.append(avgExpr)
medianExpressionVec.append(medianExpr)
totalSpectraVec.append(totalSpectra)
measuredSpectraVec.append(measuredSpecta)
avgExpressionBGVec.append(avgExprBG)
medianExpressionBGVec.append(medianExprBG)
totalSpectraBGVec.append(totalSpectraBG)
measuredSpectraBGVec.append(measuredSpectaBG)
else:
clusterVec.append("_".join([str(x) for x in resKey[0]]))
geneIdentVec.append(geneIDent)
massVec.append(massValue)
foundProtVec.append("")
detMassVec.append("-1")
lfcVec.append(lfc)
qvalVec.append(qval)
avgExpressionVec.append(avgExpr)
medianExpressionVec.append(medianExpr)
totalSpectraVec.append(totalSpectra)
measuredSpectraVec.append(measuredSpecta)
avgExpressionBGVec.append(avgExprBG)
medianExpressionBGVec.append(medianExprBG)
totalSpectraBGVec.append(totalSpectraBG)
measuredSpectraBGVec.append(measuredSpectaBG)
#requiredColumns = ["gene", "clusterID", "avg_logFC", "p_val_adj", "mean", "num", "anum"]
df = pd.DataFrame()
df["clusterID"] = clusterVec
df["gene_ident"] = geneIdentVec
df["gene_mass"] = massVec
df["gene"] = foundProtVec
df["protein_mass"] = detMassVec
df["avg_logFC"] = lfcVec
df["qvalue"] = qvalVec
df["num"] = totalSpectraVec
df["anum"]= measuredSpectraVec
df["mean"] = avgExpressionVec
df["median"] = medianExpressionVec
df["num_bg"] = totalSpectraBGVec
df["anum_bg"]= measuredSpectraBGVec
df["mean_bg"] = avgExpressionBGVec
df["median_bg"] = medianExpressionBGVec
self.df_results_all[method][resKey] = df.copy(deep=True)
return df
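# Hypothetical usage sketch (the object name `spec` and the ProteinWeights
# instance `pw` are illustrative assumptions):
#   de_df = spec.deres_to_df("ttest", ((3,), (0,)), pw, max_adj_pval=0.05, min_log2fc=0.5)
# The returned DataFrame is also cached in self.df_results_all[method][resKey],
# which export_deres later reads to build its HTML table.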
def find_all_markers(self, protWeights, keepOnlyProteins=True, replaceExisting=False, includeBackground=True, mz_dist=3, mz_best=False, backgroundCluster=[0], out_prefix="nldiffreg", outdirectory=None, use_methods = ["empire", "ttest", "rank"], count_scale={"ttest": 1, "rank": 1, "empire": 10000}):
"""Finds all marker proteins for a specific clustering.
Args:
protWeights (ProteinWeights): ProteinWeights object for translation of masses to protein name.
keepOnlyProteins (bool, optional): If True, differential masses without protein name will be removed. Defaults to True.
replaceExisting (bool, optional): If True, previously created marker-gene results will be overwritten. Defaults to False.
includeBackground (bool, optional): If True, the cluster specific expression data are compared to all other clusters incl. background cluster. Defaults to True.
mz_dist (float/int, optional): Allowed offset for protein lookup of needed masses. Defaults to 3.
mz_best (bool, optional): Whether to consider only the closest found protein within mz_dist (with the least absolute mass difference). Defaults to False.
backgroundCluster ([int], optional): Clusters which are handled as background. Defaults to [0].
out_prefix (str, optional): Prefix for results file. Defaults to "nldiffreg".
outdirectory (str, optional): Directory used for empire files. Defaults to None.
use_methods (list, optional): Test methods for differential expression. Defaults to ["empire", "ttest", "rank"].\n
- "empire": Empirical and Replicate based statistics (EmpiRe).\n
- "ttest": Welch’s t-test for differential expression using diffxpy.api.\n
- "rank": Mann-Whitney rank test (Wilcoxon rank-sum test) for differential expression using diffxpy.api.\n
count_scale (dict, optional): Count scales for different methods (relevant for empire, which can only use integer counts). Defaults to {"ttest": 1, "rank": 1, "empire": 10000}.
Returns:
dict of pd.DataFrame: for each test conducted, one DataFrame with all marker masses for each cluster
"""
cluster2coords = self.getCoordsForSegmented()
dfbyMethod = defaultdict(lambda:
|
pd.DataFrame()
|
pandas.DataFrame
|
from numpy import array, float16
from pytorch_forecasting.data import (
TimeSeriesDataSet,
)
from datetime import timedelta
import pytorch_lightning as pl
from pytorch_lightning.callbacks import (
EarlyStopping,
)
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_forecasting.models import RecurrentNetwork
from pytorch_forecasting.metrics import RMSE
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import requests
import json
import datetime
import ast
import time
from io import StringIO
import numpy as np
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 2000)
#Note: Usage Example at very bottom.
# ******************************************************************************************************************
# ********************************************** SMARD+MONTEL DATAS ************************************************
# ******************************************************************************************************************
def get_data_for_prediction(requestedTimeStamp,
numberOfDaysInPast=60):
"""
:param requestedTimeStamp: Date and Time of the request. Should be a pandas datetime object
:param numberOfDaysInPast: Int value of days in the past needed for prediction
:return: Full dataset of required information
Output Columns: (['Wind Onshore[MWh]', 'Steinkohle[MWh]', 'Erdgas[MWh]',
'Gesamt[MWh]', 'Value', 'Base', 'Peak']
"""
endDate = requestedTimeStamp.strftime('%Y-%m-%d')
startDate = requestedTimeStamp - datetime.timedelta(days=numberOfDaysInPast)
montelStartDate = startDate.strftime('%Y-%m-%d')
# Get MONTEL API DATA
montelApiDf = getDataFromAPI_HourlyIntervals(startDate=montelStartDate, endDate=endDate)
begin_timestamp = startDate # From last Value of data
end_timestamp = str(montelApiDf.iloc[-1].name)
montelMissingData = montelApiDf.loc[begin_timestamp:end_timestamp]
# GET SMARD DATA
realizedPower = [1004071, 1004067, 1004069, 1004070]
realizedConsumption = [5000410]
#5000410
modules_realized = realizedPower
modules_consumed = realizedConsumption
Days_behind = numberOfDaysInPast + 1
EnergyProd = requestSmardData(modulIDs=modules_realized,
timestamp_from_in_milliseconds=(int(time.time()) * 1000) - (
Days_behind * 24 * 3600) * 1000)
EnergyUsage = requestSmardData(modulIDs=modules_consumed,
timestamp_from_in_milliseconds=(int(time.time()) * 1000) - (
Days_behind * 24 * 3600) * 1000)
# CLEAN UP DATA. REMOVE '-' from unknowns
EnergyUsage['Datum'] = EnergyUsage['Datum'] + '-' + EnergyUsage['Uhrzeit']
EnergyUsage = EnergyUsage.drop(columns=['Uhrzeit'])
EnergyUsage['Datum'] = pd.to_datetime(EnergyUsage['Datum'], format='%d.%m.%Y-%H:%M')
EnergyUsage = EnergyUsage.rename(columns={'Datum': 'Date', 'Gesamt (Netzlast)[MWh]': 'Gesamt[MWh]'})
EnergyUsage['Gesamt[MWh]'] = (EnergyUsage['Gesamt[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyProd['Datum'] = EnergyProd['Datum'] + '-' + EnergyProd['Uhrzeit']
EnergyProd = EnergyProd.drop(columns=['Uhrzeit'])
EnergyProd['Datum'] = pd.to_datetime(EnergyProd['Datum'], format='%d.%m.%Y-%H:%M')
EnergyProd = EnergyProd.rename(columns={'Datum': 'Date'})
EnergyProd['Wind Onshore[MWh]'] = (EnergyProd['Wind Onshore[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyProd['Steinkohle[MWh]'] = (EnergyProd['Steinkohle[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyProd['Erdgas[MWh]'] = (EnergyProd['Erdgas[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyProd['Pumpspeicher[MWh]'] = (EnergyProd['Pumpspeicher[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyUsage = EnergyUsage.resample('H', on='Date').mean()
EnergyProd = EnergyProd.resample('H', on='Date').mean()
# Remove Duplicates
EnergyProd = EnergyProd.loc[~EnergyProd.index.duplicated(keep='first')]
EnergyUsage = EnergyUsage.loc[~EnergyUsage.index.duplicated(keep='first')]
montelMissingData = montelMissingData.loc[~montelMissingData.index.duplicated(keep='first')]
MissingDataset = pd.concat([EnergyProd,EnergyUsage, montelMissingData], axis=1)
MissingDataset = MissingDataset.dropna()
return MissingDataset
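# Minimal usage sketch, not part of the original module: the request time and
# the 60-day window below are illustrative assumptions.
def _example_get_data_for_prediction():
    # Pull roughly two months of hourly SMARD + Montel data up to "now".
    requested = pd.Timestamp(datetime.datetime.now())
    df = get_data_for_prediction(requested, numberOfDaysInPast=60)
    # Expected columns (per the docstring above): 'Wind Onshore[MWh]',
    # 'Steinkohle[MWh]', 'Erdgas[MWh]', 'Gesamt[MWh]', 'Value', 'Base', 'Peak'.
    return df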
# ******************************************************************************************************************
# ********************************************** SMARD DATA REQUEST ************************************************
# ******************************************************************************************************************
def requestSmardData(
modulIDs=[8004169],
timestamp_from_in_milliseconds=(int(time.time()) * 1000) - (3 * 3600) * 1000,
timestamp_to_in_milliseconds=(int(time.time()) * 1000),
region="DE",
language="de",
type="discrete"
):
'''
Requests and returns a dataframe of SMARD.de data
:param modulIDs: ID of desired modules
:param timestamp_from_in_milliseconds: Time from current
:param timestamp_to_in_milliseconds: Desired timepoint
:param region: Region of data
:param language: Language of data
:param type: Type of data
:return: Dataframe
'''
# http request content
url = "https://www.smard.de/nip-download-manager/nip/download/market-data"
body = json.dumps({
"request_form": [
{
"format": "CSV",
"moduleIds": modulIDs,
"region": region,
"timestamp_from": timestamp_from_in_milliseconds,
"timestamp_to": timestamp_to_in_milliseconds,
"type": type,
"language": language
}]})
# http response
data = requests.post(url, body)
# create pandas dataframe out of response string (csv)
df = pd.read_csv(StringIO(data.text), sep=';')
return df
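# Minimal usage sketch, not part of the original module: module ID 5000410 is
# the realized-consumption module used above; the 7-day window is an assumption.
def _example_request_smard():
    now_ms = int(time.time()) * 1000
    consumption = requestSmardData(
        modulIDs=[5000410],
        timestamp_from_in_milliseconds=now_ms - 7 * 24 * 3600 * 1000,
        timestamp_to_in_milliseconds=now_ms,
    )
    # Raw SMARD CSV as a DataFrame; 'Datum'/'Uhrzeit' columns and '-'
    # placeholders still need the clean-up shown in get_data_for_prediction.
    return consumption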
# ******************************************************************************************************************
# ********************************************** MONTEL API REQUEST ************************************************
# ******************************************************************************************************************
def getDataFromAPI_HourlyIntervals(startDate, endDate):
"""
Input Data should be in the following form:
year-month-day
:param startDate: '2015-01-01'
:param endDate: '2019-01-01'
:return: Montel API DataFrame in hourly intervals
"""
def repeatlist(list_before, i):
list_after = [val for val in list_before for k in range(i)]
return list_after
# Get Bearer Token
page = requests.get('https://coop.eikon.tum.de/mbt/mbt.json')
dictsoup = (ast.literal_eval(page.text))
token = str((dictsoup['access_token']))
# token = "<KEY>"
url = 'http://api.montelnews.com/spot/getprices'
headers = {"Authorization": 'Bearer ' + token}
params = {
'spotKey': '14',
'fields': ['Base', 'Peak', 'Hours'],
'fromDate': str(startDate),
'toDate': str(endDate),
'currency': 'eur',
'sortType': 'Ascending'
}
response = requests.get(url, headers=headers, params=params)
data = response.json()
value = []
Timespan = []
date = []
base = []
peak = []
for parts in data['Elements']:  # if we create this externally, it can hold data in data1
date.append(parts['Date'])
base.append(parts['Base'])
peak.append(parts['Peak'])
for df in parts['TimeSpans']:
value.append(df['Value'])
Timespan.append(df['TimeSpan'])
date = repeatlist(date, 24)
base = repeatlist(base, 24)
peak = repeatlist(peak, 24)
MontelData = pd.DataFrame(list(zip(date, Timespan, value, base, peak)),
columns=['Date', 'Timespan', 'Value', 'Base', 'Peak'])
MontelData[['time', 'end']] = MontelData['Timespan'].str.split('-', 1, expand=True)
MontelData = MontelData.drop(columns=['Timespan', 'end'])
MontelData['Date'] = MontelData['Date'].str.replace('T00:00:00.0000000', '')
MontelData['Date'] = MontelData['Date'] + '-' + MontelData['time']
#MontelData['Date'] = MontelData[~MontelData['Date'].str.contains('dst')]
MontelData = MontelData.drop(columns=['time'])
MontelData['Date'] = pd.to_datetime(MontelData['Date'], format='%Y-%m-%d-%H:00')
MontelData15 = MontelData.set_index('Date')
MontelData15 = MontelData15.resample('H').mean()
MontelData15 = MontelData15.interpolate(method='time') # FINAL DATA
MontelData15 = MontelData15.dropna()
return MontelData15.loc[~MontelData15.index.duplicated(keep='first')]
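# Minimal usage sketch, not part of the original module: the date range is an
# arbitrary illustrative assumption.
def _example_montel_prices():
    prices = getDataFromAPI_HourlyIntervals(startDate='2021-01-01', endDate='2021-02-01')
    # Hourly, gap-interpolated frame indexed by 'Date' with columns 'Value', 'Base', 'Peak'.
    return prices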
# Three Datasets
# Electricity Price data from Montel
# Electricity production and consumption from Smard
# ******************************************************************************************************************
# ********************************************** MULTI VARIATE LSTM ************************************************
# ******************************************************************************************************************
# returns a DataFrame of the following predicted variables, in order, by hour:
# Erdgas[MWh], Gesamt[MWh], Steinkohle[MWh], Wind Onshore[MWh], Value
# All used variables come from either the Montel API or the SMARD energy production/consumption data
def predict_price_LSTM(targetDatetime,
pathToCheckpoint,
historicalDays=60,
makePredicition=True,
loadFromCheckpoint=1,
trainingEnabled=0,
gpuEnabled=0,
batch_size=16,
loss_Function=RMSE(),
epochsNumber=90,
numberLayers=2,
hiddenSize=512,
numWorkers=8
):
"""
:param targetDatetime: Date and time of requested day to predict. Should be given as a pandas datetime object
:param pathToCheckpoint: Computer Path to the LSTM model Checkpoint
:param historicalDays: Number of days prior to the requested day to predict. Minimum number = 14. Default = 60
:param makePredicition: Set equal to True if you want a prediction at the output. Default = True
:param loadFromCheckpoint: If activated, Checkpoint will be loaded into model. Default = 1
:param trainingEnabled: If activated, training will be enabled. Default = 0
:param gpuEnabled: If gpu available, Model will be trained with GPU at target position Default = 0
:param batch_size: For training. Default = 16
:param loss_Function: Loss function for training. Default = RMSE
:param epochsNumber: Number of epochs for training. Default = 90
:param numberLayers: Number of layers in model to be created. Default = 2
:param hiddenSize: Number of hidden states in lstm. Default = 512
:param numWorkers: number of workers specified for dataloader. Default = 8
:return: Returns a DataFrame of predicted values in 1-hour intervals.
Also returns the individual 1-hour, 1-day and 1-week ahead prediction steps
"""
# ProcessFlags
hourlyData = 1
if loadFromCheckpoint == 1:
chk_path = pathToCheckpoint
if hourlyData == 1:
max_prediction_length = 168 # forecast 1 week
max_encoder_length = 168 * 2 # use 2 weeks of history
data = get_data_for_prediction(targetDatetime, historicalDays)
data['GroupID'] = 'A'
data['time_idx'] = array(range(data.shape[0]))
data.reset_index(level=0, inplace=True)
Array1= np.array(data['Wind Onshore[MWh]'])
Array2= np.array(data['Steinkohle[MWh]'])
Array3= np.array(data['Erdgas[MWh]'])
pos=0
for i in Array1:
if int(i)<10:
Array1[pos] = i * 1000
pos =pos+1
pos=0
for i in Array2:
if int(i) < 10:
Array2[pos] = i * 1000
pos = pos + 1
pos = 0
for i in Array3:
if int(i) < 10:
Array3[pos] = i * 1000
pos = pos + 1
data.drop(columns={'Wind Onshore[MWh]','Steinkohle[MWh]','Erdgas[MWh]'})
data['Wind Onshore[MWh]']= Array1
data['Steinkohle[MWh]']= Array2
data['Erdgas[MWh]']= Array3
#print(data)
# data['Date'] = pd.to_datetime(data['Date'], format='%d/%m/%Y %H:00')
# **************************************************************************************************************
# ********************************************* PREPROCESSING **************************************************
# **************************************************************************************************************
# fill in any missing values historical data may have
training_cutoff = data["Date"].max() - timedelta(days=7)
groupind = data['GroupID']
groupind2 = data['time_idx']
groupind3 = data['Date']
data = data.drop(columns=['GroupID', 'time_idx', 'Date'])
data = data.apply(lambda x: x.fillna(x.mean()))
data =
|
pd.concat([data, groupind], axis=1)
|
pandas.concat
|
from sqlite3 import ProgrammingError
import pandas as pd
import pytest
import snowflake.connector
from mock import patch
from toucan_connectors import DataSlice
from toucan_connectors.json_wrapper import JsonWrapper
from toucan_connectors.snowflake import SnowflakeDataSource
from toucan_connectors.snowflake_common import SnowflakeCommon
@pytest.fixture
def snowflake_datasource():
return SnowflakeDataSource(
name='test_name',
domain='test_domain',
database='database_1',
warehouse='warehouse_1',
query='select * from my_table where toto=%(foo);',
query_object={'schema': 'SHOW_SCHEMA', 'table': 'MY_TABLE', 'columns': ['col1', 'col2']},
parameters={'foo': 'bar', 'pokemon': 'pikachu'},
)
data_result_none = []
data_result_one = [
{
'1 Column Name': 'value',
'2 Column Name': 'value',
'3 Column Name': 'value',
'4 Column Name': 'value',
'5 Column Name': 'value',
'6 Column Name': 'value',
'7 Column Name': 'value',
'8 Column Name': 'value',
'9 Column Name': 'value',
'10 Column Name': 'value',
'11 Column Name': 'value',
}
]
data_result_5 = JsonWrapper.load(
open(
'tests/fixtures/fixture_snowflake_common/data_5.json',
)
)
data_result_all = JsonWrapper.load(
open(
'tests/fixtures/fixture_snowflake_common/data_10.json',
)
)
databases_result_all = [{'name': 'database_1'}, {'name': 'database_2'}]
databases_result_none = []
databases_result_one = [{'name': 'database_1'}]
warehouses_result_all = [{'name': 'warehouse_1'}, {'name': 'warehouse_2'}]
warehouses_result_none = []
warehouses_result_one = [{'name': 'warehouse_1'}]
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute', return_value=None)
@patch('pandas.DataFrame.from_dict', return_value=pd.DataFrame(databases_result_all))
def test_get_database_without_filter(database_result, execute_query, connect):
result = SnowflakeCommon().get_databases(connect)
assert database_result.call_count == 1
assert result[0] == 'database_1'
assert result[1] == 'database_2'
assert len(result) == 2
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(databases_result_none),
)
def test_get_database_with_filter_no_result(database_result, execute_query, connect):
result = SnowflakeCommon().get_databases(connect, 'database_3')
assert database_result.call_count == 1
assert len(result) == 0
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(databases_result_one),
)
def test_get_database_with_filter_one_result(database_result, execute_query, connect):
result = SnowflakeCommon().get_databases(connect, 'database_1')
assert database_result.call_count == 1
assert result[0] == 'database_1'
assert len(result) == 1
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(warehouses_result_all),
)
def test_get_warehouse_without_filter(warehouse_result, execute_query, connect, mocker):
result = SnowflakeCommon().get_warehouses(connect)
assert warehouse_result.call_count == 1
assert result[0] == 'warehouse_1'
assert result[1] == 'warehouse_2'
assert len(result) == 2
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(warehouses_result_none),
)
def test_get_warehouse_with_filter_no_result(warehouse_result, execute_query, connect):
result = SnowflakeCommon().get_warehouses(connect, 'warehouse_3')
assert warehouse_result.call_count == 1
assert len(result) == 0
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(warehouses_result_one),
)
def test_get_warehouse_with_filter_one_result(warehouse_result, execute_query, connect):
result = SnowflakeCommon().get_warehouses(connect, 'warehouse_1')
assert warehouse_result.call_count == 1
assert result[0] == 'warehouse_1'
assert len(result) == 1
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(data_result_all),
)
def test_retrieve_data(result, execute_query, connect, snowflake_datasource):
df: pd.DataFrame = SnowflakeCommon().retrieve_data(connect, snowflake_datasource)
assert result.call_count == 3
assert len(df) == 14
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(data_result_all),
)
def test_get_slice_without_limit_without_offset(
result, execute_query, connect, snowflake_datasource
):
ds: DataSlice = SnowflakeCommon().get_slice(connect, snowflake_datasource)
assert result.call_count == 3
assert len(ds.df) == 14
assert ds.stats.total_returned_rows == 14
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(data_result_5),
)
def test_get_slice_with_limit_without_offset(result, execute_query, connect, snowflake_datasource):
ds: DataSlice = SnowflakeCommon().get_slice(connect, snowflake_datasource, limit=5)
assert result.call_count == 3
assert len(ds.df) == 5
assert ds.stats.total_returned_rows == 5
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(data_result_none),
)
def test_get_slice_with_limit_without_offset_no_data(
result, execute_query, connect, snowflake_datasource
):
ds: DataSlice = SnowflakeCommon().get_slice(connect, snowflake_datasource, limit=5)
assert result.call_count == 3
assert len(ds.df) == 0
assert ds.stats.total_returned_rows == 0
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(data_result_one),
)
def test_get_slice_with_limit_without_offset_not_enough_data(
result, execute_query, connect, snowflake_datasource
):
ds: DataSlice = SnowflakeCommon().get_slice(connect, snowflake_datasource, limit=5)
assert result.call_count == 3
assert len(ds.df) == 1
assert ds.stats.total_returned_rows == 1
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(data_result_all),
)
def test_get_slice_with_limit_with_offset(result, execute_query, connect, snowflake_datasource):
ds: DataSlice = SnowflakeCommon().get_slice(connect, snowflake_datasource, offset=5, limit=5)
assert result.call_count == 3
assert len(ds.df) == 14
assert ds.stats.total_returned_rows == 14
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(data_result_none),
)
def test_get_slice_with_limit_with_offset_no_data(
result, execute_query, connect, snowflake_datasource
):
ds: DataSlice = SnowflakeCommon().get_slice(connect, snowflake_datasource, offset=5, limit=5)
assert result.call_count == 3
assert len(ds.df) == 0
assert ds.stats.total_returned_rows == 0
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(data_result_one),
)
def test_get_slice_with_limit_with_offset_not_enough_data(
result, execute_query, connect, snowflake_datasource
):
ds: DataSlice = SnowflakeCommon().get_slice(connect, snowflake_datasource, offset=5, limit=5)
assert result.call_count == 3
assert len(ds.df) == 1
assert ds.stats.total_returned_rows == 1
@patch('snowflake.connector.connect', return_value=snowflake.connector.SnowflakeConnection)
@patch('snowflake.connector.cursor.SnowflakeCursor.execute')
@patch(
'pandas.DataFrame.from_dict',
return_value=pd.DataFrame(data_result_all),
)
def test_get_slice_without_limit_with_offset(result, execute_query, connect, snowflake_datasource):
ds: DataSlice = SnowflakeCommon().get_slice(connect, snowflake_datasource, offset=5)
assert result.call_count == 3
assert len(ds.df) == 14
assert ds.stats.total_returned_rows == 14
@patch(
'pandas.DataFrame.from_dict',
return_value=
|
pd.DataFrame(data_result_all)
|
pandas.DataFrame
|
"""
This module contains all Israel-specific data loading and data cleaning routines.
"""
import requests
import pandas as pd
import numpy as np
idx = pd.IndexSlice
def get_raw_covidtracking_data_il():
""" Gets the current daily CSV from COVIDTracking """
url = 'https://raw.githubusercontent.com/dancarmoz/israel_moh_covid_dashboard_data/master/hospitalized_and_infected.csv'
data = pd.read_csv(url)
return data
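# Minimal usage sketch, not part of the original module; nothing about the
# upstream CSV/API column layout is assumed here beyond what the loaders return.
def _example_load_il_data():
    hospitalized = get_raw_covidtracking_data_il()
    cities = get_raw_cities_il_data()
    return hospitalized, cities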
def get_raw_cities_il_data():
# Get the latest csv file from this link:
baseurl = 'https://data.gov.il'
nextapi = '/api/3/action/datastore_search?resource_id=8a21d39d-91e3-40db-aca1-f73f7ab1df69&limit=100000'
datafs = []
while (nextapi):
url = baseurl + nextapi
with requests.get(url) as r:
if (not r.json()['result']['records']):
break
datafs.append(pd.DataFrame(r.json()['result']['records']))
nextapi = r.json()['result']['_links']['next']
data =
|
pd.concat(datafs)
|
pandas.concat
|
"""
Simple set of functions for summarizing over a group
"""
import pandas as pd
import numpy as np
import re
def filter_is(df, col, val):
return df[df[col] == val]
def filter_in(df, col, vals):
ind = [np.any(x in vals) for x in df[col]]
return df[ind]
def filter_gt(df, col, val):
return df[df[col] > val]
def filter_lt(df, col, val):
return df[df[col] < val]
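# Minimal usage sketch for the filter helpers above; the toy column names are
# illustrative only.
def _example_filters():
    df = pd.DataFrame({'subject': ['s1', 's1', 's2', 's3'],
                       'count': [10, 1, 100, 3]})
    only_s1 = filter_is(df, 'subject', 's1')            # rows where subject == 's1'
    s1_or_s3 = filter_in(df, 'subject', ['s1', 's3'])   # rows where subject is in the set
    big = filter_gt(df, 'count', 5)                     # rows where count > 5
    small = filter_lt(df, 'count', 5)                   # rows where count < 5
    return only_s1, s1_or_s3, big, small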
def is_subset(set1, set2):
"""
Return True if all members of set2 are contained in set1,
i.e., set2 is a subset of set1. See example for clarity:
Example
-------
>>> is_subset(set(["A","B"]), set(["A"]))
True
>>> is_subset(set(["A","B"]), set(["A","C"]))
False
"""
return set(set2)-set(set1) == set()
def is_almost_subset(set1, set2, min_set_diff = 2):
"""
Return True if fewer than min_set_diff members of set2
are missing from set1,
i.e., set2 is almost a subset of set1. See example for clarity:
Example
-------
>>> is_almost_subset(set(["A","B","C","D"]), set(["A", "K"]), 2)
True
>>> is_almost_subset(set(["A","B","C","D"]), set(["A", "K"]), 1)
False
"""
return len(set(set2)-set(set1)) < min_set_diff
def test_for_subsets(list_of_sets):
"""
test_for_subsets (formerly known as turtles_all_the_way_down)
For a ranked list of sets, return a vector where 1
indicates that the set is not a subset of any of the sets
that come before it in the list.
This is useful for eliminating clusters (sets) of TCRs
which are smaller than a higher ranked and larger set
that contains all its members. See example for clarity:
Example
-------
>>> test_for_subsets([["A","B","C"], ["A","C","D"], ["A","D"], ["B","E"],["B","C"]])
[1, 1, 0, 1, 0]
>>> test_for_subsets([ [1,2,3], [1,3,4], [1,4], [2,5],[2,3]])
[1, 1, 0, 1, 0]
"""
tracker = [1]
if isinstance(list_of_sets, pd.Series):
list_of_sets = list_of_sets.to_list()
checked_sets = [list_of_sets[0]]
for s in list_of_sets[1:]:
if np.any([is_subset(cs, s) for cs in checked_sets]):
tracker.append(0)
else:
tracker.append(1)
checked_sets.append(s)
assert len(tracker) == len(list_of_sets)
return tracker
def test_for_almost_subsets(list_of_sets, thr = 3):
"""
test_for_almost_subsets: like test_for_subsets, but tolerant of
up to thr-1 missing elements (see is_almost_subset).
For a ranked list of sets, return a vector where 1
indicates that the set is not an almost-subset of any of the sets
that come before it in the list.
This is useful for eliminating clusters (sets) of TCRs
which are smaller than a higher ranked and larger set
that contains all its members. See example for clarity:
Example
-------
>>> test_for_almost_subsets([["A","B","C"], ["A","C","D"], ["A","D"], ["B","E"],["B","C"]], 1)
[1, 1, 0, 1, 0]
>>> test_for_almost_subsets([ [1,2,3], [1,3,4], [1,4], [2,5],[2,3]], 1)
[1, 1, 0, 1, 0]
"""
tracker = [1]
if isinstance(list_of_sets, pd.Series):
list_of_sets = list_of_sets.to_list()
checked_sets = [list_of_sets[0]]
for s in list_of_sets[1:]:
if np.any([is_almost_subset(cs, s, thr) for cs in checked_sets]):
tracker.append(0)
else:
tracker.append(1)
checked_sets.append(s)
assert len(tracker) == len(list_of_sets)
return tracker
def _dist_summ(data, precision = 1, scientific = True):
"""
Summarise distribution [as min,q1,median,q3, max]
Parameters
----------
data : list
List of numeric data
precision : int
How many integers precision in scientific notation = 1,
scientific : bool
Default is True, to return result in scientific notation
Examples
--------
>>> _dist_summ([1,2,3,4,5])
['1.e+00', '2.e+00', '3.e+00', '4.e+00', '5.e+00']
_dist_summ([1,2,3,4,5], scientific=False)
[1, 2.0, 3.0, 4.0, 5]
"""
dmin = np.min(data)
dQ1 = np.percentile(data, q = 25, interpolation = 'midpoint')
dmedian = np.median(data)
dQ3 = np.percentile(data, q = 75, interpolation = 'midpoint')
dmax = np.max(data)
r = [dmin, dQ1, dmedian, dQ3, dmax]
if scientific:
return [np.format_float_scientific(s, precision = precision) for s in r]
else:
return r
def _select(df, iloc_rows, col = 'cdr3_b_aa'):
return df.iloc[iloc_rows,][col].to_list()
def _summ(df, indices, column = None , f=None, fdf = None, **kwargs):
"""
_summ implements a split, apply some function, combine result routine.
Parameters
----------
f : callable
a function callable on a list of series
fdf : callable
a function callable on a dataframe
df : pd.DataFrame
DataFrame
indices : list
list of lists containing integers corresponding to the iloc rows of the < df >
column : str or None
column name, should be None if using a fdf
Returns
-------
summary : list of identical length to indices
Examples
--------
>>> from tcrdist.summarize import _summ, _occurs_N_str, _top_N_str
>>> df = pd.DataFrame({'catvar':["a","b","b","c"], "numvar":[10,1,100,3]})
>>> _summ(df, indices = [[0,1], [2,3]], column = 'numvar', f = np.median)
[5.5, 51.5]
>>> _summ(df, indices = [[0,1], [2,3]], column = 'catvar', f = _occurs_N_str, N = 2)
['b (50.0%), a (50.0%)', 'c (50.0%), b (50.0%)']
>>> _summ(df, indices = [[0,1], [2,3]], column = 'catvar', fdf = _top_N_str, **{'col': 'catvar', 'count_col': 'numvar','N':2})
['a (90.9%), b (9.1%)', 'b (97.1%), c (2.9%)']
"""
summary = list()
for ind in indices:
if f is not None:
if isinstance(df.iloc[ind, ][column], pd.Series):
selection = df.iloc[ind, ][column].to_list()
else:
selection = df.iloc[ind, ][column]
summary.append(f(selection, **kwargs))
elif fdf is not None:
selection = df.iloc[ind, ]
summary.append(fdf(selection, **kwargs))
else:
raise(ValueError("No function (f) or function on a DataFrame (fdf) were supplied\n"))
assert len(summary) == len(indices)
return summary
def _occurs_N_str(m, N):
"""
Return occurrences in a pd.Series as a string
Example
-------
>>> _occurs_N_str(["a","b","b","c"], 1)
'b (50.0%)'
>>> _occurs_N_str(["a","b","b","c"], 2)
'b (50.0%), c (25.0%)'
>>> _occurs_N_str(["a","b","b","c"], 3)
'b (50.0%), c (25.0%), a (25.0%)'
"""
if isinstance(m, pd.Series):
gby = m.value_counts()
else:
m = pd.Series(m)
gby = m.value_counts()
gby = 100 * gby / gby.sum()
gby = gby.sort_values(ascending=False)
out = ', '.join(['%s (%2.1f%%)' % (idx, v) for idx,v in gby.iteritems()][:N])
return out
def _top_N_str(m, col, count_col, N):
"""
Example
-------
>>> df = pd.DataFrame({'catvar':["a","b","b","c"], "numvar":[10,1,100,3]})
>>> _top_N_str(df, col = 'catvar', count_col ='numvar', N=2)
'b (88.6%), a (8.8%)'
"""
gby = m.groupby(col)[count_col].agg(np.sum)
gby = 100 * gby / gby.sum()
gby = gby.sort_values(ascending=False)
out = ', '.join(['%s (%2.1f%%)' % (idx, v) for idx,v in gby.iteritems()][:N])
return out
def _extract_percentage(s, key):
"""
extractor for pattern '%s (%2.1f%%)', see examples for clarity
Parameter
---------
s : str
string pattern '%s (%2.1f%%)','%s (%2.1f%%)','%s (%2.1f%%)'
k : str
key for the percentage you want to extract
Returns
-------
tuple (str, float)
Examples
--------
>>> _extract_percentage('naive_CD8 (100.0%)', 'naive_CD8')
('naive_CD8', '100.0')
>>> _extract_percentage('naive_CD8 (100.0%)', 'PBMC')
('PBMC', 0.0)
>>> _extract_percentage('naive_CD8 (94.1%), PBMC (5.9%)', 'PBMC')
('PBMC', '5.9')
"""
ls = s.split(",")
try:
rs = [re.search(pattern = '([A-Za-z0-9_]+) [(]([0-9]+[\.][0-9])%[)]', string = s) for s in ls]
rgs = [reg.groups() for reg in rs]
return (key, {k:v for k,v in rgs}[key])
except:
return key, 0.0
def member_summ(res_df, clone_df, key_col = 'neighbors_i', count_col='count', addl_cols=[], addl_n=1):
"""Return additional summary info about each result (row)) based on the members of the cluster.
This is helpful for preparing strings to add to the tooltip in hierdiff.plot_hclust_props.
Parameters
----------
res_df : pd.DataFrame [nclusters x result cols]
Returned from neighborhood_diff or hcluster_diff
clone_df : pd.DataFrame [nclones x metadata]
Contains metadata for each clone.
key_col : str
Column in res_df that specifies the iloc of members in the clone_df
count_col : str
Column in clone_df that specifies counts.
Default none assumes count of 1 cell for each row.
addl_cols : list
Columns to summarize
addl_n : int
Number of top N clones to include in the summary of
each cluster.
Returns
-------
summ : pd.DataFrame [nclusters x summary columns]
Columns that can be joined with res_df
Example
-------
summ_df = member_summ(res_df, clone_df)
res_df = res_df.join(summ_df, how='left')"""
def _top_N_str(m, col, count_col, N):
gby = m.groupby(col)[count_col].agg(np.sum)
gby = 100 * gby / gby.sum()
gby = gby.sort_values(ascending=False)
out = ', '.join(['%s (%2.1f%%)' % (idx, v) for idx,v in gby.iteritems()][:N])
return out
split = []
for resi, res_row in res_df.iterrows():
m = clone_df.iloc[res_row[key_col]]
mode_i = m[count_col].idxmax()
summ = {}
for c in [c for c in clone_df.columns if 'cdr3' in c]:
summ[c] = _top_N_str(m, c, count_col, 1)
for c in [c for c in clone_df.columns if 'gene' in c]:
summ[c] = _top_N_str(m, c, count_col, 3)
x_val_cols = [c for c in res_df.columns if 'x_val_' in c]
x_freq_cols = [c for c in res_df.columns if 'x_freq_' in c]
for label_col, freq_col in zip(x_val_cols, x_freq_cols):
summ[res_row[label_col]] = np.round(res_row[freq_col], 3)
for c in [c for c in addl_cols]:
summ[c] = _top_N_str(m, c, count_col, addl_n)
summ = pd.Series(summ, name=resi)
split.append(summ)
summ =
|
pd.DataFrame(split)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pytest
from PermutationImportance.data_verification import verify_data, determine_variable_names
from PermutationImportance.error_handling import InvalidDataException, InvalidInputException
def test_pandas_dataframes():
inputs = np.array([[1, 2, 3], [2, 4, 6]])
outputs = np.array([1, 0])
data = (inputs, outputs)
result = verify_data(data)
for exp, res in zip(data, result):
assert (exp == res).all()
A = [1, 2]
B = [2, 4]
C = [3, 6]
D = [1, 0]
inputs =
|
pd.DataFrame({'A': A, 'B': B, 'C': C})
|
pandas.DataFrame
|
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
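# Descriptive note: the Numeric mixin below bundles the shared arithmetic/ufunc
# compatibility tests that are reused by the Float64Index and Int64Index test
# cases defined further down.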
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assert(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 =
|
MultiIndex(levels=levels, labels=labels)
|
pandas.core.index.MultiIndex
|
# -*- coding: utf-8 -*-
################################################################################
# Description: Python script to analyze the results of the asset allocation exp.
# Author: <NAME>
# Email: <EMAIL>
# Date: dom 24 lug 2016 21:31:25 BST
################################################################################
#--------#
# Import #
#--------#
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import matplotlib
import errno
import ffn
# General settings
matplotlib.style.use('seaborn-colorblind')
params = {'legend.fontsize': 'x-large',
'figure.figsize': (20, 10),
'figure.facecolor': 'white',
'figure.edgecolor': 'black',
'axes.labelsize': 'x-large',
'axes.titlesize': 'x-large',
'xtick.labelsize': 'x-large',
'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
# Colors used
colors = ['black',
'dimgrey',
'steelblue',
'lightsteelblue']
#-----------------------#
# Algorithms considered #
#-----------------------#
algorithms = set(['ARAC', 'PGPE', 'NPGPE', 'RSARAC', 'RSPGPE', 'RSNPGPE'])
#-------------------#
# Utility functions #
#-------------------#
def createDirectory(dirPath):
""" Create directory at a given path (absolute).
Args:
dirPath (str): absolute path for new directory.
"""
if not os.path.exists(os.path.expanduser(dirPath)):
try:
os.makedirs(os.path.expanduser(dirPath))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
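# Usage sketch (hypothetical path): createDirectory('~/results/images') creates
# the directory tree if it is missing and silently ignores it if it already exists.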
#-----------#
# Functions #
#-----------#
def analyzeConvergence(filesList, algorithmName):
""" Aggregate the convergence information of a series of independent
experiments of a certain learning algorithms.
Args:
filesList (list of str): list of the files of convergence information
Returns:
dfReward (pd.DataFrame): dataframe containing the aggregate average reward
dfStddev (pd.DataFrame): datraframe containing the aggregate standard dev
dfSharpe (pd.DataFrame): dataframe containing the aggreagate Sharpe ratio
"""
# Initialize output dataframes
temp = pd.read_csv(os.path.expanduser(filesList[0]), index_col=0)
dfRewardExp = pd.DataFrame(index=temp.index)
dfStddevExp = pd.DataFrame(index=temp.index)
dfSharpeExp = pd.DataFrame(index=temp.index)
# For all the files
for f in filesList:
# Experiment name: file name without the directory and the 4-character extension
expName = f[::-1].split('/', 1)[0][::-1][:-4]
df = pd.read_csv(os.path.expanduser(f), index_col=0)
dfRewardExp[expName] = df['average']
dfStddevExp[expName] = df['stdev']
dfSharpeExp[expName] = df['sharpe']
# Compute mean and stddev across experiments
c1 = algorithmName
c2 = algorithmName + '_delta'
dfReward = pd.DataFrame(index=temp.index, columns=[c1, c2])
dfStddev = pd.DataFrame(index=temp.index, columns=[c1, c2])
dfSharpe = pd.DataFrame(index=temp.index, columns=[c1, c2])
dfReward[c1] = dfRewardExp.mean(axis=1)
dfReward[c2] = dfRewardExp.std(axis=1)
dfStddev[c1] = dfStddevExp.mean(axis=1)
dfStddev[c2] = dfStddevExp.std(axis=1)
dfSharpe[c1] = dfSharpeExp.mean(axis=1)
dfSharpe[c2] = dfSharpeExp.std(axis=1)
# Return
return dfReward, dfStddev, dfSharpe
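# Minimal usage sketch (hypothetical file names): aggregate the convergence logs
# of several independent runs of one algorithm into mean/std curves.
#
#   files = ['~/experiments/PGPE/run0.csv', '~/experiments/PGPE/run1.csv']
#   dfReward, dfStddev, dfSharpe = analyzeConvergence(files, 'PGPE')
#   dfReward['PGPE'].plot()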
def compareAlgorithmConvergence(debugDir, imagesDir=None):
""" Compare the convergence properties of several learning algorithms. The
function produces images and csv summaries of the analysis in the given
directories.
Args:
debugDir (str): directory containing the convergence debug files.
imagesDir (str): images directory.
"""
dfReward =
|
pd.DataFrame()
|
pandas.DataFrame
|
import datetime
import re
import warnings
from os.path import dirname, join
import numpy as np
import pandas as pd
from techminer.core import explode
from techminer.core.extract_country_name import extract_country_name
from techminer.core.extract_words import extract_words
from techminer.core.map import map_
from techminer.core.text import remove_accents
from techminer.core.thesaurus import load_file_as_dict
warnings.filterwarnings("ignore")
from nltk import word_tokenize
class ScopusImporter:
def __init__(
self,
input_file="scopus.csv",
output_file="techminer.csv",
article=True,
article_in_press=True,
book=True,
book_chapter=True,
business_article=True,
conference_paper=True,
conference_review=False,
data_paper=True,
editorial=False,
letter=False,
note=False,
review=True,
short_survey=True,
erratum=False,
report=False,
retracted=False,
abstract_report=False,
undefined=False,
):
self.input_file = input_file
self.output_file = output_file
self.data = None
self.article = article
self.article_in_press = article_in_press
self.book = book
self.book_chapter = book_chapter
self.business_article = business_article
self.conference_paper = conference_paper
self.conference_review = conference_review
self.data_paper = data_paper
self.editorial = editorial
self.letter = letter
self.note = note
self.review = review
self.short_survey = short_survey
self.erratum = erratum
self.report = report
self.retracted = retracted
self.abstract_report = abstract_report
self.undefined = undefined
def run(self):
##
## Load data
##
self.data = pd.read_csv(self.input_file)
##
## Remove blank spaces
##
self.data = self.data.applymap(lambda w: w.strip() if isinstance(w, str) else w)
##
## Document ID
##
self.data["ID"] = range(len(self.data))
##
## Steps
##
self.rename_columns()
self.select_documents()
self.remove_accents()
self.remove_no_author_name_available()
self.format_author_names()
self.count_number_of_authors_per_document()
self.calculate_frac_number_of_documents_per_author()
self.remove_no_author_id_available()
self.disambiguate_author_names()
self.remove_text_in_foreing_languages()
self.extract_country_names()
self.extract_country_first_author()
self.reduce_list_of_countries()
self.transform_author_keywords_to_lower_case()
self.transform_index_keywords_to_lower_case()
self.remove_copyright_mark_from_abstracts()
self.transform_global_citations_NA_to_zero()
self.format_abb_source_title()
self.create_historiograph_id()
self.create_local_references()
self.transform_abstract_to_lower_case()
self.british_to_amerian()
self.keywords_in_abstract()
# self.extract_title_keywords()
# self.extract_title_words()
# self.extract_abstract_phrases_and_words()
# self.highlight_author_keywords_in_titles()
# self.highlight_author_keywords_in_abstracts()
self.compute_bradford_law_zones()
##
## Replace blanks by pd.NA
##
self.data = self.data.applymap(
lambda w: pd.NA if isinstance(w, str) and w == "" else w
)
self.data = self.data.applymap(
lambda w: w.replace(chr(8211), "-") if isinstance(w, str) else w
)
##
## Output transformed data
##
if self.output_file is None:
return self.data
self.data.to_csv(self.output_file, index=False)
self.logging_info("Finished!!!")
def logging_info(self, msg):
print(
"{} - INFO - {}".format(
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), msg
)
)
# def extract_title_keywords(self):
# self.logging_info("Keywords extraction from title ...")
# author_keywords = self.data.Author_Keywords.dropna()
# author_keywords = author_keywords.map(lambda w: w.lower().split(";"))
# author_keywords = author_keywords.explode().tolist()
# author_keywords = set(author_keywords)
# index_keywords = self.data.Index_Keywords.dropna()
# index_keywords = index_keywords.map(lambda w: w.lower().split(";"))
# index_keywords = index_keywords.explode().tolist()
# index_keywords = set(index_keywords)
# keywords = author_keywords | index_keywords
# self.data["Title_Keywords"] = self.data.Title.copy()
# self.data["Title_Keywords"] = self.data.Title_Keywords.map(
# lambda w: word_tokenize(w.lower()), na_action="ignore"
# )
# self.data["Title_Keywords"] = self.data.Title_Keywords.map(
# lambda w: set(w), na_action="ignore"
# )
# self.data["Title_Keywords"] = self.data.Title_Keywords.map(
# lambda w: keywords & w, na_action="ignore"
# )
# self.data["Title_Keywords"] = self.data.Title_Keywords.map(
# lambda w: ";".join(w), na_action="ignore"
# )
def select_documents(self):
document_types = []
if self.article is True:
document_types.append("Article")
if self.article_in_press is True:
document_types.append("Article-in-Press")
if self.book is True:
document_types.append("Book")
if self.book_chapter is True:
document_types.append("Book Chapter")
if self.business_article is True:
document_types.append("Business Article")
if self.conference_paper is True:
document_types.append("Conference Paper")
if self.conference_review is True:
document_types.append("Conference Review")
if self.data_paper is True:
document_types.append("Data Paper")
if self.editorial is True:
document_types.append("Editorial")
if self.letter is True:
document_types.append("Letter")
if self.note is True:
document_types.append("Note")
if self.review is True:
document_types.append("Review")
if self.short_survey is True:
document_types.append("Short Survey")
if self.erratum is True:
document_types.append("Erratum")
if self.report is True:
document_types.append("Report")
if self.retracted is True:
document_types.append("Retracted")
if self.abstract_report is True:
document_types.append("Abstract Report")
if self.undefined is True:
document_types.append("Undefined")
self.data = self.data[
self.data.Document_Type.map(
lambda w: w in document_types, na_action="ignore"
)
]
self.data.index = range(len(self.data))
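# Note: with the default constructor flags, conference reviews, editorials,
# letters, notes, errata, reports, retracted items, abstract reports and
# undefined documents are filtered out at this step.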
def keywords_in_abstract(self):
self.logging_info("Extracting Keywords from abstracts ...")
author_keywords = self.data.Author_Keywords.dropna()
author_keywords = author_keywords.map(lambda w: w.lower().split(";"))
author_keywords = author_keywords.explode().tolist()
author_keywords = set(author_keywords)
index_keywords = self.data.Index_Keywords.dropna()
index_keywords = index_keywords.map(lambda w: w.lower().split(";"))
index_keywords = index_keywords.explode().tolist()
index_keywords = set(index_keywords)
keywords = author_keywords | index_keywords
##
## Prepare compound keywords
##
compound_keywords = [w for w in keywords if len(w.split()) > 1]
compound_keywords = sorted(compound_keywords, key=len, reverse=True)
##
## Preserve compound keywords in abstract
##
phrases = self.data.Abstract.copy()
for k in compound_keywords:
pattern = re.compile(re.escape(k), re.IGNORECASE)
phrases = phrases.map(
lambda w: pattern.sub(k.replace(" ", "_"), w), na_action="ignore"
)
##
## Tokenize words
##
phrases = phrases.map(
lambda w: set(word_tokenize(w.lower())),
na_action="ignore",
)
##
## Restore compound words
##
phrases = phrases.map(
lambda w: [m.replace("_", " ") for m in w],
na_action="ignore",
)
##
## Extract keywords from text
##
self.data["Abstract_Keywords"] = phrases.map(
lambda w: ";".join(sorted(keywords & set(w))), na_action="ignore"
)
self.data["Abstract_Author_Keywords"] = phrases.map(
lambda w: ";".join(sorted(author_keywords & set(w))), na_action="ignore"
)
self.data["Abstract_Index_Keywords"] = phrases.map(
lambda w: ";".join(sorted(index_keywords & set(w))), na_action="ignore"
)
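# --- Illustrative sketch (standalone helper, not a method of the class above) ---
# keywords_in_abstract() protects multi-word keywords by joining them with
# underscores before tokenization and restoring the spaces afterwards. The toy
# function below reproduces that idea on invented data; str.split() stands in
# for nltk's word_tokenize purely to keep the sketch dependency-free.
def _demo_compound_keyword_matching():
    import re
    keywords = {"machine learning", "bibliometrics"}
    abstract = "Machine learning methods are widely used in bibliometrics"
    compound = sorted((k for k in keywords if " " in k), key=len, reverse=True)
    text = abstract
    for k in compound:
        text = re.compile(re.escape(k), re.IGNORECASE).sub(k.replace(" ", "_"), text)
    tokens = {t.replace("_", " ") for t in text.lower().split()}
    return ";".join(sorted(keywords & tokens))  # -> 'bibliometrics;machine learning'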
def british_to_amerian(self):
self.logging_info("Translate british spelling to american spelling ...")
module_path = dirname(__file__)
filename = join(module_path, "data/bg2am.data")
bg2am = load_file_as_dict(filename)
for british_word in bg2am:
self.data = self.data.applymap(
lambda w: w.replace(british_word, bg2am[british_word][0])
if isinstance(w, str)
else w
)
def rename_columns(self):
for column_to_delete in [
"Abstract HL",
# "Abstract",
"Access Type",
# "Affiliations",
"Art. No.",
# "Authors_ID",
"Authors with affiliations",
# "Bradford_Law_Zone",
"CODEN",
"Correspondence Address",
"DOI",
"Editors",
"EID",
# "Global_References",
# "ID",
"ISBN",
"ISSN",
"Issue",
"Language of Original Document",
"Link",
# "Local_References",
# "Num_Authors",
"Page count",
"Page end",
"Page start",
"Publication Stage",
"Publisher",
"PubMed ID",
"Source",
# "Global_Citations",
# "Local_Citations",
"Title HL",
# "Title",
"Volume",
# "Year",
]:
if column_to_delete in self.data.columns:
self.data.pop(column_to_delete)
scopus2tags = {
"Abbreviated Source Title": "Abb_Source_Title",
"Abstract": "Abstract",
"Access Type": "Access_Type",
"Affiliations": "Affiliations",
"Art. No.": "Art_No",
"Author Keywords": "Author_Keywords",
"Author(s) ID": "Authors_ID",
"Authors with affiliations": "Authors_with_affiliations",
"Authors": "Authors",
"Cited by": "Global_Citations",
"CODEN": "CODEN",
"Correspondence Address": "Correspondence_Address",
"Document Type": "Document_Type",
"DOI": "DOI",
"Editors": "Editors",
"EID": "EID",
"Index Keywords": "Index_Keywords",
"ISBN": "ISBN",
"ISSN": "ISSN",
"Issue": "Issue",
"Language of Original Document": "Language_of_Original_Document",
"Link": "Link",
"Page count": "Page_count",
"Page end": "Page_end",
"Page start": "Page_start",
"Publication Stage": "Publication_Stage",
"Publisher": "Publisher",
"PubMed ID": "PubMed_ID",
"References": "Global_References",
"Source title": "Source_title",
"Source": "Source",
"Title": "Title",
"Volume": "Volume",
"Year": "Year",
}
self.logging_info("Renaming and selecting columns ...")
self.data = self.data.rename(columns=scopus2tags)
def remove_accents(self):
self.logging_info("Removing accents ...")
self.data = self.data.applymap(
lambda w: remove_accents(w) if isinstance(w, str) else w
)
def remove_no_author_name_available(self):
if "Authors" not in self.data.columns:
return
self.logging_info('Removing "[No author name available]" ...')
self.data["Authors"] = self.data.Authors.map(
lambda w: pd.NA if w == "[No author name available]" else w
)
def format_author_names(self):
if "Authors" not in self.data.columns:
return
self.logging_info("Formatting author names ...")
self.data["Authors"] = self.data.Authors.map(
lambda w: w.replace(",", ";").replace(".", "") if pd.isna(w) is False else w
)
def count_number_of_authors_per_document(self):
if "Authors" not in self.data.columns:
return
self.logging_info("Counting number of authors per document...")
self.data["Num_Authors"] = self.data.Authors.map(
lambda w: len(w.split(";")) if not pd.isna(w) else 0
)
def calculate_frac_number_of_documents_per_author(self):
if "Authors" not in self.data.columns:
return
self.logging_info("Counting frac number of documents per author...")
self.data["Frac_Num_Documents"] = self.data.Authors.map(
lambda w: 1.0 / len(w.split(";")) if not pd.isna(w) else 0
)
def remove_no_author_id_available(self):
if "Authors_ID" not in self.data.columns:
return
self.data["Authors_ID"] = self.data.Authors_ID.map(
lambda w: pd.NA if w == "[No author id available]" else w
)
def disambiguate_author_names(self):
if "Authors" not in self.data.columns or "Authors_ID" not in self.data.columns:
return
self.logging_info("Disambiguate author names ...")
self.data["Authors"] = self.data.Authors.map(
lambda w: w[:-1] if not pd.isna(w) and w[-1] == ";" else w
)
self.data["Authors_ID"] = self.data.Authors_ID.map(
lambda w: w[:-1] if not pd.isna(w) and w[-1] == ";" else w
)
data = self.data[["Authors", "Authors_ID"]]
data = data.dropna()
data["*info*"] = [(a, b) for (a, b) in zip(data.Authors, data.Authors_ID)]
data["*info*"] = data["*info*"].map(
lambda w: [
(u.strip(), v.strip()) for u, v in zip(w[0].split(";"), w[1].split(";"))
]
)
data = data[["*info*"]].explode("*info*")
data = data.reset_index(drop=True)
names_ids = {}
for idx in range(len(data)):
author_name = data.at[idx, "*info*"][0]
author_id = data.at[idx, "*info*"][1]
if author_name in names_ids.keys():
if author_id not in names_ids[author_name]:
names_ids[author_name] = names_ids[author_name] + [author_id]
else:
names_ids[author_name] = [author_id]
ids_names = {}
for author_name in names_ids.keys():
suffix = 0
for author_id in names_ids[author_name]:
if suffix > 0:
ids_names[author_id] = author_name + "(" + str(suffix) + ")"
else:
ids_names[author_id] = author_name
suffix += 1
self.data["Authors"] = self.data.Authors_ID.map(
lambda z: ";".join([ids_names[w.strip()] for w in z.split(";")])
if not pd.isna(z)
else z
)
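# --- Illustrative sketch (standalone helper, not a method of the class above) ---
# disambiguate_author_names() appends "(1)", "(2)", ... whenever a single author
# name maps to several author IDs. The toy mapping below, with invented IDs,
# reproduces that suffixing rule.
def _demo_author_disambiguation():
    names_ids = {"Smith J.": ["111", "222"], "Doe A.": ["333"]}
    ids_names = {}
    for name, ids in names_ids.items():
        for suffix, author_id in enumerate(ids):
            ids_names[author_id] = name if suffix == 0 else "{}({})".format(name, suffix)
    return ids_names  # {'111': 'Smith J.', '222': 'Smith J.(1)', '333': 'Doe A.'}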
def remove_text_in_foreing_languages(self):
if "Title" not in self.data.columns:
return
self.logging_info("Removing part of titles in foreing languages ...")
self.data["Title"] = self.data.Title.map(
lambda w: w[0 : w.find("[")] if not pd.isna(w) and w[-1] == "]" else w
)
def extract_country_names(self):
if "Affiliations" not in self.data.columns:
return
self.logging_info("Extracting country names ...")
self.data["Countries"] = map_(self.data, "Affiliations", extract_country_name)
def extract_country_first_author(self):
if "Countries" not in self.data.columns:
return
self.logging_info("Extracting country of first author ...")
self.data["Country_1st_Author"] = self.data.Countries.map(
lambda w: w.split(";")[0] if isinstance(w, str) else w
)
def reduce_list_of_countries(self):
if "Countries" not in self.data.columns:
return
self.logging_info("Reducing list of countries ...")
self.data["Countries"] = self.data.Countries.map(
lambda w: ";".join(set(w.split(";"))) if isinstance(w, str) else w
)
def transform_author_keywords_to_lower_case(self):
if "Author_Keywords" not in self.data.columns:
return
self.logging_info("Transforming Author Keywords to lower case ...")
self.data["Author_Keywords"] = self.data.Author_Keywords.map(
lambda w: w.lower() if not pd.isna(w) else w
)
self.data["Author_Keywords"] = self.data.Author_Keywords.map(
lambda w: ";".join(sorted([z.strip() for z in w.split(";")]))
if not pd.isna(w)
else w
)
def transform_index_keywords_to_lower_case(self):
if "Index_Keywords" not in self.data.columns:
return
self.logging_info("Transforming Index Keywords to lower case ...")
self.data["Index_Keywords"] = self.data.Index_Keywords.map(
lambda w: w.lower() if not pd.isna(w) else w
)
self.data["Index_Keywords"] = self.data.Index_Keywords.map(
lambda w: ";".join(sorted([z.strip() for z in w.split(";")]))
if not
|
pd.isna(w)
|
pandas.isna
|
"""
Prepare and save dataframes for EDA, model training, and evaluation.
Data sources:
deepdr : https://isbi.deepdr.org
kaggle : https://www.kaggle.com/c/aptos2019-blindness-detection/
Merge the two datasets:
- the deepdr dataset has train, valid, and test splits with image quality and diagnosis labels
- the kaggle dataset has train and test splits with diagnosis labels only
Goal:
- use the deepdr dataset to train the quality model
- use the merged deepdr and kaggle datasets to train the diagnosis model
Therefore the following dataframes are prepared and saved under the ./output folder:
training the quality-check model (deepdr dataset only):
(use the original train set for the train-valid split and the original valid set as test, so that we can evaluate on held-out data)
q_traindf : columns = ['im_path', 'im_quality']
q_testdf : columns = ['im_path', 'im_quality']
training the diagnosis model (merged deepdr and kaggle datasets):
(merge deepdr train, deepdr valid, and kaggle train, then do a train-test split)
d_traindf : columns = ['im_path', 'diagnosis']
d_testdf : columns = ['im_path', 'diagnosis']
to compute a kaggle score:
k_testdf : columns = ['id_code', 'diagnosis']
"""
import pandas as pd
import config
def generate_quality_df():
"""
generate dataframes for training and evaluating the image quality model : deepdr dataset only
(use the original train set for the train-valid split and the original valid set as test, so that we can evaluate on held-out data)
save : ./output/q_traindf.csv and ./output/q_testdf.csv
"""
# read csv containing labels corresponding to the images
train_csv= f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/regular-fundus-training.csv'
test_csv = f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/regular-fundus-validation.csv'
print(config.PATH_DISK)
print(config.PATH_VM)
print(train_csv)
train = pd.read_csv(train_csv)
test = pd.read_csv(test_csv)
# generate dataframe with image path and overall quality label
traindf = pd.DataFrame()
testdf = pd.DataFrame()
traindf['im_path'] = train['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/Images'+x[24:]) # mac
testdf['im_path'] = test['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/Images'+x[26:]) # mac
traindf['im_quality'] = train['Overall quality'].astype('str')
testdf['im_quality'] = test['Overall quality'].astype('str')
# save output
traindf.to_csv(f'{config.PATH_VM}/data/output/q_traindf.csv')
testdf.to_csv(f'{config.PATH_VM}/data/output/q_testdf.csv')
#print(f'quality : total {traindf.shape[0] + testdf.shape[0]}, train {traindf.shape[0]}, test {testdf.shape[0]}')
print('quality : total {}, train {}, test {}'.format(traindf.shape[0] + testdf.shape[0], traindf.shape[0], testdf.shape[0]))
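# --- Hedged sketch (not called by this script) ---
# The module docstring says the deepdr and kaggle diagnosis frames are merged and
# then split into d_traindf / d_testdf during model training. A minimal pandas-only
# version of such a split on a toy frame; the 80/20 ratio and the column values are
# assumptions made for this example only.
def _demo_train_test_split(frac_train=0.8, seed=42):
    toy = pd.DataFrame({'im_path': ['img_{}.jpg'.format(i) for i in range(10)],
                        'diagnosis': [str(i % 5) for i in range(10)]})
    d_traindf = toy.sample(frac=frac_train, random_state=seed)
    d_testdf = toy.drop(d_traindf.index)
    return d_traindf.reset_index(drop=True), d_testdf.reset_index(drop=True)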
def generate_diagnosis_df_deepdr():
"""
prepare the dataframe for training the diagnosis model : using deepdr data
Note : this dataframe will be merged with the one built from the kaggle dataset.
In the kaggle dataset, train and valid images are not separated, so train and
valid are also merged here; after merging with the kaggle dataset, the train
and valid sets are split again in the model-training step.
"""
# read csv containing labels corresponding to the images
train_csv= f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/regular-fundus-training.csv'
valid_csv = f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/regular-fundus-validation.csv'
train = pd.read_csv(train_csv)
valid = pd.read_csv(valid_csv)
# generate dataframe with image path and diagnosis label
traindf = pd.DataFrame()
validdf = pd.DataFrame()
traindf['im_path'] = train['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/Images'+x[24:]) # mac
validdf['im_path'] = valid['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/Images'+x[26:]) # mac
traindf['diagnosis'] = train['patient_DR_Level'].astype('str')
validdf['diagnosis'] = valid['patient_DR_Level'].astype('str')
return
|
pd.concat([traindf, validdf])
|
pandas.concat
|
import warnings
from decimal import Decimal
import numpy as np
from tafra import Tafra, object_formatter
import pandas as pd # type: ignore
from typing import Dict, List, Any, Iterator
import pytest # type: ignore
from unittest.mock import MagicMock
class TestClass:
...
class Series:
name: str = 'x'
values: np.ndarray = np.arange(5)
dtype: str = 'int'
class DataFrame:
_data: Dict[str, Series] = {'x': Series(), 'y': Series()}
columns: List[str] = ['x', 'y']
dtypes: List[str] = ['int', 'int']
def __getitem__(self, column: str) -> Series:
return self._data[column]
def __setitem__(self, column: str, value: np.ndarray) -> None:
self._data[column].values = value
print = MagicMock()
def build_tafra() -> Tafra:
return Tafra({
'x': np.array([1, 2, 3, 4, 5, 6]),
'y': np.array(['one', 'two', 'one', 'two', 'one', 'two'], dtype='object'),
'z': np.array([0, 0, 0, 1, 1, 1])
})
def check_tafra(t: Tafra) -> bool:
assert len(t._data) == len(t._dtypes)
for c in t.columns:
assert isinstance(t[c], np.ndarray)
assert isinstance(t.data[c], np.ndarray)
assert isinstance(t._data[c], np.ndarray)
assert isinstance(t.dtypes[c], str)
assert isinstance(t._dtypes[c], str)
assert t._rows == len(t._data[c])
pd.Series(t._data[c])
_ = t.to_records()
_ = t.to_list()
_ = t.to_list()
|
pd.DataFrame(t._data)
|
pandas.DataFrame
|
"""
User-specific code for pgrid.
You would edit the information to reflect whatever grid you are working on.
"""
import numpy as np
import pandas as pd
from lo_tools import zfun, Lfun
import sys
from pathlib import Path
pth = Path(__file__).absolute().parent.parent.parent / 'LO' / 'pgrid'
if str(pth) not in sys.path:
sys.path.append(str(pth))
import gfun_utility as gfu
import gfun
# This is the name of the grid that you are working on.
gridname = 'ae0'
# default s-coordinate info (could override below)
s_dict = {'THETA_S': 4, 'THETA_B': 2, 'TCLINE': 10, 'N': 30,
'VTRANSFORM': 2, 'VSTRETCHING': 4}
# Set the gridname and tag to use when creating the Ldir paths.
# They are used for accessing the river tracks, which may be developed for one
# grid but reused in others.
if gridname in ['ai0','hc0', 'sal0', 'so0']:
# these cases reuse (all or some of) the LiveOcean cas6 model rivers
base_gridname = 'cas6'
base_tag = 'v3'
elif gridname in ['ae0']:
# for analytical cases we create the river info and track in
# make_initial_info() below, but we still assign gridname and tag so
# that they get saved in the right places
base_gridname = 'ae0'
base_tag = 'v0'
def make_initial_info(gridname=gridname):
# Add an elif section for your grid.
if gridname == 'sal0':
# A Salish Sea grid, used as an example.
dch = gfun.default_choices()
aa = [-124, -122, 47, 49]
res = 600 # target resolution (m)
Lon_vec, Lat_vec = gfu.simple_grid(aa, res)
dch['nudging_edges'] = ['north', 'west']
# Make the rho grid.
lon, lat = np.meshgrid(Lon_vec, Lat_vec)
# initialize the bathymetry array
z = np.nan * lon
# add bathymetry automatically from files
for t_fn in dch['t_list']:
print('\nOPENING BATHY FILE: ' + t_fn.name)
tlon_vec, tlat_vec, tz = gfu.load_bathy_nc(t_fn)
tlon, tlat = np.meshgrid(tlon_vec, tlat_vec)
z_part = zfun.interp2(lon, lat, tlon, tlat, tz)
# put good values of z_part in z
z[~np.isnan(z_part)] = z_part[~np.isnan(z_part)]
if dch['use_z_offset']:
z = z + dch['z_offset']
elif gridname == 'hc0':
dch = gfun.default_choices()
aa = [-123.2, -122.537, 47.3, 47.9]
res = 100 # target resolution (m)
Lon_vec, Lat_vec = gfu.simple_grid(aa, res)
dch['t_list'] = [dch['t_dir'] / 'psdem' / 'PS_27m.nc']
dch['nudging_edges'] = ['north']
dch['nudging_days'] = (0.1, 1.0)
# Make the rho grid.
lon, lat = np.meshgrid(Lon_vec, Lat_vec)
# initialize the bathymetry array
z = np.nan * lon
# add bathymetry automatically from files
for t_fn in dch['t_list']:
print('\nOPENING BATHY FILE: ' + t_fn.name)
tlon_vec, tlat_vec, tz = gfu.load_bathy_nc(t_fn)
tlon, tlat = np.meshgrid(tlon_vec, tlat_vec)
z_part = zfun.interp2(lon, lat, tlon, tlat, tz)
# put good values of z_part in z
z[~np.isnan(z_part)] = z_part[~np.isnan(z_part)]
if dch['use_z_offset']:
z = z + dch['z_offset']
elif gridname == 'ai0':
dch = gfun.default_choices()
aa = [-122.82, -122.36, 47.758, 48.18]
res = 100 # target resolution (m)
Lon_vec, Lat_vec = gfu.simple_grid(aa, res)
dch['t_list'] = [dch['t_dir'] / 'psdem_10m' / 'PS_30m.nc']
dch['nudging_edges'] = ['north', 'south', 'east', 'west']
dch['nudging_days'] = (0.1, 1.0)
# Make the rho grid.
lon, lat = np.meshgrid(Lon_vec, Lat_vec)
# initialize the bathymetry array
z = np.nan * lon
# add bathymetry automatically from files
for t_fn in dch['t_list']:
print('\nOPENING BATHY FILE: ' + t_fn.name)
tlon_vec, tlat_vec, tz = gfu.load_bathy_nc(t_fn)
tlon, tlat = np.meshgrid(tlon_vec, tlat_vec)
z_part = zfun.interp2(lon, lat, tlon, tlat, tz)
# put good values of z_part in z
z[~np.isnan(z_part)] = z_part[~np.isnan(z_part)]
if dch['use_z_offset']:
z = z + dch['z_offset']
elif gridname == 'so0':
# South Sound
dch = gfun.default_choices()
dch['z_offset'] = -1.3 # NAVD88 is 1.3 m below MSL at Seattle
dch['excluded_rivers'] = ['skokomish']
aa = [-123.13, -122.76, 47, 47.42]
res = 50 # target resolution (m)
Lon_vec, Lat_vec = gfu.simple_grid(aa, res)
dch['t_list'] = [dch['t_dir'] / 'srtm15' / 'topo15.nc',
dch['t_dir'] / 'psdem_10m' / 'PS_30m.nc']
dch['nudging_edges'] = ['east']
dch['nudging_days'] = (0.1, 1.0)
# Make the rho grid.
lon, lat = np.meshgrid(Lon_vec, Lat_vec)
# initialize the bathymetry array
z = np.nan * lon
# add bathymetry automatically from files
for t_fn in dch['t_list']:
print('\nOPENING BATHY FILE: ' + t_fn.name)
tlon_vec, tlat_vec, tz = gfu.load_bathy_nc(t_fn)
tlon, tlat = np.meshgrid(tlon_vec, tlat_vec)
z_part = zfun.interp2(lon, lat, tlon, tlat, tz)
# put good values of z_part in z
z[~np.isnan(z_part)] = z_part[~np.isnan(z_part)]
if dch['use_z_offset']:
z = z + dch['z_offset']
elif gridname == 'ae0':
# analytical model estuary
dch = gfun.default_choices()
lon_list = [-2, 0, 1, 2]
x_res_list = [2500, 500, 500, 2500]
lat_list = [43, 44.9, 45.1, 47]
y_res_list = [2500, 500, 500, 2500]
Lon_vec, Lat_vec = gfu.stretched_grid(lon_list, x_res_list,
lat_list, y_res_list)
lon, lat = np.meshgrid(Lon_vec, Lat_vec)
dch['analytical'] = True
dch['nudging_edges'] = ['north', 'south', 'west']
dch['use_z_offset'] = False
# tidy up dch
dch['z_offset'] = 0.0
dch['t_dir'] = 'BLANK'
dch['t_list'] = ['BLANK']
# make bathymetry by hand
z = np.zeros(lon.shape)
x, y = zfun.ll2xy(lon, lat, 0, 45)
zshelf = x * 1e-3
zestuary = -20 + 20*x/1e5 + 20/(1e4)*np.abs(y)
z = zshelf
mask = zestuary < z
z[mask] = zestuary[mask]
# create a river file
Ldir = Lfun.Lstart(gridname=base_gridname, tag=base_tag)
ri_dir = Ldir['LOo'] / 'pre' / 'river' / Ldir['gtag']
Lfun.make_dir(ri_dir)
gri_fn = ri_dir / 'river_info.csv'
with open(gri_fn, 'w') as rf:
rf.write('rname,usgs,ec,nws,ratio,depth,flow_units,temp_units\n')
rf.write('creek0,,,,1.0,5.0,m3/s,degC\n')
# and make a track for the river
track_dir = ri_dir / 'tracks'
Lfun.make_dir(track_dir)
track_fn = track_dir / 'creek0.p'
track_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import pandas as pd
from pandas.api.types import is_number
import datetime
def format_date(dt):
"""
Returns formated date
"""
if dt is None:
return dt
return dt.strftime("%Y-%m-%d")
def sanitize_dates(start, end):
"""
Return (datetime_start, datetime_end) tuple
"""
if is_number(start):
# regard int as year
start = datetime.datetime(start, 1, 1)
start = pd.to_datetime(start)
if
|
is_number(end)
|
pandas.api.types.is_number
|
# [AUTHOR], [YEAR].
import sys, pickle, csv, swifter, re
import pandas as pd
def text_to_cols(data, cols, positive_dict, exclusions_dict = None):
# Detect positives
output_dict = init_dict(positive_dict.keys())
for K, V in positive_dict.items():
mid_dict = init_dict(positive_dict[K])
for v in V:
mid_dict[v] = search_and_merge(data, cols, v)
output_dict[K] = pd.DataFrame(mid_dict).swifter.apply(lambda row: row.any(), axis = 1)
if type(exclusions_dict) is not dict:
return pd.DataFrame(output_dict)
# Detect false positives
false_positives = init_dict(exclusions_dict.keys())
for K, V in exclusions_dict.items():
mid_dict = init_dict(exclusions_dict[K])
for v in V:
mid_dict[v] = search_and_merge(data, cols, v)
false_positives[K] = pd.DataFrame(mid_dict).swifter.apply(lambda row: row.any(), axis = 1)
# Remove false positives
all_positives =
|
pd.DataFrame(output_dict)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import glob as glob
import seaborn as sns
import matplotlib.pyplot as plt
def filter_TDP(data_frame, thresh = 0.3):
"""[optional function that removes molecules that do not transition below threshold at some point]
Args:
data_frame ([dataframe]): [dataframe that has been cleaned to remove outliers by 'remove_outliers' function]
thresh (float, optional): [FRET value to threshold - will filter molecules and keep those that go below the thresh]. Defaults to 0.3.
Returns:
[dataframe]: [contains only molecules of interest]
"""
filtered_mol = []
for treatment, df in data_frame.groupby("treatment_name"):
mol_list = df[(df["FRET_before"] <= thresh)|(df["FRET_after"] <= thresh)].Molecule.unique().tolist()
filtered = df[df["Molecule"].isin(mol_list)]
filtered_mol.append(filtered)
filtered_mol = pd.concat(filtered_mol)
return filtered_mol
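# --- Hedged usage sketch (toy data invented for illustration) ---
# filter_TDP() keeps only molecules whose FRET_before or FRET_after dips to or
# below the threshold at least once. The miniature dataframe below exercises that
# rule: molecule 1 is kept, molecule 2 is dropped.
def _demo_filter_TDP():
    toy = pd.DataFrame({
        "treatment_name": ["A"] * 4,
        "Molecule": [1, 1, 2, 2],
        "FRET_before": [0.8, 0.2, 0.9, 0.7],
        "FRET_after": [0.7, 0.6, 0.8, 0.9],
    })
    return filter_TDP(toy, thresh=0.3)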
def remove_outliers(compiled, plot_type, data_type = "raw"):
"""[removes outliers from dataframe]
Args:
compiled ([dataframe]): [raw dataframe containing outliers to be removed]
plot_type ([str]): [string can either be 'hist' for histogram data or 'TDP' for TDP data]
data_type (str, optional): [removes either raw FRET values or 'idealized' FRET values]. Defaults to "raw".
Returns:
[dataframe]: [returns cleaned data without outliers]
"""
if plot_type == 'hist':
if data_type == "raw":
rawFRET = compiled[(compiled[3] > -0.5) & (compiled[3] < 1.5)].copy()
return rawFRET
if data_type == "idealized":
idealizedFRET = compiled[(compiled[4] > -0.5) & (compiled[4] < 1.5)].copy()
return idealizedFRET
elif plot_type == 'TDP':
outliers = compiled[(compiled["FRET before transition"] < -0.5)|(compiled["FRET before transition"] > 1.5)|(compiled["FRET after transition"] < -0.5) | (compiled["FRET after transition"] > 1.5)].index
compiled.drop(outliers, inplace = True)
return compiled
else:
print('invalid plot type, please set plot_type as "hist" or "TDP" - you idiot')
def cleanup_dwell(data, fps, thresh, first_dwell = "delete"):
"""[Will convert the data frome frame number to unit of time (seconds) and then delete all dwell times
that are smaller than the set threshold (defined previously) in seconds. Will also delete the first dwell
state from each molecule]
Args:
data ([dataframe]): [raw data]
first_dwell (str, optional): [Set to 'keep' to keep the first dwell state from each molecule otherwise
Will delete the first dwell state from each molecule by default]. Defaults to "delete".
Returns:
[dataframe]: [Data is now cleaned and ready for subsequent processing]
"""
if first_dwell == "delete":
filtered = []
for molecule, df in data.groupby("Molecule"):
filtered.append(df.iloc[1:])
filtered = pd.concat(filtered) #####filtered = pd.concat([df.iloc[1:] for molecule, df in A.groupby("Molecule")]) ##code here is the same as the for loop but in a list comprehension format
filtered["Time (s)"] = filtered["Time"]/fps
filtered = filtered[filtered["Time (s)"] >= thresh]
return filtered
if first_dwell == "keep":
data["Time (s)"] = data["Time"]/fps
data = data[data["Time (s)"] >= thresh]
return data
def filter_dwell(df, FRET_thresh, headers):
"""[Will take the cleaned TDP data and will filter it using a threshold (defined by FRET_thresh)
into seperate types of transitions (e.g., < 0.5 to > 0.5 FRET if FRET_thresh is = 0.5 is one example
of a type of transition).
Args:
df ([dataframe]): [contains cleaned data that has been processed using the 'cleanup_dwell' function]
Returns:
[dataframe]: [contains dwell time that has been categorized into each transition class]
"""
filtered_lowtohigh = df[(df["FRET_before"] < FRET_thresh) & (df["FRET_after"] > FRET_thresh)].copy()
filtered_lowtolow = df[(df["FRET_before"] < FRET_thresh) & (df["FRET_after"] < FRET_thresh)].copy()
filtered_hightolow = df[(df["FRET_before"] > FRET_thresh) & (df["FRET_after"] < FRET_thresh)].copy()
filtered_hightohigh = df[(df["FRET_before"] > FRET_thresh) & (df["FRET_after"] > FRET_thresh)].copy()
dataf = [filtered_lowtolow["Time (s)"], filtered_lowtohigh["Time (s)"], filtered_hightohigh["Time (s)"], filtered_hightolow["Time (s)"]]
df_col = pd.concat(dataf, axis = 1, keys = headers)
df_col = df_col.apply(lambda x:pd.Series(x.dropna().values)) ## removes NaN values from each column in df_col
return df_col
def transition_frequency(filt):
"""calculates the transition frequency (i.e., the number of transitions per transition class divided
by the total number of transitions observed). For example if there are 40 transitions total, and a
< 0.5 to > 0.5 transition occurs 10 times, then the transition probability for that transition type is
0.25 or 25%.
Args:
filt (dataframe): contains the dataframe with filtered data (cleaned data has been filtered by
'filter_dwell' function)
Returns:
dataframe: returns a dataframe containing the percentage for each transition type
"""
count_df_col = pd.DataFrame(filt.count(axis = 0)).transpose()
count_df_col["sum"] = count_df_col.sum(axis = 1)
dwell_frequency = pd.DataFrame([(count_df_col[column]/count_df_col["sum"])*100 for column in count_df_col]).transpose()
print(dwell_frequency)
return dwell_frequency
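# --- Illustrative sketch (not part of the analysis pipeline) ---
# The docstring of transition_frequency() works through the arithmetic: with 40
# transitions in total, a class that occurs 10 times has a frequency of 25%. The
# toy counts below (hypothetical column names) reproduce that calculation.
def _demo_transition_frequency():
    counts = pd.DataFrame({"low->high": [10], "high->low": [20],
                           "high->high": [6], "low->low": [4]})
    total = counts.sum(axis=1)  # 40 transitions in total
    freq = counts.div(total, axis=0) * 100
    return freq  # "low->high" comes out as 25.0 (%), matching the docstring example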
def calculate_mean(filtered_data, treatment_name):
"""calculates the mean dwell time of each type of transition class
Args:
filtered_data (dataframe): dataframe generated after the 'cleanup_dwell' and 'filter_dwell' functions
have been run
treatment_name (str): not required, only present to receive input from for loop. set to treatment_name
Returns:
[dataframe]: returns dataframe containing the mean of each transition class
"""
mean_dwell = pd.DataFrame([filtered_data.iloc[0:].mean()])
mean_dwell["sample"] = treatment_name
return mean_dwell
def float_generator(data_frame, treatment, FRET_thresh):
"""Will generate the float values used to scale the size of the arrows when plotting the summary heatmap
Args:
data_frame (dataframe): Takes dataframe containing the transition frequencies generated using the
'transition_frequency' function
treatment (str): will take 'treatment' from for loop. Leave as treatment.
Returns:
[list]: returns the list of values used to scale the arrows
"""
transition_frequency_arrow = data_frame[data_frame["sample"]== treatment]
normalised_number_lowtohigh = float(np.array(transition_frequency_arrow[f"< {FRET_thresh} to > {FRET_thresh}"])/1000)
normalised_number_hightolow = float(np.array(transition_frequency_arrow[f"> {FRET_thresh} to < {FRET_thresh}"])/1000)
normalised_number_hightohigh = float(np.array(transition_frequency_arrow[f"> {FRET_thresh} to > {FRET_thresh}"])/1000)
normalised_number_lowtolow = float(np.array(transition_frequency_arrow[f"< {FRET_thresh} to < {FRET_thresh}"])/1000)
arrow_list = [normalised_number_lowtohigh,normalised_number_hightolow,normalised_number_hightohigh,normalised_number_lowtolow]
return arrow_list
def heatmap_prep(histogram_data, treatment, FRET_thresh):
"""Takes the data and calculates the number of data points total (total), how much data points are below
a threshold (time below) and above a threshold (time above) and will use these values to calculate what proportion
of time the FRET is below or above thresh. Will then feed into the heatmap when plotting
Args:
histogram_data (dataframe): data used to plot the histogram - will include all the cleaned FRET and
idealized FRET values (time not a factor here, just number of frames)
treatment (str): only used to be fed from for loop. do not change
Returns:
df: contains the data required to plot the heatmap. will be as a proportion of time spent below or above
threshold
"""
subset_data = histogram_data[histogram_data["treatment_name"]==treatment]
total = len(subset_data[(subset_data["FRET"] < FRET_thresh) | (subset_data["FRET"] > FRET_thresh)])
subset_data_largerthanthresh = len(subset_data[subset_data["FRET"] > FRET_thresh])
subset_data_lessthanthresh = len(subset_data[subset_data["FRET"] < FRET_thresh])
time_below = (subset_data_lessthanthresh/total)
time_above = (subset_data_largerthanthresh/total)
thresh_dicts = {f"< {FRET_thresh}":[time_below], f"> {FRET_thresh}":[time_above] }
thresh_dfs = pd.DataFrame(thresh_dicts)
#thresh_dfs["treatment"] = treatment
return thresh_dfs
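# --- Illustrative sketch (not part of the analysis pipeline) ---
# heatmap_prep() reduces a FRET trace to the fraction of frames spent below and
# above the threshold. A stand-alone version of that proportion calculation on
# made-up values, assuming a threshold of 0.5:
def _demo_fret_proportions(thresh=0.5):
    fret = pd.Series([0.1, 0.2, 0.7, 0.8, 0.4, 0.9])
    time_below = (fret < thresh).mean()
    time_above = (fret > thresh).mean()
    return pd.DataFrame({"< {}".format(thresh): [time_below],
                         "> {}".format(thresh): [time_above]})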
def mean_dwell_prep(mean_dwell_data, treatment, FRET_thresh):
"""calculates the mean values for each transition class and converts to float values for plotting in
summary heatmap
Args:
mean_dwell_data (dataframe): dataframe containing mean values
treatment (str): used for a for loop. do not change.
Returns:
[list]: contains list of float values
"""
subset_mean_dwell = mean_dwell_data[mean_dwell_data["sample"]== treatment]
meandwell_lowtohigh = float(np.array(subset_mean_dwell[f"< {FRET_thresh} to > {FRET_thresh}"]))
meandwell_hightolow = float(np.array(subset_mean_dwell[f"> {FRET_thresh} to < {FRET_thresh}"]))
meandwell_hightohigh = float(np.array(subset_mean_dwell[f"> {FRET_thresh} to > {FRET_thresh}"]))
meandwell_lowtolow = float(np.array(subset_mean_dwell[f"< {FRET_thresh} to < {FRET_thresh}"]))
mean_list = [meandwell_lowtohigh,meandwell_hightolow,meandwell_hightohigh,meandwell_lowtolow]
return mean_list
def file_reader(input_folder, data_type, frame_rate = False, column_names = False):
"""will import data
Args:
input_folder (directory): where data is stored
data_type (str): what data will be used to plot, needs to be either 'hist', 'TDP', 'transition_frequency'
or 'other'.
Returns:
dataframe: dataframe with data to be used in subsequent steps
"""
if data_type == 'hist':
filenames = glob.glob(input_folder + "/*.dat")
dfs = []
for filename in filenames:
molecule_number = filename.split('\\')[1].split('_')[0]
hist_data =
|
pd.read_table(filename, sep="\s+", header=None)
|
pandas.read_table
|
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import zipfile
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras import Model
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
import pathlib
import pandas as pd
import pickle
import csv
import random
random.seed(10)
import time
from datetime import datetime
from datetime import timedelta
import shutil
import matplotlib.image as mpimg
print("\n" * 3)
delimeter = "*" * 100
print("*" * 50)
print("*" * 50)
print("%s Going to print if GPU is used" % delimeter)
print("%s Num GPUs Available: %d" % (delimeter, len(tf.config.list_physical_devices('GPU'))))
print(tf.config.list_physical_devices('GPU'))
# ============================================================== Utils
def load_obj(base_dir, name):
file_path = os.path.join(base_dir, "pickle", name + ".pkl")
with open(file_path, 'rb') as f:
return pickle.load(f)
def save_obj(obj, base_dir, name):
dir_path = os.path.join(base_dir, "pickle")
create_directory(dir_path)
file_path = os.path.join(dir_path, name + ".pkl")
with open(file_path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
print("Saved object to a file: %s" % (str(file_path)))
def save_df(df, file_name, append=False):
if append:
df.to_csv(file_name, index=False, quotechar='"', quoting=csv.QUOTE_NONNUMERIC, mode="a", header=False)
else:
df.to_csv(file_name, index=False, quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
def save_df(df, base_dir, file_name):
file_name = os.path.join(base_dir, file_name)
df.to_csv(file_name, index=False, quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
def save_df(df, file_name):
df.to_csv(file_name, index=False, quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
def remove_directory(path):
if os.path.exists(path):
print("%s path exists and removing it." % path)
shutil.rmtree(path)
def remove_file(file_name):
if (os.path.isfile(file_name)):
print("Output file %s exists and removing it." % file_name)
os.remove(file_name)
def create_directory(dir):
if(not os.path.exists(dir)):
print("Creating directory %s." % dir)
os.makedirs(dir)
else:
print("Directory %s already exists and so returning." % dir)
def remove_and_create_directory(dir):
print("Going to REMOVE and CREATE directory: %s" % dir)
remove_directory(dir)
create_directory(dir)
def get_list_of_image_from_directory(dir):
res = list(pathlib.Path(dir).glob("**/*.jpg"))
res += list(pathlib.Path(dir).glob("**/*.png"))
print("Total %d images found in directory %s" % (len(res), dir))
return res
# ===================================================================================== Data Augmentation
def get_data_generator_from_directory(dir_path, is_test_data=False):
# Data generator parameters
gen_params = {"featurewise_center":False,\
"samplewise_center":False,\
"featurewise_std_normalization":False,\
"samplewise_std_normalization":False,\
"zca_whitening":False,\
"rotation_range":20,\
"width_shift_range":0.1,\
"height_shift_range":0.1, \
"shear_range":0.2, \
"zoom_range":0.1,\
"horizontal_flip":True,\
"vertical_flip":True}
shuffle = True
if(is_test_data):
generator = ImageDataGenerator(preprocessing_function = tf.keras.applications.efficientnet.preprocess_input)
shuffle = False
else:
# For training and validation dataset
generator = ImageDataGenerator(**gen_params, preprocessing_function = tf.keras.applications.efficientnet.preprocess_input)
data_generator = generator.flow_from_directory(
directory = dir_path,
target_size=(img_W, img_H),
color_mode="rgb",
classes= CLASS_NAMES,
class_mode="categorical",
batch_size=BS,
shuffle=shuffle,
seed=84,
interpolation="nearest",
)
return data_generator
class MyThresholdCallback(tf.keras.callbacks.Callback):
def __init__(self, threshold):
super(MyThresholdCallback, self).__init__()
self.threshold = threshold
# This will stop the training when validation accuracy is equal to or above the threshold (100% by default)
def on_epoch_end(self, epoch, logs=None):
val_acc = logs["val_accuracy"]
print("=========================> Epoch is finished and validation accuracy is: %.2f" % val_acc, flush=True)
if val_acc >= self.threshold:
print("Validation accurary is higher than the threadhold %.2f and so stopping training" % (self.threshold))
self.model.stop_training = True
def get_callbacks(model_checkpoint_path, threadhold=1.0):
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience = 3)
monitor_it = tf.keras.callbacks.ModelCheckpoint(model_checkpoint_path, monitor='val_loss',\
verbose=0,save_best_only=True,\
save_weights_only=False,\
mode='min')
def scheduler(epoch, lr):
if epoch%10 == 0 and epoch!= 0:
lr = lr/2
return lr
lr_schedule = tf.keras.callbacks.LearningRateScheduler(scheduler,verbose = 0)
threshold_callback = MyThresholdCallback(threshold=threadhold)
return early_stop, monitor_it, lr_schedule, threshold_callback
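# --- Hedged usage sketch (not part of the original training flow) ---
# Shows how the callbacks returned by get_callbacks() could be wired into
# model.fit(). The tiny model, the random data, and the "demo_best.h5"
# checkpoint path are placeholders invented for this example.
def _demo_callbacks_usage():
    x = np.random.rand(32, 8).astype("float32")
    y = np.random.randint(0, 2, size=(32,))
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(16, activation="relu", input_shape=(8,)),
        tf.keras.layers.Dense(2, activation="softmax"),
    ])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    early_stop, monitor_it, lr_schedule, threshold_callback = get_callbacks("demo_best.h5", 0.99)
    model.fit(x, y, validation_split=0.25, epochs=2, verbose=0,
              callbacks=[early_stop, monitor_it, lr_schedule, threshold_callback])
    return model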
# ============================================================== Model design ======================================
def baseline_model_1():
model = tf.keras.models.Sequential(name="Baseline_1VGG")
model.add(tf.keras.layers.BatchNormalization(input_shape=INPUT_SHAPE))
model.add(tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
# model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
# model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax'))
return model
def baseline_model_2():
model = tf.keras.models.Sequential(name="Baseline_1VGG_dropout")
model.add(tf.keras.layers.BatchNormalization(input_shape=INPUT_SHAPE))
model.add(tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
model.add(tf.keras.layers.Dropout(0.20))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.20))
model.add(tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax'))
return model
# def baseline_model():
# model = tf.keras.models.Sequential(name="Baseline_CNN")
# model.add(tf.keras.layers.BatchNormalization(input_shape=INPUT_SHAPE))
# model.add(tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
# model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
# model.add(tf.keras.layers.Dropout(0.25))
# model.add(tf.keras.layers.BatchNormalization())
# model.add(tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
# model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
# model.add(tf.keras.layers.Dropout(0.25))
# model.add(tf.keras.layers.Flatten())
# model.add(tf.keras.layers.Dense(128, activation='relu'))
# model.add(tf.keras.layers.Dropout(0.5))
# model.add(tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax'))
# return model
# uses the global variables CLASS_NAMES and INPUT_SHAPE
def get_VGG19_model():
model = tf.keras.applications.vgg19.VGG19(
include_top=True,
weights=None,
input_tensor=None,
input_shape=INPUT_SHAPE,
pooling=None,
classes=len(CLASS_NAMES),
classifier_activation='softmax'
)
# print(model.summary())
return model
def get_VGG19_transfer_learning_model():
base_model = tf.keras.applications.vgg19.VGG19(
include_top=False,
weights='imagenet',
input_shape=INPUT_SHAPE,
)
base_model.trainable = False
# print(model.summary())
x1 = base_model(base_model.input, training = False)
x2 = tf.keras.layers.Flatten()(x1)
out = tf.keras.layers.Dense(len(CLASS_NAMES),activation = 'softmax')(x2)
model = tf.keras.Model(inputs = base_model.input, outputs=out)
print(model.summary())
return model
# uses the global variables CLASS_NAMES and INPUT_SHAPE
def get_Resnet50_model():
model = tf.keras.applications.resnet50.ResNet50(
include_top=True,
weights=None,
input_tensor=None,
input_shape=INPUT_SHAPE,
pooling=None,
classes=len(CLASS_NAMES),
classifier_activation='softmax'
)
# print(model.summary())
return model
def get_Resnet50_transfer_learning_model():
base_model = tf.keras.applications.resnet50.ResNet50(
include_top=False,
weights='imagenet',
input_shape=INPUT_SHAPE,
)
base_model.trainable = False
# print(model.summary())
x1 = base_model(base_model.input, training = False)
x2 = tf.keras.layers.Flatten()(x1)
out = tf.keras.layers.Dense(len(CLASS_NAMES),activation = 'softmax')(x2)
model = tf.keras.Model(inputs = base_model.input, outputs=out)
print(model.summary())
return model
def get_EfficientNet_model():
model = tf.keras.applications.efficientnet.EfficientNetB7(
include_top=True,
weights=None,
input_tensor=None,
input_shape=INPUT_SHAPE,
pooling=None,
classes=len(CLASS_NAMES),
classifier_activation='softmax'
)
# print(model.summary())
return model
def get_EfficientNet_transfer_learning_model():
base_model = tf.keras.applications.efficientnet.EfficientNetB7(
include_top=False,
weights='imagenet',
input_shape=INPUT_SHAPE,
)
base_model.trainable = False
# print(model.summary())
x1 = base_model(base_model.input, training = False)
x2 = tf.keras.layers.Flatten()(x1)
out = tf.keras.layers.Dense(len(CLASS_NAMES),activation = 'softmax')(x2)
model = tf.keras.Model(inputs = base_model.input, outputs=out)
print(model.summary())
return model
# ========================================================= Model Initialization =================================
def save_model_summary(model, file_name):
with open(file_name, 'w') as f:
model.summary(print_fn=lambda x: f.write(x + '\n'))
print("Model summary has been saved to file: %s" % file_name)
def get_model(model_name):
if(model_name == "Baseline_model_1"):
return baseline_model_1()
if(model_name == "Baseline_model_2"):
return baseline_model_2()
if(model_name == "VGG19"):
return get_VGG19_model()
if(model_name == "VGG19_transfer_learning"):
return get_VGG19_transfer_learning_model()
if(model_name == "Resnet50"):
return get_Resnet50_model()
if(model_name == "Resnet50_transfer_learning"):
return get_Resnet50_transfer_learning_model()
if(model_name == "EfficientNet"):
return get_EfficientNet_model()
if(model_name == "EfficientNet_transfer_learning"):
return get_EfficientNet_transfer_learning_model()
# summarize history for accuracy
def plot_training_accuracy(history, file_name, title):
fig = plt.gcf()
fig.set_size_inches(10, 5)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title(title)
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
fig.savefig(file_name)
plt.show()
plt.close()
plt.cla()
plt.clf()
# summarize history for accuracy
def plot_training_loss(history, file_name, title):
fig = plt.gcf()
fig.set_size_inches(10, 5)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title(title)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
fig.savefig(file_name)
plt.show()
plt.close()
plt.cla()
plt.clf()
class Result:
def __init__(self, y_true, y_pred, path):
self.y_true = y_true
self.y_pred = y_pred
self.path = path
def __str__(self):
return "True Label: %s\nPrediction: %s\nPath: %s\n" % (self.y_true, self.y_pred, self.path)
# test_image_files = test_generator.filenames
def get_wrongly_predicted_sample_indexes(y_true, y_pred, files):
wrong_samples = []
for i in range(len(y_true)):
if y_true[i] != y_pred[i]:
res = Result(y_true[i], y_pred[i], files[i])
wrong_samples.append(res)
print(len(wrong_samples))
# print(wrong_samples)
return wrong_samples
def visualize_incorrectly_predicted_samples(incorrectly_predicted_samples, base_dir):
nrows = 8
ncols = 4
fig = plt.gcf()
fig.set_size_inches(ncols*4, nrows*4)
fig.tight_layout()
# fig.subplots_adjust(top=0.2)
fig.suptitle("Randomly sampled incorrectly predicted samples", fontsize="x-large", y=1.01)
for i in range(min(len(incorrectly_predicted_samples), nrows*ncols)):
# file_name = test_images[index]
res = incorrectly_predicted_samples[i]
file_path = os.path.join(base_dir, res.path)
img = mpimg.imread(file_path)
sp = plt.subplot(nrows, ncols, i + 1)
title = "%s" % (res)
sp.set_title(title)
sp.axis('Off')
plt.imshow(img)
plt.tight_layout()
fig.savefig(incorrectly_predicted_samples_file, dpi=200)
# uses the global variables train_generator and test_generator
def get_true_and_pred_labels(y_pred_model):
y_pred_indices = np.argmax(y_pred_model, axis=1)
labels = (train_generator.class_indices)
labels = dict((v,k) for k,v in labels.items())
y_true_classes = test_generator.classes
y_true_labels = [ labels[k] for k in y_true_classes]
y_pred_labels = [labels[k] for k in y_pred_indices]
# print(y_pred_labels[:10])
test_image_files = test_generator.filenames
return y_true_labels, y_pred_labels, test_image_files
def save_to_csv(y_true_labels, y_pred_labels, test_image_files, output_file):
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""Univariate anomaly detection module."""
__version__ = '1.0.0'
from typing import Dict
from fastapi import FastAPI
from pydantic import BaseModel
from adtk.detector import PersistAD, ThresholdAD, LevelShiftAD, VolatilityShiftAD
import numpy
import pandas
from . core.tools import aggregate_anomalies
app = FastAPI(
title='Univariate anomaly detection module.',
docs_url='/documentation',
redoc_url='/redoc',
description='Univariate anomaly detection based on historic data for time series.',
version=__version__
)
class Parameters(BaseModel):
"""Parameters for ADTK PersistAD"""
c: float = 3.0
window: str = '28D'
aggregate_anomalies: str = None
class TimeSeriesData(BaseModel):
"""Data provided for point anomaly detection."""
train_data: Dict[str, float]
score_data: Dict[str, float]
parameters: Parameters
class Anomalies(BaseModel):
"""Anomalies"""
anomaly_list: Dict[str, bool]
class ParametersThresholdAD(BaseModel):
"""Parameters for ADTK ThresholdAD"""
high: float = None
low: float = None
aggregate_anomalies: str = None
class TimeSeriesDataThresholdAD(BaseModel):
"""Data provided for point anomaly detection."""
score_data: Dict[str, float]
parameters: ParametersThresholdAD
class ParametersLevelShiftAD(BaseModel):
"""Parameters for ADTK LevelShiftAD"""
c: float = 20.0
window: str = '60S'
aggregate_anomalies: str = None
class TimeSeriesDataLevelShiftAD(BaseModel):
"""Data provided for point anomaly detection."""
score_data: Dict[str, float]
parameters: ParametersLevelShiftAD
class ParametersVolatilityShiftAD(BaseModel):
"""Parameters for ADTK LevelShiftAD"""
c: float = 20.0
window: str = '60S'
aggregate_anomalies: str = None
class TimeSeriesDataVolatilityShiftAD(BaseModel):
"""Data provided for point anomaly detection."""
score_data: Dict[str, float]
parameters: ParametersVolatilityShiftAD
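# --- Hedged sketch (not one of the service endpoints) ---
# Shows, on synthetic data, how the PersistAD detector configured through the
# Parameters model above is typically applied to a pandas Series. The date range,
# the injected spike, and the helper name are invented for illustration.
def _demo_persist_ad():
    from adtk.data import validate_series
    index = pandas.date_range('2021-01-01', periods=200, freq='D')
    values = pandas.Series(numpy.sin(numpy.linspace(0, 20, 200)), index=index)
    values.iloc[150] += 5.0  # obvious point anomaly
    detector = PersistAD(c=3.0, window='28D')
    anomalies = detector.fit_detect(validate_series(values))
    mask = anomalies.fillna(False).astype(bool)
    return anomalies[mask].index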
@app.post('/detect-point-anomalies', response_model=Anomalies)
async def detect_point_anomalies(time_series_data: TimeSeriesData):
"""Apply point anomaly detection and return list of anomalies."""
# create pandas Series from dictionary containing the time series
train_data = pandas.Series(time_series_data.train_data)
train_data.index = pandas.to_datetime(train_data.index, unit='ms')
score_data =
|
pandas.Series(time_series_data.score_data)
|
pandas.Series
|
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.compat import (
pa_version_under2p0,
pa_version_under4p0,
)
from pandas.errors import PerformanceWarning
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
)
import pandas._testing as tm
@pytest.mark.parametrize("pattern", [0, True, Series(["foo", "bar"])])
def test_startswith_endswith_non_str_patterns(pattern):
# GH3485
ser = Series(["foo", "bar"])
msg = f"expected a string object, not {type(pattern).__name__}"
with pytest.raises(TypeError, match=msg):
ser.str.startswith(pattern)
with pytest.raises(TypeError, match=msg):
ser.str.endswith(pattern)
def assert_series_or_index_equal(left, right):
if isinstance(left, Series):
tm.assert_series_equal(left, right)
else: # Index
tm.assert_index_equal(left, right)
def test_iter():
# GH3638
strs = "google", "wikimedia", "wikipedia", "wikitravel"
ser = Series(strs)
with tm.assert_produces_warning(FutureWarning):
for s in ser.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ser.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, str) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == "l"
def test_iter_empty(any_string_dtype):
ser = Series([], dtype=any_string_dtype)
i, s = 100, 1
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
# nothing to iterate over, so the previously defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(any_string_dtype):
ser = Series(["a"], dtype=any_string_dtype)
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert not i
tm.assert_series_equal(ser, s)
def test_iter_object_try_string():
ser = Series(
[
slice(None, np.random.randint(10), np.random.randint(10, 20))
for _ in range(4)
]
)
i, s = 100, "h"
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert i == 100
assert s == "h"
# test integer/float dtypes (inferred by constructor) and mixed
def test_count(any_string_dtype):
ser = Series(["foo", "foofoo", np.nan, "foooofooofommmfoo"], dtype=any_string_dtype)
result = ser.str.count("f[o]+")
expected_dtype = np.float64 if any_string_dtype == "object" else "Int64"
expected = Series([1, 2, np.nan, 4], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_count_mixed_object():
ser = Series(
["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
dtype=object,
)
result = ser.str.count("a")
expected = Series([1, np.nan, 0, np.nan, np.nan, 0, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_repeat(any_string_dtype):
ser = Series(["a", "b", np.nan, "c", np.nan, "d"], dtype=any_string_dtype)
result = ser.str.repeat(3)
expected = Series(
["aaa", "bbb", np.nan, "ccc", np.nan, "ddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
result = ser.str.repeat([1, 2, 3, 4, 5, 6])
expected = Series(
["a", "bb", np.nan, "cccc", np.nan, "dddddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
def test_repeat_mixed_object():
ser = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
result = ser.str.repeat(3)
expected = Series(
["aaa", np.nan, "bbb", np.nan, np.nan, "foofoofoo", np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg, repeat", [[None, 4], ["b", None]])
def test_repeat_with_null(any_string_dtype, arg, repeat):
# GH: 31632
ser = Series(["a", arg], dtype=any_string_dtype)
result = ser.str.repeat([3, repeat])
expected = Series(["aaa", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
def test_empty_str_methods(any_string_dtype):
empty_str = empty = Series(dtype=any_string_dtype)
if any_string_dtype == "object":
empty_int = Series(dtype="int64")
empty_bool = Series(dtype=bool)
else:
empty_int = Series(dtype="Int64")
empty_bool = Series(dtype="boolean")
empty_object = Series(dtype=object)
empty_bytes = Series(dtype=object)
empty_df = DataFrame()
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert "" == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.contains("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.startswith("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.endswith("a"))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.replace("a", "b"))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.match("^a"))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=any_string_dtype),
empty.str.extract("()", expand=True),
)
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=any_string_dtype),
empty.str.extract("()()", expand=True),
)
tm.assert_series_equal(empty_str, empty.str.extract("()", expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=any_string_dtype),
empty.str.extract("()()", expand=False),
)
tm.assert_frame_equal(empty_df, empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(""))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_object, empty_str.str.findall("a"))
tm.assert_series_equal(empty_int, empty.str.find("a"))
tm.assert_series_equal(empty_int, empty.str.rfind("a"))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_object, empty.str.split("a"))
tm.assert_series_equal(empty_object, empty.str.rsplit("a"))
tm.assert_series_equal(empty_object, empty.str.partition("a", expand=False))
tm.assert_frame_equal(empty_df, empty.str.partition("a"))
tm.assert_series_equal(empty_object, empty.str.rpartition("a", expand=False))
tm.assert_frame_equal(empty_df, empty.str.rpartition("a"))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.strip())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.lstrip())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_object, empty_bytes.str.decode("ascii"))
tm.assert_series_equal(empty_bytes, empty.str.encode("ascii"))
# ismethods should always return boolean (GH 29624)
tm.assert_series_equal(empty_bool, empty.str.isalnum())
tm.assert_series_equal(empty_bool, empty.str.isalpha())
tm.assert_series_equal(empty_bool, empty.str.isdigit())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under2p0,
):
tm.assert_series_equal(empty_bool, empty.str.isspace())
tm.assert_series_equal(empty_bool, empty.str.islower())
tm.assert_series_equal(empty_bool, empty.str.isupper())
tm.assert_series_equal(empty_bool, empty.str.istitle())
tm.assert_series_equal(empty_bool, empty.str.isnumeric())
tm.assert_series_equal(empty_bool, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize("NFC"))
table = str.maketrans("a", "b")
tm.assert_series_equal(empty_str, empty.str.translate(table))
@pytest.mark.parametrize(
"method, expected",
[
("isalnum", [True, True, True, True, True, False, True, True, False, False]),
("isalpha", [True, True, True, False, False, False, True, False, False, False]),
(
"isdigit",
[False, False, False, True, False, False, False, True, False, False],
),
(
"isnumeric",
[False, False, False, True, False, False, False, True, False, False],
),
(
"isspace",
[False, False, False, False, False, False, False, False, False, True],
),
(
"islower",
[False, True, False, False, False, False, False, False, False, False],
),
(
"isupper",
[True, False, False, False, True, False, True, False, False, False],
),
(
"istitle",
[True, False, True, False, True, False, False, False, False, False],
),
],
)
def test_ismethods(method, expected, any_string_dtype):
ser = Series(
["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "], dtype=any_string_dtype
)
expected_dtype = "bool" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]"
and pa_version_under2p0
and method == "isspace",
):
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)() for item in ser]
assert list(result) == expected
@pytest.mark.parametrize(
"method, expected",
[
("isnumeric", [False, True, True, False, True, True, False]),
("isdecimal", [False, True, False, False, False, True, False]),
],
)
def test_isnumeric_unicode(method, expected, any_string_dtype):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ Em 3 (FULLWIDTH DIGIT THREE)
ser = Series(["A", "3", "¼", "★", "፸", "３", "four"], dtype=any_string_dtype)
expected_dtype = "bool" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)() for item in ser]
assert list(result) == expected
@pytest.mark.parametrize(
"method, expected",
[
("isnumeric", [False, np.nan, True, False, np.nan, True, False]),
("isdecimal", [False, np.nan, False, False, np.nan, True, False]),
],
)
def test_isnumeric_unicode_missing(method, expected, any_string_dtype):
values = ["A", np.nan, "¼", "★", np.nan, "3", "four"]
ser = Series(values, dtype=any_string_dtype)
expected_dtype = "object" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
def test_split_join_roundtrip(any_string_dtype):
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = ser.str.split("_").str.join("_")
expected = ser.astype(object)
tm.assert_series_equal(result, expected)
def test_split_join_roundtrip_mixed_object():
ser = Series(
["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]
)
result = ser.str.split("_").str.join("_")
expected = Series(
["a_b", np.nan, "asdf_cas_asdf", np.nan, np.nan, "foo", np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
def test_len(any_string_dtype):
ser = Series(
["foo", "fooo", "fooooo", np.nan, "fooooooo", "foo\n", "あ"],
dtype=any_string_dtype,
)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = ser.str.len()
expected_dtype = "float64" if any_string_dtype == "object" else "Int64"
expected = Series([3, 4, 6, np.nan, 8, 4, 1], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_len_mixed():
ser = Series(
["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]
)
result = ser.str.len()
expected = Series([3, np.nan, 13, np.nan, np.nan, 3, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method,sub,start,end,expected",
[
("index", "EF", None, None, [4, 3, 1, 0]),
("rindex", "EF", None, None, [4, 5, 7, 4]),
("index", "EF", 3, None, [4, 3, 7, 4]),
("rindex", "EF", 3, None, [4, 5, 7, 4]),
("index", "E", 4, 8, [4, 5, 7, 4]),
("rindex", "E", 0, 5, [4, 3, 1, 4]),
],
)
def test_index(method, sub, start, end, index_or_series, any_string_dtype, expected):
obj = index_or_series(
["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype
)
expected_dtype = np.int64 if any_string_dtype == "object" else "Int64"
expected = index_or_series(expected, dtype=expected_dtype)
result = getattr(obj.str, method)(sub, start, end)
if index_or_series is Series:
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)(sub, start, end) for item in obj]
assert list(result) == expected
def test_index_not_found_raises(index_or_series, any_string_dtype):
obj = index_or_series(
["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype
)
with pytest.raises(ValueError, match="substring not found"):
obj.str.index("DE")
@pytest.mark.parametrize("method", ["index", "rindex"])
def test_index_wrong_type_raises(index_or_series, any_string_dtype, method):
obj = index_or_series([], dtype=any_string_dtype)
msg = "expected a string object, not int"
with pytest.raises(TypeError, match=msg):
getattr(obj.str, method)(0)
@pytest.mark.parametrize(
"method, exp",
[
["index", [1, 1, 0]],
["rindex", [3, 1, 2]],
],
)
def test_index_missing(any_string_dtype, method, exp):
ser = Series(["abcb", "ab", "bcbe", np.nan], dtype=any_string_dtype)
expected_dtype = np.float64 if any_string_dtype == "object" else "Int64"
result = getattr(ser.str, method)("b")
expected = Series(exp + [np.nan], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_pipe_failures(any_string_dtype):
# #2119
ser = Series(["A|B|C"], dtype=any_string_dtype)
result = ser.str.split("|")
expected = Series([["A", "B", "C"]], dtype=object)
tm.assert_series_equal(result, expected)
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), chunksize=1)
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for 'iteration'"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, **kwargs)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
dict(index_col=0, names=["index", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
tm.assert_frame_equal(result, expected)
@tm.network
def test_url(all_parsers, csv_dir_path):
# TODO: FTP testing
parser = all_parsers
kwargs = dict(sep="\t")
url = (
"https://raw.github.com/pandas-dev/pandas/master/"
"pandas/tests/io/parser/data/salaries.csv"
)
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = dict(sep="\t")
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
tm.assert_frame_equal(url_result, local_result)
except URLError:
# Fails on some systems.
pytest.skip("Failing on: " + " ".join(platform.uname()))
def test_path_path_lib(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_local_path(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0)
)
tm.assert_frame_equal(df, result)
def test_nonexistent_path(all_parsers):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
path = "{}.csv".format(tm.rands(10))
msg = f"File {path} does not exist" if parser.engine == "c" else r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
parser.read_csv(path)
filename = e.value.filename
assert path == filename
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
def test_int64_min_issues(all_parsers):
# see gh-2599
parser = all_parsers
data = "A,B\n0,0\n0,"
result = parser.read_csv(StringIO(data))
expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(all_parsers):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"Numbers": [
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194,
]
}
)
tm.assert_frame_equal(result, expected)
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
with tm.assert_produces_warning(None):
result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == np.float64
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if parser.engine == "c" and parser.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = parser.read_csv(StringIO(data))
assert df.a.dtype == object
@pytest.mark.parametrize("sep", [" ", r"\s+"])
def test_integer_overflow_bug(all_parsers, sep):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, sep=sep)
expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
tm.assert_frame_equal(result, expected)
def test_catch_too_many_names(all_parsers):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
parser = all_parsers
msg = (
"Too many columns specified: expected 4 and found 3"
if parser.engine == "c"
else "Number of passed names did not match "
"number of header fields in the file"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(all_parsers):
# see gh-10022
parser = all_parsers
data = "\n hello\nworld\n"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([" hello", "world"])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(all_parsers):
# see gh-10184
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(columns=["y"], index=Index([], name="x"))
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index(all_parsers):
# see gh-10467
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=["x", "y"])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
)
tm.assert_frame_equal(result, expected)
def test_empty_with_reversed_multi_index(all_parsers):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
)
tm.assert_frame_equal(result, expected)
def test_float_parser(all_parsers):
# see gh-9565
parser = all_parsers
data = "45e-1,4.5,45.,inf,-inf"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(",")]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(all_parsers):
# see gh-12215
df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
data = df.to_csv(index=False)
parser = all_parsers
for precision in parser.float_precision_choices:
df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
tm.assert_frame_equal(df_roundtrip, df)
@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
def test_int64_overflow(all_parsers, conv):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
parser = all_parsers
if conv is None:
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
"00013007854817840016671868",
"00013007854817840016749251",
"00013007854817840016754630",
"00013007854817840016781876",
"00013007854817840017028824",
"00013007854817840017963235",
"00013007854817840018860166",
],
columns=["ID"],
)
tm.assert_frame_equal(result, expected)
else:
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
msg = (
"(Python int too large to convert to C long)|"
"(long too big to convert)|"
"(int too big to convert)"
)
with pytest.raises(OverflowError, match=msg):
parser.read_csv(StringIO(data), converters={"ID": conv})
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
)
def test_int64_uint64_range(all_parsers, val):
# These numbers fall right inside the int64-uint64
# range, so they can be parsed as integers without overflow.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
)
def test_outside_int64_uint64_range(all_parsers, val):
# These numbers fall just outside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([str(val)])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]])
def test_numeric_range_too_wide(all_parsers, exp_data):
# No numerical dtype can hold both negative and uint64
# values, so they should be cast as string.
parser = all_parsers
data = "\n".join(exp_data)
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("iterator", [True, False])
def test_empty_with_nrows_chunksize(all_parsers, iterator):
# see gh-9535
parser = all_parsers
expected = DataFrame(columns=["foo", "bar"])
nrows = 10
data = StringIO("foo,bar\n")
if iterator:
result = next(iter(parser.read_csv(data, chunksize=nrows)))
else:
result = parser.read_csv(data, nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected,msg",
[
# gh-10728: WHITESPACE_LINE
(
"a,b,c\n4,5,6\n ",
dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# gh-10548: EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL_NOP
(
"a,b,c\n4,5,6\n\r",
dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_COMMENT
(
"a,b,c\n4,5,6#comment",
dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# SKIP_LINE
(
"a,b,c\n4,5,6\nskipme",
dict(skiprows=[2]),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
dict(comment="#", skip_blank_lines=False),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# IN_FIELD
(
"a,b,c\n4,5,6\n ",
dict(skip_blank_lines=False),
DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL
(
"a,b,c\n4,5,6\n\r",
dict(skip_blank_lines=False),
DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
None,
),
# ESCAPED_CHAR
(
"a,b,c\n4,5,6\n\\",
dict(escapechar="\\"),
None,
"(EOF following escape character)|(unexpected end of data)",
),
# ESCAPE_IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"\\',
dict(escapechar="\\"),
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
# IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"',
dict(escapechar="\\"),
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
],
ids=[
"whitespace-line",
"eat-line-comment",
"eat-crnl-nop",
"eat-comment",
"skip-line",
"eat-line-comment",
"in-field",
"eat-crnl",
"escaped-char",
"escape-in-quoted-field",
"in-quoted-field",
],
)
def test_eof_states(all_parsers, data, kwargs, expected, msg):
# see gh-10728, gh-10548
parser = all_parsers
if expected is None:
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# First, check that the parser raises the correct error when it is given
# no columns to parse, with or without usecols.
("", dict(), None),
("", dict(usecols=["X"]), None),
(
",,",
dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
dict(
header=None,
delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True,
),
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
dict(
delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True
),
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_verbose_read(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
parser.read_csv(StringIO(data), verbose=True)
captured = capsys.readouterr()
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 3 NA values in column a\n"
def test_verbose_read2(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
parser.read_csv(StringIO(data), verbose=True, index_col=0)
captured = capsys.readouterr()
# Engines are verbose in different ways.
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 1 NA values in column a\n"
def test_iteration_open_handle(all_parsers):
parser = all_parsers
kwargs = dict(squeeze=True, header=None)
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
with open(path, "r") as f:
for line in f:
if "CCC" in line:
break
result = parser.read_csv(f, **kwargs)
expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,thousands,decimal",
[
(
"""A|B|C
1|2,334.01|5
10|13|10.
""",
",",
".",
),
(
"""A|B|C
1|2.334,01|5
10|13|10,
""",
".",
",",
),
],
)
def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
parser = all_parsers
expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
result = parser.read_csv(
StringIO(data), sep="|", thousands=thousands, decimal=decimal
)
tm.assert_frame_equal(result, expected)
def test_euro_decimal_format(all_parsers):
parser = all_parsers
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
result = parser.read_csv(StringIO(data), sep=";", decimal=",")
expected = DataFrame(
[
[1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
[2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
[3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
],
columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_inf_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
expected = DataFrame(
{"A": [float("inf"), float("-inf")] * 5},
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_infinity_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,Infinity
b,-Infinity
c,+Infinity
"""
expected = DataFrame(
{"A": [float("infinity"), float("-infinity"), float("+infinity")]},
index=["a", "b", "c"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
def test_raise_on_no_columns(all_parsers, nrows):
parser = all_parsers
data = "\n" * nrows
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data))
def test_memory_map(all_parsers, csv_dir_path):
mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
parser = all_parsers
expected = DataFrame(
{"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
)
result = parser.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(result, expected)
def test_null_byte_char(all_parsers):
# see gh-2741
data = "\x00,foo"
names = ["a", "b"]
parser = all_parsers
if parser.engine == "c":
expected = DataFrame([[np.nan, "foo"]], columns=names)
out = parser.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), names=names)
def test_temporary_file(all_parsers):
# see gh-13398
parser = all_parsers
data = "0 0"
new_file = TemporaryFile("w+")
new_file.write(data)
new_file.flush()
new_file.seek(0)
result = parser.read_csv(new_file, sep=r"\s+", header=None)
new_file.close()
expected = DataFrame([[0, 0]])
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte(all_parsers):
# see gh-5500
parser = all_parsers
data = "a,b\n1\x1a,2"
expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte_to_file(all_parsers):
# see gh-16559
parser = all_parsers
data = b'c1,c2\r\n"test \x1a test", test\r\n'
expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
path = "__{}__.csv".format(tm.rands(10))
with tm.ensure_clean(path) as path:
    with open(path, "wb") as f:
        f.write(data)
    result = parser.read_csv(path)
    tm.assert_frame_equal(result, expected)
import pandas as pd
import os
import string
import csv
import copy
import typing
from annotation.generation.annotation_to_template import run_wikifier as get_wikifier_result
from pathlib import Path
from collections import defaultdict
"""
How to use:
Step 1: load the file
    call "load_xlsx" with one file path
Step 2: generate the files
    call "generate" after the file is loaded, optionally passing the output
    folder location and a column name config
    if the column name config is not given, or only partially given, the
    system falls back to the defaults below
Example file:
https://docs.google.com/spreadsheets/d/1NuTmRIxpy460S4CRdP6XORKFILssOby_RxiFbONXwv0/edit#gid=756069733
For the attributes file:
    Assume the "Property" column exists and is the node column
    Assume the "Attribute" column exists and is the label column
For the units file:
    Assume the "Q-Node" column exists and is the node column
    Assume the "Unit" column exists and is the label column
"""
stop_punctuation = string.punctuation
TRANSLATOR = str.maketrans(stop_punctuation, ' ' * len(stop_punctuation))
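# For illustration (assuming the default string.punctuation table), TRANSLATOR
# replaces every punctuation character with a space:
#     "GDP (current US$)".translate(TRANSLATOR)  ->  "GDP  current US  "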
# deprecated! do not use
def load_csvs(dataset_file: str, attributes_file: str, units_file: str):
loaded_file = {}
files = [dataset_file, attributes_file, units_file]
file_type = ["dataset_file", "attributes_file", "units_file"]
for each_file, each_file_type in zip(files, file_type):
if each_file:
if not os.path.exists(each_file):
raise ValueError("{} {} not exist!".format(each_file_type, each_file))
loaded_file[each_file_type] = pd.read_csv(each_file)
return loaded_file
def load_xlsx(input_file: str, sheet_name_config: dict = None):
loaded_file = {}
sheet_names = pd.ExcelFile(input_file).sheet_names
if not sheet_name_config:
sheet_name_config = {"dataset_file": "Dataset",
"attributes_file": "Attributes",
"units_file": "Units",
"extra_edges": "Extra Edges"
}
for k, v in sheet_name_config.items():
if v not in sheet_names:
raise ValueError("Sheet name {} used for {} does not found!".format(v, k))
loaded_file[k] = pd.read_excel(input_file, v)
optional_sheet_name_config = {
"wikifier": "Wikifier",
"qualifiers": "Qualifiers",
"Wikifier_t2wml": "Wikifier_t2wml",
"Wikifier Columns": "Wikifier Columns"
}
for k, v in optional_sheet_name_config.items():
if v not in sheet_names:
loaded_sheet = None
else:
loaded_sheet = pd.read_excel(input_file, v)
loaded_file[k] = loaded_sheet
return loaded_file
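# Example (a sketch; the workbook path and the "My Attributes" tab name are
# illustrative assumptions): override the default sheet-name mapping when the
# workbook uses different tab titles. The keys must match the ones handled above.
#
#     loaded = load_xlsx(
#         "template.xlsx",
#         sheet_name_config={
#             "dataset_file": "Dataset",
#             "attributes_file": "My Attributes",
#             "units_file": "Units",
#             "extra_edges": "Extra Edges",
#         },
#     )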
def generate(loaded_file: dict, output_path: str = ".", column_name_config=None, to_disk=True,
datamart_properties_file: str = None, dataset_qnode: str = None, dataset_id: str = None,
debug: bool = False,
) -> typing.Optional[dict]:
"""
The main entry function for generating datamart files from template input,
base on input parameter `to_disk`, the output can be None or dict of dataframe
"""
if column_name_config is None:
column_name_config = {}
if "attributes_file_node_column_name" not in column_name_config:
column_name_config["attributes_file_node_column_name"] = "Property"
if "attributes_file_node_label_column_name" not in column_name_config:
column_name_config["attributes_file_node_label_column_name"] = "Attribute"
if "unit_file_node_column_name" not in column_name_config:
column_name_config["unit_file_node_column_name"] = "Q-Node"
if "unit_file_node_label_column_name" not in column_name_config:
column_name_config["unit_file_node_label_column_name"] = "Unit"
if len(loaded_file["dataset_file"]["dataset"].unique()) > 1:
raise ValueError("One dataset file should only contains 1 dataset ID in `dataset` column.")
if loaded_file["wikifier"] is not None:
extra_wikifier_dict = get_wikifier_part(loaded_file["wikifier"])
else:
extra_wikifier_dict = {}
# update 2020.7.22: accept user specified dataset id if given
if dataset_qnode is None:
dataset_qnode = loaded_file["dataset_file"]["dataset"].iloc[0]
if len(dataset_qnode) == 0 or not dataset_qnode[0] == 'Q':
raise Exception('First column of the "Dataset" tab must contain Q-nodes')
if dataset_id is None:
# dataset_id = loaded_file["dataset_file"]["dataset"].iloc[0]
result = loaded_file['dataset_file']['node2'][loaded_file["dataset_file"]['label'] == 'P1813']
if len(result) == 0:
raise Exception('Missing dataset identifier. Missing "P1813" edge in "Dataset" tab')
else:
dataset_id = result.iloc[0]
if dataset_id[0] == '"' and dataset_id[-1] == '"':
dataset_id = dataset_id[1:-1]
# generate files
memo = defaultdict(dict)
kgtk_properties_df = _generate_KGTK_properties_file(loaded_file["attributes_file"],
loaded_file["qualifiers"],
dataset_qnode, dataset_id,
memo, column_name_config["attributes_file_node_column_name"],
column_name_config["attributes_file_node_label_column_name"])
kgtk_variables_df = _generate_KGTK_variables_file(loaded_file["attributes_file"],
dataset_qnode, dataset_id, memo,
column_name_config["attributes_file_node_column_name"],
column_name_config["attributes_file_node_label_column_name"])
kgtk_units_df = _generate_KGTK_units_file(loaded_file["units_file"], dataset_qnode, memo,
column_name_config["unit_file_node_column_name"],
column_name_config["unit_file_node_label_column_name"])
wikifier_df = _generate_wikifier_file(memo, extra_wikifier_dict)
if loaded_file["Wikifier_t2wml"] is not None:
wikifier_df = pd.concat([wikifier_df, loaded_file["Wikifier_t2wml"]])
dataset_df = _generate_dataset_file(loaded_file["dataset_file"])
extra_edges_df = _generate_extra_edges_file(loaded_file["extra_edges"], memo)
output_files = {"kgtk_properties.tsv": kgtk_properties_df,
"kgtk_variables.tsv": kgtk_variables_df,
"kgtk_units.tsv": kgtk_units_df,
"wikifier.csv": wikifier_df,
"extra_edges.tsv": extra_edges_df,
"dataset.tsv": dataset_df}
if datamart_properties_file is not None:
datamart_schema_df = pd.read_csv(datamart_properties_file, sep='\t')
output_files['datamart_schema_properties.tsv'] = datamart_schema_df
# save to disk if required or running in debug mode
if to_disk or debug:
os.makedirs(output_path, exist_ok=True)
for each_file_name, each_file in output_files.items():
output_file_path = os.path.join(output_path, each_file_name)
if each_file_name.endswith(".csv"):
each_file.to_csv(output_file_path, index=False)
elif each_file_name.endswith(".tsv"):
each_file.to_csv(output_file_path, sep='\t', index=False, quoting=csv.QUOTE_NONE)
if not to_disk:
return output_files
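# Usage sketch (referenced from the docstring above): keep the generated tables
# in memory instead of writing them to disk; the returned dict maps output file
# names to DataFrames.
#
#     tables = generate(loaded, to_disk=False)
#     kgtk_variables_df = tables["kgtk_variables.tsv"]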
def _generate_KGTK_properties_file(input_df: pd.DataFrame, qualifier_df: pd.DataFrame,
dataset_q_node: str, dataset_id: str, memo: dict,
node_column_name="Property", node_label_column_name="Attribute",
qualifier_column_name="Qualifiers") -> pd.DataFrame:
"""
sample format for each property (3 rows shown below; the code additionally emits
a "wikidata_data_type" edge, so 4 rows are written per property)
Please note that the data type may change (to String, Date, ...) based on the given input template file
id node1 label node2
0 Paid-security-002-data_type Paid-security-002 data_type Quantity
1 Paid-security-002-P31 Paid-security-002 P31 Q18616576
2 Paid-security-002-label Paid-security-002 label UN
:return: kgtk format property dataframe
"""
node_number = 1
output_df_list = []
input_df = input_df.fillna("")
has_relationship = 'Relationship' in input_df.columns and 'Role' in input_df.columns
for _, each_row in input_df.iterrows():
node_number += 1
if has_relationship:
role = each_row["Role"].upper()
else:
role = ""
if each_row[node_column_name] == "":
node_label = to_kgtk_format_string(each_row[node_label_column_name])
node_id = _generate_p_nodes(role, dataset_q_node, node_number, memo, each_row['Attribute'])
# get type if specified
if "type" in each_row:
value_type = each_row["type"]
else:
value_type = "Quantity"
labels = ["wikidata_data_type", "data_type", "P31", "label"]
node2s = [value_type, value_type, "Q18616576", node_label]
for i in range(len(labels)):
id_ = "{}-{}".format(node_id, labels[i])
output_df_list.append({"id": id_, "node1": node_id, "label": labels[i], "node2": node2s[i]})
else:
node_id = each_row[node_column_name]
# add to memo for future use
memo["property"][node_id] = each_row[node_label_column_name]
if "Role" in each_row:
memo["property_role"][node_id] = each_row["Role"].lower()
# add the qualifier part, if one was provided
if qualifier_df is not None:
qualifier_df = qualifier_df.fillna("")
for _, each_row in qualifier_df.iterrows():
node_number += 1
if each_row[node_column_name] == "":
node_id = _generate_p_nodes("QUALIFIER", dataset_q_node, node_number, memo, each_row["Attribute"])
memo["qualifier_target_nodes"][each_row[qualifier_column_name]] = memo["property_name_to_id"][
each_row[node_label_column_name]]
memo["qualifier_name_to_id"][each_row[qualifier_column_name]] = node_id
memo["property"][node_id] = each_row[qualifier_column_name]
labels = ["data_type", "P31", "label"]
node2s = ["String", "Q18616576", to_kgtk_format_string(each_row[qualifier_column_name])]
for i in range(3):
id_ = "{}-{}".format(node_id, labels[i])
output_df_list.append({"id": id_, "node1": node_id, "label": labels[i], "node2": node2s[i]})
else:
memo["property"][each_row[node_column_name]] = each_row[qualifier_column_name]
memo["qualifier_name_to_id"][each_row[qualifier_column_name]] = each_row[node_column_name]
memo["qualifier_target_nodes"][each_row[qualifier_column_name]] = memo["property_name_to_id"][
each_row[node_label_column_name]]
# get output
output_df = pd.DataFrame(output_df_list)
# in case of empty df
if output_df.shape == (0, 0):
output_df = pd.DataFrame(columns=['id', 'node1', 'label', 'node2'])
return output_df
def _generate_KGTK_variables_file(input_df: pd.DataFrame, dataset_q_node: str, dataset_id: str, memo: dict,
node_column_name="Property", node_label_column_name="Attribute"):
"""
sample format for each variable: 10 + n rows in total, where n is the count of related qualifiers
"id" "node1" "label" "node2"
0 QVARIABLE-OECD-002-label QVARIABLE-002 label "GDP per capita"
1 QVARIABLE-OECD-002-P1476 QVARIABLE-002 P1476 "GDP per capita"
2 QVARIABLE-OECD-002-description QVARIABLE-002 description "GDP per capita variable in OECD"
3 QVARIABLE-OECD-002-P31-1 QVARIABLE-002 P31 Q50701
4 QVARIABLE-OECD-002-P2006020002-P248 QVARIABLE-002 P2006020002 P585
5 QVARIABLE-OECD-002-P2006020002-P248 QVARIABLE-002 P2006020002 P248
6 QVARIABLE-OECD-002-P1687-1 QVARIABLE-002 P1687 PVARIABLE-OECD-002
7 QVARIABLE-OECD-002-P2006020004-1 QVARIABLE-002 P2006020004 QOECD
8 QVARIABLE-OECD-002-P1813 QVARIABLE-002 P1813 "gdp_per_capita"
9 QVARIABLE-OECD-P2006020003-QOECD002 QVARIABLE P2006020003 QOECD-002
-------------------------------------------------
10 QVARIABLE-OECD-P2006020002-PQUALIFIER-OECD-101 QVARIABLE P2006020003 PQUALIFIER-OECD-101
11 QVARIABLE-OECD-P2006020002-PQUALIFIER-OECD-102 QVARIABLE P2006020003 PQUALIFIER-OECD-102
-------------------------------------------------
12 ... QVARIABLE-002 P2010050001 FactorClass:EconomicAgricuturalCapability
13 ... QVARIABLE-002 P2010050001 Relevance:1
"""
node_number = 1
output_df_list = []
short_name_memo = set()
input_df = input_df.fillna("")
all_qualifier_properties = []
for node, role in memo["property_role"].items():
if role == "qualifier":
all_qualifier_properties.append(node)
has_relationship = 'Relationship' in input_df.columns and 'Role' in input_df.columns
for _, each_row in input_df.iterrows():
if has_relationship:
role = each_row["Role"].upper()
else:
role = ""
        # do not add QUALIFIER rows to the variables tab
if has_relationship and role == "QUALIFIER":
continue
target_properties = []
# update 2020.7.22: consider role and relationship for new template file
if has_relationship:
relations = each_row['Relationship']
# qualifier should not have qualifier properties
if each_row['Role'].lower() != "qualifier":
if relations == "":
target_properties = all_qualifier_properties
else:
                    for each_relation in relations.split("|"):
                        if each_relation not in memo["property_name_to_id"]:
                            raise ValueError(
                                "Annotation specifies variable {} which does not exist in the input data.".format(each_relation))
target_properties.append(memo["property_name_to_id"][each_relation])
node_number += 1
if each_row[node_column_name] == "":
# update 2020.7.23, also add role for P nodes
p_node_id = _generate_p_nodes(role, dataset_q_node, node_number, memo, each_row["Attribute"])
else:
p_node_id = each_row[node_column_name]
# update 2020.7.22: change to add role in Q node id
q_node_id = _generate_q_nodes(role, dataset_q_node, node_number)
memo["variable"][q_node_id] = each_row[node_label_column_name]
fixed_labels = ["label", "P1476", "description", # 1-3
"P31", "P2006020002", "P2006020002", # 4-6
"P1687", "P2006020004", "P1813", # 7-9
"P2006020003"]
labels = fixed_labels + len(target_properties) * ["P2006020002"]
if each_row['label'] == "":
node2_label = to_kgtk_format_string(each_row[node_label_column_name])
else:
node2_label = to_kgtk_format_string(each_row['label'])
if each_row['description'] == "":
node2_description = to_kgtk_format_string("{} in {}".format(each_row[node_label_column_name], dataset_id))
else:
node2_description = to_kgtk_format_string(each_row['description'])
node2s = [node2_label, # to_kgtk_format_string(each_row[node_label_column_name]), # 1
node2_label, # to_kgtk_format_string(each_row[node_label_column_name]), # 2
node2_description,
# to_kgtk_format_string("{} in {}".format(each_row[node_label_column_name], dataset_id)), # 3
"Q50701", "P585", "P248", # 4(Q50701 = variable), 5(P585 = Point in time), 6(P249 = stated in)
p_node_id, # 7
dataset_q_node, # 8
to_kgtk_format_string(get_short_name(short_name_memo, each_row[node_label_column_name])), # 9
q_node_id # 10
] + target_properties
node1s = [q_node_id] * (len(fixed_labels) - 1) + [dataset_q_node] + [q_node_id] * len(target_properties)
# Add tag edges
if 'tag' in input_df.columns and each_row['tag']:
tag_values = [to_kgtk_format_string(x) for x in each_row['tag'].split('|')]
node1s += [q_node_id] * len(tag_values)
labels += ['P2010050001'] * len(tag_values)
node2s += tag_values
# add those nodes
for i, each_label in enumerate(labels):
id_ = _generate_edge_id(node1s[i], labels[i], node2s[i])
output_df_list.append({"id": id_, "node1": node1s[i], "label": labels[i], "node2": node2s[i]})
# get output
output_df = pd.DataFrame(output_df_list)
# in case of empty df
if output_df.shape == (0, 0):
output_df = pd.DataFrame(columns=['id', 'node1', 'label', 'node2'])
return output_df
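# A minimal usage sketch (assumptions: `annotation_df` is a dataframe with the Attribute /
# Property / label / description columns referenced above, and `memo` is initialized by the
# caller as a plain dict of dicts with the keys used in these generators):
#
#   memo = {"property": {}, "variable": {}, "unit": {}, "property_role": {},
#           "property_name_to_id": {}, "qualifier_name_to_id": {}, "qualifier_target_nodes": {}}
#   variables_df = _generate_KGTK_variables_file(annotation_df, "Qaid-security", "aid-security", memo)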
def _generate_KGTK_units_file(input_df: pd.DataFrame, dataset_q_node: str, memo: dict, node_column_name="Q-Node",
node_label_column_name="Unit") -> pd.DataFrame:
"""
sample format for each unit (totally 2 rows)
id node1 label node2
0 QUNIT-aid-security-U002-label Qaid-security-U002 label person
1 QUNIT-aid-security-U002-P31 Qaid-security-U002 P31 Q47574
    :return: kgtk format unit dataframe
"""
node_number = 1
count = 0
output_df_dict = {}
input_df = input_df.fillna("")
for _, each_row in input_df.iterrows():
node_number += 1
if each_row[node_column_name] == "":
# update 2020.7.22: change to use QUNIT* instead of Q*
node_id = _generate_q_nodes("UNIT", dataset_q_node, node_number)
labels = ["label", "P31"]
node2s = [to_kgtk_format_string(each_row[node_label_column_name]), "Q47574"]
memo["unit"][node_id] = each_row[node_label_column_name]
for i in range(2):
id_ = _generate_edge_id(node_id, labels[i], node2s[i])
output_df_dict[count] = {"id": id_, "node1": node_id, "label": labels[i], "node2": node2s[i]}
count += 1
else:
memo["unit"][each_row[node_column_name]] = each_row[node_label_column_name]
# get output
output_df = pd.DataFrame.from_dict(output_df_dict, orient="index")
# in case of empty df
if output_df.shape == (0, 0):
output_df = pd.DataFrame(columns=['id', 'node1', 'label', 'node2'])
return output_df
def _generate_wikifier_file(memo, extra_wikifier_dict):
"""
    generate the wikifier part from the template (the properties, variables, and units generated in the functions above)
Sample file looks like:
column row value context item
0 "" "" UN property Paid-security-002
1 "" "" INGO property Paid-security-003
2 "" "" LNGO/NRCS property Paid-security-004
3 "" "" ICRC property Paid-security-005
4 "" "" UN variable Qaid-security-002
5 "" "" INGO variable Qaid-security-003
6 "" "" person unit Qaid-security-U002
"""
output_df_list = []
for memo_type, each_memo in memo.items():
if memo_type in {"property", "unit", "variable"}:
for node, label in each_memo.items():
output_df_list.append({"column": "", "row": "", "value": label, "context": memo_type, "item": node})
                # for specific aliases of wikifier names
combo = (label, memo_type)
if combo in extra_wikifier_dict:
output_df_list.append(
{"column": "", "row": "", "value": extra_wikifier_dict[combo], "context": memo_type,
"item": node})
# get output
output_df = pd.DataFrame(output_df_list)
return output_df
def _generate_dataset_file(input_df: pd.DataFrame):
"""
A sample dataset file looks like:
node1 label node2 id
Qaid-security P31 Q1172284 aid-security-P31
Qaid-security label aid-security dataset aid-security-label
Qaid-security P1476 aid-security dataset aid-security-P1476
Qaid-security description aid-security dataset aid-security-description
Qaid-security P2699 aid-security aid-security-P2699
Qaid-security P1813 aid-security aid-security-P1813
:return:
"""
output_df = copy.deepcopy(input_df)
ids = []
for _, each_row in output_df.iterrows():
ids.append("{}-{}".format(each_row["dataset"], each_row["label"]))
output_df['id'] = ids
    # Assume the first column already contains Q nodes
# output_df["dataset"] = output_df['dataset'].apply(lambda x: "Q" + x)
output_df = output_df.rename(columns={"dataset": "node1"})
# check double quotes
output_df = _check_double_quotes(output_df, check_content_startswith=True)
return output_df
def _generate_extra_edges_file(input_df: pd.DataFrame, memo: dict):
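    # For every qualifier recorded in memo["qualifier_target_nodes"], emit an extra P2006020002
    # edge linking the qualifier's target node (node1) to the qualifier's own node id (node2),
    # then append these rows to the caller-provided edges dataframe.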
qualifier_extra_edges_list = []
if "qualifier_target_nodes" in memo:
for k, v in memo['qualifier_target_nodes'].items():
qualifier_extra_edges_list.append({"id": "", "node1": v, "label": "P2006020002",
"node2": memo["qualifier_name_to_id"][k]})
output_df = pd.concat([input_df,
|
pd.DataFrame(qualifier_extra_edges_list)
|
pandas.DataFrame
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: <NAME>
# Modified for documentation by <NAME>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Define a pipeline to search for the best combination of PCA truncation
# and classifier regularization.
pca = PCA()
# set the tolerance to a large value to make the example faster
logistic = LogisticRegression(max_iter=10000, tol=0.1)
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
X_digits, y_digits = datasets.load_digits(return_X_y=True)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
param_grid = {
'pca__n_components': [5, 15, 30, 45, 64],
'logistic__C': np.logspace(-4, 4, 4),
}
search = GridSearchCV(pipe, param_grid, n_jobs=-1)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
# Plot the PCA spectrum
pca.fit(X_digits)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax0.plot(np.arange(1, pca.n_components_ + 1),
pca.explained_variance_ratio_, '+', linewidth=2)
ax0.set_ylabel('PCA explained variance ratio')
ax0.axvline(search.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
ax0.legend(prop=dict(size=12))
# For each number of components, find the best classifier results
results =
|
pd.DataFrame(search.cv_results_)
|
pandas.DataFrame
|
# -*- coding:utf-8 -*-
from calendar import leapdays
import numpy as np
import pandas as pd
import random
from datetime import datetime, timedelta
import time
import re
import joblib
import requests
# from draw import *
import xlrd
from pre_train import model_predict
from unit import *
xlrd.xlsx.ensure_elementtree_imported(False, None)
xlrd.xlsx.Element_has_iter = True
base_path_1 = "./dataset/"
base_path_2 = "./dataset/tmp/"
base_path_3 = "./output/"
station_id_change = {
'miyunshuiku_aq': 'miyunshuik_aq',
'wanshouxigong_aq': 'wanshouxig_aq',
'nongzhanguan_aq': 'nongzhangu_aq',
'xizhimenbei_aq': 'xizhimenbe_aq',
'fengtaihuayuan_aq': 'fengtaihua_aq',
'aotizhongxin_aq': 'aotizhongx_aq',
'yongdingmennei_aq': 'yongdingme_aq'
}
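# Maps full station names to the truncated identifiers used in the downloaded files; the
# abbreviated forms appear to be 10-character versions of the long names.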
# Download data from the website
def get_data(city, start_time, end_time, current_day=False):
if current_day == True:
end_time = '2018-07-01-23'
link1 = 'https://biendata.com/competition/airquality/' + city + '/' + start_time + '/' + end_time + '/2k0d1d8'
respones = requests.get(link1)
if current_day == False:
with open(base_path_2 + city + "_airquality_" + start_time + "_" + end_time + ".csv", 'w') as f:
f.write(respones.text)
else:
with open(base_path_2 + city + "_airquality_current_day.csv", 'w') as f:
f.write(respones.text)
if city == "bj":
link2 = 'https://biendata.com/competition/meteorology/' + city + '/' + start_time + '/' + end_time + '/2k0d1d8'
respones = requests.get(link2)
if current_day == False:
with open(base_path_2 + city + "_meteorology_" + start_time + "_" + end_time + ".csv", 'w') as f:
f.write(respones.text)
else:
with open(base_path_2 + city + "_meteorology_current_day.csv", 'w') as f:
f.write(respones.text)
link3 = 'https://biendata.com/competition/meteorology/' + city + '_grid/' + start_time + '/' + end_time + '/2k0d1d8'
respones = requests.get(link3)
if current_day == False:
with open(base_path_2 + city + "_meteorology_grid_" + start_time + "_" + end_time + ".csv", 'w') as f:
f.write(respones.text)
else:
with open(base_path_2 + city + "_meteorology_grid_current_day.csv", 'w') as f:
f.write(respones.text)
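# Example usage sketch (hypothetical date range; the API expects YYYY-MM-DD-HH strings as above):
#   get_data("bj", "2018-04-01-0", "2018-04-30-23")
#   get_data("bj", "2018-05-01-0", "2018-05-01-23", current_day=True)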
# Load stations
def load_station():
filename = base_path_1 + "Beijing_AirQuality_Stations_cn.xlsx"
data = xlrd.open_workbook(filename)
table = data.sheet_by_name(u'Sheet2')
nrows = table.nrows
#print(nrows)
bj_stations = {}
for i in range(0, nrows):
row = table.row_values(i)
print (row)
bj_stations[row[0]] = {}
bj_stations[row[0]]['lng'] = row[1]
bj_stations[row[0]]['lat'] = row[2]
bj_stations[row[0]]['type_id'] = int(row[-1])
#print(int(row[-1]))
bj_stations[row[0]]['station_num_id'] = i
filename = base_path_1 + "London_AirQuality_Stations.csv"
fr = open(filename)
ld_stations = {}
flag = 0
i = 0
for line in fr.readlines():
if flag == 0:
flag = 1
continue
row = line.strip().split(",")
ld_stations[row[0]] = {}
if row[2] == "TRUE":
ld_stations[row[0]]['predict'] = True
else:
ld_stations[row[0]]['predict'] = False
ld_stations[row[0]]['lng'] = float(row[5])
ld_stations[row[0]]['lat'] = float(row[4])
ld_stations[row[0]]['type_id'] = int(row[-1])
ld_stations[row[0]]['station_num_id'] = i
i += 1
stations = {}
stations["bj"] = bj_stations
stations["ld"] = ld_stations
return stations
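# The returned object is a nested dict keyed by city then station name, e.g.
#   stations["bj"]["aotizhongxin_aq"] -> {"lng": ..., "lat": ..., "type_id": ..., "station_num_id": ...}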
# Load raw data
def load_data(city, start_time, end_time, current_day=False):
if current_day == False:
#filename = base_path_2 + city + "_airquality_" + start_time + "_" + end_time + ".csv"
filename = "C:/Users/Nobody/Documents/aau/6/jacob/KDD_CUP_2018-master/dataset/tmp/beijing_17_18_aq.csv"
else:
#filename = "C:/Users/Nobody/Documents/aau/6/jacob/KDD_CUP_2018-master/dataset/tmp/beijing_17_18_aq.csv"
filename = base_path_1 + city + "_aq_online.csv"
df =
|
pd.read_csv(filename,low_memory=False, sep=',')
|
pandas.read_csv
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import os
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from os.path import join as pjoin
from cplvm import CPLVM
import matplotlib
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
DATA_DIR = "../../data/mix_seq/data/nutlin/"
if __name__ == "__main__":
latent_dim_shared = 2
latent_dim_foreground = 2
X_fname = pjoin(DATA_DIR, "dmso_expt1.csv")
Y_fname = pjoin(DATA_DIR, "nutlin_expt1.csv")
X_mutation_fname = pjoin(DATA_DIR, "p53_mutations_dmso.csv")
Y_mutation_fname = pjoin(DATA_DIR, "p53_mutations_nutlin.csv")
p53_mutations_X = pd.read_csv(X_mutation_fname, index_col=0)
p53_mutations_X.tp53_mutation[
p53_mutations_X.tp53_mutation == "Hotspot"
] = "Mutated"
p53_mutations_X.tp53_mutation[
p53_mutations_X.tp53_mutation == "Other"
] = "Wild-type"
p53_mutations_Y = pd.read_csv(Y_mutation_fname, index_col=0)
p53_mutations_Y.tp53_mutation[
p53_mutations_Y.tp53_mutation == "Hotspot"
] = "Mutated"
p53_mutations_Y.tp53_mutation[
p53_mutations_Y.tp53_mutation == "Other"
] = "Wild-type"
# Read in data
X =
|
pd.read_csv(X_fname, index_col=0)
|
pandas.read_csv
|
#Lib for Streamlit
# Copyright(c) 2021 - AilluminateX LLC
# This is main Sofware... Screening and Tirage
# Customized to general Major Activities
# Make all the School Activities- st.write(DataFrame) ==> (outputs) Commented...
# The reason, since still we need the major calculations.
# Also the Computing is not that expensive.. So, no need to optimize at this point
import streamlit as st
import pandas as pd
#Change website title (set_page_config)
#==============
from PIL import Image
image_favicon=Image.open('Logo_AiX.jpg')
st.set_page_config(page_title='AilluminateX - Covid Platform', page_icon = 'Logo_AiX.jpg') #, layout = 'wide', initial_sidebar_state = 'auto'), # layout = 'wide',)
# favicon being an object of the same kind as the one you should provide st.image() with
#(ie. a PIL array for example) or a string (url or local file path)
#==============
#Hide footer and customize the text
#=========================
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
footer:after {
content:'Copyright(c) 2021 - AilluminateX LLC and Ailysium - Covid19 Bio-Forecasting Platform | https://www.aillumiante.com';
visibility: visible;
display: block;
position: relative;
#background-color: gray;
padding: 5px;
top: 2px;
}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#==============================
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from yellowbrick.classifier import ClassificationReport
from sklearn.metrics import accuracy_score
#import numpy as np
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import plotly.express as px
import numpy as np
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import altair as alt
import plotly.figure_factory as ff
import matplotlib
from matplotlib import cm
import seaborn as sns; sns.set()
from PIL import Image
import statsmodels.api as sm
import statsmodels.formula.api as smf
#from sklearn import model_selection, preprocessing, metrics, svm,linear_model
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, cross_validate, StratifiedKFold
from sklearn.feature_selection import SelectKBest, chi2
#from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import auc, roc_auc_score, roc_curve, explained_variance_score, precision_recall_curve,average_precision_score,accuracy_score, classification_report
#from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from scipy.stats import boxcox
from matplotlib import pyplot
import pickle
#from sklearn.externals import joblib
import joblib
# Load Image & Logo
#====================
st.image("Logo_AiX.jpg") # Change to MSpace Logo
#st.write("https://www.ailluminate.com")
#st.image("LogoAiX1.jpg") # Change to MSpace Logo
st.markdown("<h1 style='text-align: left; color: turquoise;'>Ailysium: BioForecast Platform</h1>", unsafe_allow_html=True)
#st.markdown("<h1 style='text-align: left; color: turquoise;'>Train AI BioForecast Model (Realtime)</h1>", unsafe_allow_html=True)
#st.markdown("<h1 style='text-align: left; color: turquoise;'>Opening-Economy & Society</h1>", unsafe_allow_html=True)
#df_forecast= pd.read_csv("2021-03-27-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
df_forecast=pd.read_csv("recent-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#Load Data - The last/most recent Forecast and latest Data
#=====================
# The last two, most recent forecast
#df_forecast= pd.read_csv("2021-03-15-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#Forcast_date="2021-03-15"
#Forecasted_dates=["3/20/2021", "3/27/2021", "4/03/2021", "4/10/2021" ]
#df_forecast= pd.read_csv("2021-03-22-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#Forcast_date="2021-03-22"
#Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
#==========================================
df_forecast_previous= pd.read_csv("previous-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-22"
Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
df_forecast_recent=pd.read_csv("recent-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-29"
Forecasted_dates=["4/03/2021", "4/10/2021", "4/17/2021", "4/24/2021" ]
#================
#initialize the data
#=======================
#Models
#====================
#st.success("What Forecast Model Data to Load?")
forecast_model_Options= ['Reference Model',
'Ensemble',
'UGA-CEID',
'Columbia',
'ISU',
'UVA',
'LNQ',
'Facebook',
'JHU-APL',
'UpstateSU',
'JHU-IDD',
'LANL',
'Ensemble']
#st.success("What Date Forecast Data to Load?")
data_dates_options=['2021-01-04', '2021-01-11', '2021-01-18',
'2021-01-25', '2021-02-01', '2021-02-08',
'2021-02-15', '2021-02-22', '2021-03-01',
'2021-03-08', '2021-03-15', '2021-03-22',
'2021-03-29']
data_dates_options=['2021-03-29',
'2021-03-22', '2021-03-15', '2021-03-08',
'2021-03-01', '2021-02-22', '2021-02-15',
'2021-02-08', '2021-02-01', '2021-01-25',
'2021-01-18', '2021-01-11', '2021-01-04']
data_dates_options=['2021-04-14']
load_ai_model_options=['Reference Model',
'AI Model 1',
'AI Model 2 (L)',
'AI Model 3 (Fast)',
'AI Model 4 (Fast) (L)',
'AI Model 5',
'AI Model 6',
'AI Model 7 (VERY Slow- Do Not Use, if You have too!)',
'AI Model 8',
'AI Model 9 (Slow)',
'AI Model 10',
'AI Model 11 (L)',
'AI Model 12',
'AI Model 13',
'AI Model 14 (L)',
'AI Model 15',
'AI Model 16 (L)',
'AI Model (aggregator)']
train_ai_model_options=load_ai_model_options
#===========================
#Selectt Option Section
#============================
select_options=["AiX-ai-Forecast-Platform",
"Load Forecast Data", #Simply Check the Forecast Data
"Load AI Model",
"Train AI Model",
"AiX-Platform"]
select_options=["AiX-ai-Forecast-Platform"]
your_option=select_options
st.sidebar.success("Please Select your Option" )
option_selectbox = st.sidebar.selectbox( "Select your Option:", your_option)
select_Name=option_selectbox
#if option_selectbox=='Load Forecast Data' or option_selectbox!='Load Forecast Data':
#if select_Name=='Load Forecast Data' or select_Name!='Load Forecast Data':
if select_Name=='AiX-ai-Forecast-Platform' or select_Name!='AiX-ai-Forecast-Platform':
#Models
#====================
#st.success("What Forecast Model Data to Load?")
your_option=forecast_model_Options
st.sidebar.info("Please Select Forecast Model" )
option_selectbox = st.sidebar.selectbox( "Select Forecast Model:", your_option)
if option_selectbox =='Reference Model':
option_selectbox='Reference Model'
option_selectbox='Ensemble'
forecast_model_Name=option_selectbox
#if option_selectbox=='Load Forecast Data' or option_selectbox!='Load Forecast Data':
if select_Name=='Load Forecast Data' or select_Name!='Load Forecast Data':
#st.success("What Date Forecast Data to Load?")
your_option=data_dates_options
st.sidebar.warning("Please Select Forecast Date" )
option_selectbox = st.sidebar.selectbox( "Select Forecast Date:", your_option)
#if option_selectbox=='2021-03-22':
# option_selectbox= '2021-03-15'
data_dates_Name=option_selectbox
if option_selectbox==data_dates_Name:
your_option=["One(1) Week Ahead", "Two(2) Weeks Ahead", "Three(3) Weeks Ahead", "Four(4) Weeks Ahead"]
st.sidebar.warning("Please Select Forecast Week" )
option_selectbox = st.sidebar.selectbox( "Select Forecast Weeks Ahead:", your_option)
data_week_Name=option_selectbox
if data_week_Name !="One(1) Week Ahead":
st.write("Two(2), Three(3), and Four(4) Weeks Ahead are being calculated offline currently and are not presented as realtime")
#if option_selectbox=='Load AI Model':
if select_Name=='Load AI Model':
your_option=load_ai_model_options
st.sidebar.error("Please Select AI Model to load" )
option_selectbox = st.sidebar.selectbox( "Select AI-Model to Load:", your_option)
ai_load_Name=option_selectbox
#if option_selectbox=='Train AI Model':
if select_Name=='Train AI Model':
your_option=train_ai_model_options
st.sidebar.success("Please Select AI Model to Train" )
option_selectbox = st.sidebar.selectbox( "Select AI-Model to Train:", your_option)
ai_train_Name=option_selectbox
#load_data_csv=data_dates_Name+"-all-forecasted-cases-model-data.csv"
#st.write("Data to load: ", load_data_csv)
#Load Models and Sidebar Selection
#===================================================================================# Load AI Models
#if option_selectbox=='AiX Platform':
if select_Name=='AiX Platform':
model2load=pd.read_csv('model2load.csv', engine='python', dtype=str) # dtype={"Index": int})
model_index=model2load
model_names_option=model_index.AI_Models.values
st.sidebar.success("Please Select your AI Model!" )
model_selectbox = st.sidebar.selectbox( "Select AI Model", model_names_option)
Model_Name=model_selectbox
Index_model=model2load.Index[model2load.AI_Models==Model_Name].values[0]
Index_model=int(Index_model)
pkl_model_load=model2load.Pkl_Model[model2load.AI_Models==Model_Name].values[0]
#Load Data and Model
Pkl_Filename = pkl_model_load #"Pickle_RForest.pkl"
#st.write(Pkl_Filename)
# Load the Model back from file
#****with open(Pkl_Filename, 'rb') as file: # This line to load the file
#*** Pickle_LoadModel = pickle.load(file) # This line to load the file
# Pickle_RForest = pickle.load(file)
#RForest=Pickle_RForest
load_data_csv=data_dates_Name+"-all-forecasted-cases-model-data.csv"
#st.write('Load CDC Model Data- Data to load:', ' ', load_data_csv)
load_data_csv="recent-all-forecasted-cases-model-data.csv"
#st.write('Load CDC Model Data- Data to load:', ' ', load_data_csv)
#Forecast Data is being loaded and alll sort of sidebars also created.
#===================================================
#import pandas as pd
# Load Reference Model Forecast Ensemble - Only For Visualization Purpose
#=============================================================================
#df_forecast= pd.read_csv("2021-03-15-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#df_forecast= pd.read_csv(load_data_csv, engine='python', dtype={'fips': str})
df_forecast_ref=pd.DataFrame()
df_forecast_ref=pd.read_csv("previous-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-22"
Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
df_forecast=pd.DataFrame()
df_forecast= df_forecast_ref.copy()
df=pd.DataFrame()
df=df_forecast.copy()
# Drop all the States. We are only interested in Counties
df_drop=df[df.location_name!=df.State]
#df_drop1 = df.query("location_name != State")
#df_drop.fips= df_drop.fips.astype(str)
df_forecast=df_drop.copy()
#df_drop.fips= df_drop.fips.astype(str)
#df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
#forecast_model_Name="Ensemble"
df_forecast_Ensemble=pd.DataFrame()
df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
df_forecast_Ensemble=df_forecast_Ensemble[df_forecast_Ensemble.target=="1 wk ahead inc case"]
df_forecast_Ensemble_ref=pd.DataFrame()
df_forecast_Ensemble_ref=df_forecast_Ensemble.copy()
# Load Previous Forecast
#=========================
#df_forecast= pd.read_csv("2021-03-15-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#df_forecast= pd.read_csv(load_data_csv, engine='python', dtype={'fips': str})
df_forecast_previous=pd.DataFrame()
df_forecast_previous=pd.read_csv("previous-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-22"
Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
df_forecast=pd.DataFrame()
df_forecast= df_forecast_previous.copy()
df=pd.DataFrame()
df=df_forecast.copy()
# Drop all the States. We are only interested in Counties
df_drop=df[df.location_name!=df.State]
#df_drop1 = df.query("location_name != State")
#df_drop.fips= df_drop.fips.astype(str)
df_forecast=df_drop.copy()
#df_drop.fips= df_drop.fips.astype(str)
#df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
df_forecast_Ensemble=pd.DataFrame()
df_forecast_Ensemble=df_forecast[df_forecast.model==forecast_model_Name]
df_forecast_Ensemble=df_forecast_Ensemble[df_forecast_Ensemble.target=="1 wk ahead inc case"]
df_forecast_Ensemble_previous=pd.DataFrame()
df_forecast_Ensemble_previous=df_forecast_Ensemble.copy()
#Load Most Recent Forecast
#====================
#df_forecast= pd.read_csv(load_data_csv, engine='python', dtype={'fips': str})
df_forecast_recent=pd.DataFrame()
df_forecast_recent=pd.read_csv("recent-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-29"
Forecasted_dates=["4/03/2021", "4/10/2021", "4/17/2021", "4/24/2021" ]
df_forecast=pd.DataFrame()
df_forecast= df_forecast_recent.copy()
df=pd.DataFrame()
df=df_forecast.copy()
# Drop all the States. We are only interested in Counties
df_drop=df[df.location_name!=df.State]
#df_drop1 = df.query("location_name != State")
#df_drop.fips= df_drop.fips.astype(str)
#df_drop.fips= df_drop.fips.astype(str)
#df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
df_forecast_Ensemble=pd.DataFrame()
df_forecast_Ensemble=df_forecast[df_forecast.model==forecast_model_Name]
df_forecast_Ensemble=df_forecast_Ensemble[df_forecast_Ensemble.target=="1 wk ahead inc case"]
df_forecast_Ensemble_recent=pd.DataFrame()
df_forecast_Ensemble_recent=df_forecast_Ensemble.copy()
#Load Actual Cases
#==========================
df_actual_cases=pd.DataFrame()
df_actual_cases=pd.read_csv("covid_confirmed_usafacts_forecast.csv", engine='python', dtype={'fips': str})
    #======================Visualization of data =======================
    # ======================Compare the Forecast with actual data ================
df_ref_temp=pd.DataFrame(np.array(df_forecast_Ensemble_ref.iloc[:,[6,7]].values), columns=["fips", "Forecast_Reference"]) # 6,7: fips and point
df_model_temp=pd.DataFrame(np.array(df_forecast_Ensemble_previous.iloc[:,[6,7]].values), columns=["fips", "Forecast_Model"]) # 6,7: fips and point
df_actual_temp=pd.DataFrame(np.array(df_actual_cases.iloc[:,[0,-2]].values), columns=["fips", "Actual_Target"]) # 0, -2: fips and most recent actual-target
df_actual_temp=pd.DataFrame(np.array(df_actual_cases.iloc[:,[0,-7,-6,-5,-4,-3, -2]].values),
columns=["fips", "TimeN5", "TimeN4", "TimeN3", "TimeN2", "TimeN1", "Actual_Target"]) # 0, -2: fips and most recent actual-target
#st.write("Last 6 Total Weekly Cases, ", df_actual_temp.head(20))
data_merge= pd.DataFrame() #df_ref_temp.copy()
data_merge= pd.merge(df_ref_temp, df_model_temp, on="fips")
data_merge_left=data_merge.copy()
data_merge= pd.merge(data_merge_left, df_actual_temp, on="fips")
#st.write("df_actual_temp:, ", data_merge.head())
#st.error("Stop for checking how many is loaded")
data_merge.iloc[:,1:] = data_merge.iloc[:,1:].astype(float)
#st.write("Data Merged: ", data_merge.head())
#data_merge = data_merge.iloc[:,[1,2,3]].astype(float)
df_forecast_target=data_merge.copy()
#df_forecast_target_Scaled = df_forecast_target_Scaled.astype(float)
len_data=len(df_forecast_target)
df_population= pd.read_csv("covid_county_population_usafacts.csv", engine='python', dtype={'fips': str, 'fips_1': str})
df_forecast_target_Scaled = df_forecast_target.copy()
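    # Scale each county row to cases per 1,000 residents using that county's population
    # from covid_county_population_usafacts.csv.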
i=0
while i <len_data:
        fips=df_forecast_target['fips'].iloc[i]
population=df_population.population[df_population.fips==fips].values[0]
df_forecast_target_Scaled.iloc[i,1:]=df_forecast_target.iloc[i,1:]/population*1000
i=i+1
df_forecast_target_Scaled.iloc[:,1:] = df_forecast_target_Scaled.iloc[:,1:].astype(float)
#st.write("df_forecast_target_Scaled", df_forecast_target_Scaled.head())
data_viz=df_forecast_target_Scaled.copy()
#Delete All The Data Frames that we do not need!
#=======================Delete all the DataFrame we do not need ==================
df_forecast_target_Scaled=pd.DataFrame()
data_merge=pd.DataFrame()
df_forecast_target=pd.DataFrame()
df_forecast_Ensemble_previous=pd.DataFrame()
df_forecast_Ensemble_recent=pd.DataFrame()
df_forecast_Ensemble_ref=pd.DataFrame()
df_forecast=pd.DataFrame()
df_ref_temp=pd.DataFrame()
df_model_temp=pd.DataFrame()
df_actual_temp=pd.DataFrame()
df_drop=pd.DataFrame()
#===================End of Delete ==========================
#data_viz.to_csv("data_viz.csv", index=False)
data_viz= data_viz.drop(data_viz.columns[[0]], axis=1)
data_viz= data_viz*100
data_viz= data_viz.astype(float)
#st.write("Data viz: head ", data_viz.head())
#st.write("Data viz: Stat ", data_viz.describe())
data_viz.drop( data_viz[ data_viz.Forecast_Reference >4500 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.Forecast_Model >4500 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.Actual_Target >5000 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN1>5000 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN2>5000 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN3>5000 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN4>5000 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN5>5000 ].index , inplace=True)
#st.write("Data viz: Stat 2- after cut off of 4500-5000 ", data_viz.describe())
#st.success("Stop")
#data_viz= data_viz*100
#data_viz["Forecast_Reference"]=data_viz["Forecast_Reference"].apply(np.ceil)
data_viz.drop( data_viz[ data_viz.Forecast_Reference <1 ].index , inplace=True)
#data_viz["Forecast_Model"]=data_viz["Forecast_Model"].apply(np.ceil)
data_viz.drop( data_viz[ data_viz.Forecast_Model <1 ].index , inplace=True)
#data_viz["Actual_Target"]=data_viz["Actual_Target"].apply(np.ceil)
data_viz.drop( data_viz[ data_viz.Actual_Target <1 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN1<1 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN2<1 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN3<1 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN4<1 ].index , inplace=True)
data_viz.drop( data_viz[ data_viz.TimeN5<1 ].index , inplace=True)
#data_viz= np.around(data_viz)
#data_viz=data_viz[data_viz.Actual_Target>=0]
#data_viz= data_viz*100 #np.around(data_viz+)
#data_viz=data_viz[data_viz.Actual_Target<5000]
#data_viz=data_viz[data_viz.Forecast_Reference<4200]
#data_viz=data_viz[data_viz.Forecast_Model<4200]
#data_viz_temp=data_viz[data_viz<5000]
if data_viz.empty:
st.error("No Data matches our criteria both for AI Model and Visualization!")
st.warning("Please select another option!")
        st.stop()  # halt the app here
#data_viz.drop( data_viz[ data_viz >5000 ].index , inplace=True)
#st.write("describe data -2")
#st.write(data_viz.describe())
#================= Visualization
#sns.jointplot(data=data_viz, x="target", y="Ensemble")
#sns.pairplot(data=data_viz, hue='color')
#data_viz=pd.read_csv("data_viz.csv", engine='python')
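    # Variance-stabilizing power transform (Box-Cox form with lambda = 0.2): x -> (x**0.2 - 1) / 0.2.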
i=0.2
data_viz=(data_viz**i-1)/i
#data_viz=np.log(data_viz)
#st.write("Data viz: Stat3333333333333 ", data_viz.describe())
huecolor=data_viz.Actual_Target.values
huecolor=huecolor.astype(int)
data_viz["huecolor"]=huecolor.astype(int)
#data_viz=data_viz[data_viz>0]
#st.write("describe data -2")
#st.write(data_viz.describe())
huecolor=data_viz.Actual_Target.values.astype(int)
data_viz["huecolor"]=huecolor.astype(int)
#st.title("Hello")
#fig = sns.pairplot(penguins, hue="species")
#st.pyplot(fig)
data_vis=data_viz.copy()
st.write(" ")
st.markdown("<h1 style='text-align: left; color: turquoise;'>Forecast: Reference vs Selected Model </h1>", unsafe_allow_html=True)
# 2-D plot of images
#fig=sns.pairplot(data_viz, hue="huecolor", diag_kind="hist")
#st.pyplot(fig)
data_vis= data_vis.drop(data_vis.columns[[2,3,4,5,6]], axis=1)
#fig=sns.pairplot(data_vis, hue="huecolor", diag_kind="hist")
#st.pyplot(fig)
#data_vis=pd.DataFrame()
#import numpy as np
#import pandas as pd
#import statsmodels.api as sm
#import statsmodels.formula.api as smf
#import matplotlib.pyplot as plt
mod = smf.quantreg('Forecast_Model ~ Actual_Target', data_viz)
res = mod.fit(q=.5)
#st.write(res.summary())
#LRresult = (res.summary2().tables[0])
#st.write(LRresult)
#LRresult = (res.summary2().tables[1])
#st.write(LRresult)
#import statsmodels.api as sm
#model = sm.OLS(y,x)
#res = model.fit()
results_summary = res.summary()
results_as_html = results_summary.tables[0].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
st.write(df_res)
# Note that tables is a list. The table at index 1 is the "core" table. Additionally, read_html puts dfs in a list, so we want index 0
results_as_html = results_summary.tables[1].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
#st.write(df_res)
#print("Stop the program here")
#st.stop()
#from scipy.stats import boxcox
#from matplotlib import pyplot
#hist=pyplot.hist(data_viz['Forecast_Model'],100)
#fig = plt.figure()
#plt.hist(data_viz['Forecast_Model'],100)
#st.plotly_chart(fig)
#====================Stat plot ================================
#mod = smf.quantreg('AiX_AI_Model3 ~ target', covid19_forecast)
#========================================
quantiles = np.arange(.025, .96, .1)
quantiles = np.arange(.05, 0.975, .125)
quantiles=[0.025, 0.25, 0.5, 0.75, 0.975]
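    # fit_model fits the quantile regression of Forecast_Model on Actual_Target at quantile q and
    # returns [q, intercept, slope, slope lower bound, slope upper bound] for the plot below.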
def fit_model(q):
res = mod.fit(q=q)
return [q, res.params['Intercept'], res.params['Actual_Target']] + \
res.conf_int().loc['Actual_Target'].tolist()
models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=['q', 'a', 'b', 'lb', 'ub'])
ols = smf.ols('Forecast_Model ~ Actual_Target', data_viz).fit()
ols_ci = ols.conf_int().loc['Actual_Target'].tolist()
ols = dict(a = ols.params['Intercept'],
b = ols.params['Actual_Target'],
lb = ols_ci[0],
ub = ols_ci[1])
#st.write("Models: ", models)
#st.write("OLS: ", ols)
x = np.arange(data_viz.Actual_Target.min(), data_viz.Actual_Target.max(), 20)
get_y = lambda a, b: a + b * x
fig, ax = plt.subplots(figsize=(18, 10))
for i in range(models.shape[0]):
y = get_y(models.a[i], models.b[i])
ax.plot(x, y, linestyle='dotted', color='grey')
y = get_y(ols['a'], ols['b'])
ax.plot(x, y, color='red', label='OLS-Map')
ax.scatter(data_viz.Actual_Target, data_viz.Forecast_Model, color='blue', alpha=.2)
ax.set_xlim((-1, 20))
ax.set_ylim((-1, 20))
legend = ax.legend()
ax.set_xlabel('Covid19 Actual Cases', fontsize=16)
ax.set_ylabel('Covid19 Forecast/Predictions Cases (Forecast_Model)', fontsize=16);
st.pyplot(fig)
#AI Section starts from here
#===========================AiX-AI Section=================================
#data = data_viz.iloc[:,[0]] # 0,1
#target=data_viz.iloc[:,2].values
data = data_viz.iloc[:,[0,2,3,4,5,6]]
target=data_viz.iloc[:,7].values
data_train, data_test, target_train, target_test = train_test_split(data,target, test_size = 0.30, random_state = 10) #stratify=np.ravel(target))
train =data_train
test =data_test
train_target=target_train
test_target=target_test
X_train, X_val, y_train, y_val = train_test_split(train, train_target, test_size=0.3, shuffle=True)
data=np.round(data*100,0)
target=np.round(target*100,0)
data_train, data_test, target_train, target_test = train_test_split(data,target, test_size = 0.30, random_state = 10) #stratify=np.ravel(target))
train =data_train
test =data_test
train_target=target_train
test_target=target_test
#Save the Model RF
#==========Random Forest===========================
#from sklearn.neighbors import KNeighborsClassifier
#from sklearn.metrics import accuracy_score
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.externals import joblib
#import sklearn.external.joblib as extjoblib
#import joblib
#RForest = RandomForestClassifier(max_depth=15, random_state=1)
#RForest.fit(data_train, np.ravel(target_train))
# Load the Model back from file
    # Due to large memory requirements we are not using this until we can fund more memory
#Pkl_Filename = "Pickle_ML_RForest.pkl"
# this file is too large
#Pkl_Filename = "Pickle_ML_KNN.pkl"
#with open(Pkl_Filename, 'rb') as file:
# Pickle_RForest = pickle.load(file)
#RForest=Pickle_RForest
#======= This way, we can have smaller file to save at github
joblip_Filename = "joblip_ML_RForest.pkl"
with open(joblip_Filename, 'rb') as file:
joblip_RForest=joblib.load(file)
RForest=joblip_RForest
pred = RForest.predict(data_test)
pred_rf= RForest.predict(data)
pred=np.round(pred,0)
pred_rf=np.round(pred_rf,0)
rf_acc=1-accuracy_score(np.ravel(target_test), pred)
#=====================Decision Tree - D3 ===========================
#======= This way, we can have smaller file to save at github
joblip_Filename = "joblip_ML_D3Forest.pkl"
with open(joblip_Filename, 'rb') as file:
joblip_D3Forest=joblib.load(file)
RForest=joblip_D3Forest
pred = RForest.predict(data_test)
pred_d3= RForest.predict(data)
pred=np.round(pred,0)
pred_d3=np.round(pred_d3,0)
d3_acc=1-accuracy_score(np.ravel(target_test), pred)
#=====================KNN ===========================
#A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().
#import necessary modules
#from sklearn.neighbors import KNeighborsClassifier
#from sklearn.metrics import accuracy_score
    #create an object of the classifier
#neigh = KNeighborsClassifier(n_neighbors=1)
#Train the algorithm
#neigh.fit(data_train, np.ravel(target_train))
# Load the Model back from file
Pkl_Filename = "Pickle_ML_KNN.pkl"
with open(Pkl_Filename, 'rb') as file:
Pickle_RForest = pickle.load(file)
neigh=Pickle_RForest
# predict the response
pred = neigh.predict(data_test)
# evaluate accuracy
pred_knn=neigh.predict(data)
pred=np.round(pred,0)
pred_knn=np.round(pred_knn,0)
knn_acc=1-accuracy_score(np.ravel(target_test), pred)
#=============MLP =============================
#from sklearn.neural_network import MLPRegressor
#from sklearn.datasets import make_regression
#from sklearn.model_selection import train_test_split
#from sklearn.externals import joblib
#regr = MLPRegressor(random_state=1, max_iter=5000).fit(data_train, target_train)
# Load the Model back from file
Pkl_Filename = "Pickle_ML_MLP.pkl"
with open(Pkl_Filename, 'rb') as file:
Pickle_RForest = pickle.load(file)
regr=Pickle_RForest
pred = regr.predict(data_test)
pred=np.round(pred,0)
pred_MLP=regr.predict(data)
pred_MLP=np.round(pred_MLP,0)
mlp_acc=1-accuracy_score(np.ravel(target_test), pred)
#==============AI Aggregator =====================
#df_MLearning=pd.DataFrame({"RF":pred_rf, "KNN":pred_knn, "MLP":pred_MLP})
df_MLearning=pd.DataFrame({"AiX_AI_Model1":pred_rf, "AiX_AI_Model2":pred_knn, "AiX_AI_Model3":pred_MLP, "AiX_AI_Model_D3":pred_d3 })
#df_MLearning["ML_Median"]=df_MLearning.median(axis=1)
#df_MLearning["ML_Mean"]=df_MLearning.mean(axis=1)
#df_MLearning["AiX_AI_Model4"]=df_MLearning.median(axis=1)
#df_MLearning["AiX_AI_Model5"]=df_MLearning.iloc[:,[0,1,2,3]].mean(axis=1)
df_MLearning["AiX_AI_Model4"]=df_MLearning.iloc[:,[0,1,3]].median(axis=1)
df_MLearning["AiX_AI_Model5"]=df_MLearning.iloc[:,[0,1,3]].mean(axis=1)
#df_smart= (df_MLearning["AiX_AI_Model1"]*rf_acc + df_MLearning["AiX_AI_Model2"]*knn_acc+ df_MLearning["AiX_AI_Model_D3"]*d3_acc+
# df_MLearning["AiX_AI_Model3"]*mlp_acc*0+df_MLearning["AiX_AI_Model4"]*4+ # Median 75% - Mean 25%
df_smart= (df_MLearning["AiX_AI_Model1"]*rf_acc + df_MLearning["AiX_AI_Model2"]*knn_acc+ df_MLearning["AiX_AI_Model_D3"]*d3_acc+
df_MLearning["AiX_AI_Model4"]*4+ # Median 75% - Mean 25%
df_MLearning["AiX_AI_Model5"]*2)/(rf_acc+knn_acc+d3_acc+6)
df_MLearning["AiX_AI_Smart_Committee_Machine"]=df_smart # df_MLearning.mean(axis=1)
df_MLearning["target"]=target
#df_MLearning1=df_MLearning.copy()/100
#df_MLearning1=df_MLearning1.astype(int)
#st.write(df_MLearning1.head())
# No Need for Deployed App
#======================================================
#st.write(" ")
#st.markdown("<h1 style='text-align: left; color: red;'> AiX - AI Smart Committee Machine</h1>", unsafe_allow_html=True)
#=============ML Plots================
data_ML=df_MLearning.copy()/100
df_MLearning["huecolor"]= data_viz["huecolor"].values #(huecolor).astype(int)
data_ML["huecolor"]= data_viz["huecolor"].values # (huecolor).astype(int)
#fig=sns.pairplot(data_ML, hue="huecolor", diag_kind="hist")
#st.pyplot(fig)
df_MLearning=pd.DataFrame()
#======================ML Output ======================
covid19_forecast=data_ML.iloc[:,]
#median_acc=np.median([rf_acc, knn_acc, mlp_acc, d3_acc])
#mean_acc=np.mean([rf_acc, knn_acc, mlp_acc, d3_acc])
#smart_acc=1-(rf_acc+knn_acc+ mlp_acc+d3_acc+ 4*median_acc+ 2*mean_acc)/(rf_acc+knn_acc+mlp_acc+ d3_acc+6)
median_acc=np.median([rf_acc, knn_acc, d3_acc])
mean_acc=np.mean([rf_acc, knn_acc, d3_acc])
smart_acc=1-(rf_acc+knn_acc+d3_acc+ 4*median_acc+ 2*mean_acc)/(rf_acc+knn_acc+ d3_acc+6)
#===========Smart Committee Machine #========================
st.write(" ")
st.markdown("<h1 style='text-align: left; color: turquoise;'> AiX - AI Smart Committee Machine</h1>", unsafe_allow_html=True)
st.write("AiX-AI Smart Committee Machine accuracy score : ", smart_acc)
mod = smf.quantreg('AiX_AI_Smart_Committee_Machine ~ target', covid19_forecast)
res = mod.fit(q=.5)
results_summary = res.summary()
results_as_html = results_summary.tables[0].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
st.write(df_res)
# Note that tables is a list. The table at index 1 is the "core" table. Additionally, read_html puts dfs in a list, so we want index 0
results_as_html = results_summary.tables[1].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
#st.write(df_res)
#============== Model 1 ============================
st.write(" ")
st.markdown("<h1 style='text-align: left; color: turquoise;'> AiX - AI Model1</h1>", unsafe_allow_html=True)
st.write("AiX-AI Model 1 accuracy score : ", 1-rf_acc)
mod = smf.quantreg('AiX_AI_Model1 ~ target', covid19_forecast)
res = mod.fit(q=.5)
results_summary = res.summary()
results_as_html = results_summary.tables[0].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
st.write(df_res)
# Note that tables is a list. The table at index 1 is the "core" table. Additionally, read_html puts dfs in a list, so we want index 0
results_as_html = results_summary.tables[1].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
#st.write(df_res)
#============== Model 2 ============================
st.write(" ")
st.markdown("<h1 style='text-align: left; color: turquoise;'> AiX - AI Model2</h1>", unsafe_allow_html=True)
st.write("AiX-AI Model 2 accuracy score : ", 1-knn_acc)
mod = smf.quantreg('AiX_AI_Model2 ~ target', covid19_forecast)
res = mod.fit(q=.5)
results_summary = res.summary()
results_as_html = results_summary.tables[0].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
st.write(df_res)
# Note that tables is a list. The table at index 1 is the "core" table. Additionally, read_html puts dfs in a list, so we want index 0
results_as_html = results_summary.tables[1].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
#st.write(df_res)
#============== Model 3 ============================
st.write(" ")
st.markdown("<h1 style='text-align: left; color: turquoise;'> AiX - AI Model3</h1>", unsafe_allow_html=True)
st.write("AiX-AI Model 3 accuracy score : ", 1-mlp_acc)
mod = smf.quantreg('AiX_AI_Model3 ~ target', covid19_forecast)
res = mod.fit(q=.5)
results_summary = res.summary()
results_as_html = results_summary.tables[0].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
st.write(df_res)
# Note that tables is a list. The table at index 1 is the "core" table. Additionally, read_html puts dfs in a list, so we want index 0
results_as_html = results_summary.tables[1].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
#st.write(df_res)
#=================
#============== Model D-3 ============================
st.write(" ")
st.markdown("<h1 style='text-align: left; color: turquoise;'> AiX - AI Model4</h1>", unsafe_allow_html=True)
st.write("AiX-AI Model 3 accuracy score : ", 1-d3_acc)
mod = smf.quantreg('AiX_AI_Model_D3 ~ target', covid19_forecast)
res = mod.fit(q=.5)
results_summary = res.summary()
results_as_html = results_summary.tables[0].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
st.write(df_res)
# Note that tables is a list. The table at index 1 is the "core" table. Additionally, read_html puts dfs in a list, so we want index 0
results_as_html = results_summary.tables[1].as_html()
df_res=pd.read_html(results_as_html, header=0, index_col=0)[0]
#st.write(df_res)
#============== Model 4 ============================
st.write(" ")
st.markdown("<h1 style='text-align: left; color: turquoise;'> AiX - AI Model5</h1>", unsafe_allow_html=True)
st.write("AiX-AI Model 5 accuracy score : ", 1-np.median([rf_acc, knn_acc, d3_acc]))
mod = smf.quantreg('AiX_AI_Model4 ~ target', covid19_forecast)
res = mod.fit(q=.5)
results_summary = res.summary()
results_as_html = results_summary.tables[0].as_html()
df_res=
|
pd.read_html(results_as_html, header=0, index_col=0)
|
pandas.read_html
|
# to estimate flood control volume from ReGeom data
from datetime import datetime
from datetime import date
import os
import numpy as np
import pandas as pd
import sys
from dateutil.relativedelta import relativedelta
print(os.path.basename(__file__))
##### initial setting ------------------------------
tag = sys.argv[1]
dam_file = './'+tag+'/damloc_modified.csv'
## link
GRSADdir = "./inp/GRSAD/"
ReGeomdir = "./inp/ReGeom/"
ReGeom_ErrorFile = "./inp/ReGeom_Error.csv"
output_file = './'+tag+'/tmp_p03_fldsto.csv'
#### parameters to calculate flood control volume
pc = 75 ## percentile of surface area timeseries
s_yr, s_mon = 1984, 3
e_yr, e_mon = 2018, 12
#### read database --------------------------
grand = pd.read_csv(dam_file)
error = pd.read_csv(ReGeom_ErrorFile)
#### dam loop -----------------------------
cols = ['damid', 'damname', 'ave_area', 'fldsto_mcm', 'totalsto_mcm']
df_new = pd.DataFrame(index=[], columns=cols)
for i in range(len(grand)):
gr = grand.iloc[i:i+1]
nm = gr['damid'].values[0]
damname = gr['damname'].values[0]
totalsto = gr['totalsto_mcm'].values[0]
print('')
print('------')
print(nm, damname)
#if nm > 6820:
# continue
error_i = error.query('GRAND_ID == @nm')
## read timeseries file -----
grsadpath = GRSADdir + '/'+ str(nm) + '_intp'
if not os.path.isfile(grsadpath):
print('file not found: ' +str(grsadpath))
df_i = [nm, damname, np.nan, np.nan, totalsto]
df_i = pd.Series(df_i, index=df_new.columns)
df_new = df_new.append(df_i, ignore_index=True)
continue
import pandas as pd
df = pd.read_table(grsadpath, index_col=0, parse_dates=True)
data = df.dropna()
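    # Surface-area values that repeat more than 12 times in the series are treated as
    # fill/error values and removed from the timeseries.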
if np.max(df['3water_enh'].value_counts()) > 12:
rm_df = df['3water_enh'].value_counts()
rm_df = rm_df[rm_df > 12]
rm_df = rm_df.index
for j in range(len(rm_df)):
rm_val = rm_df[j]
data['3water_enh'] = data['3water_enh'].replace(rm_val, np.nan)
data = data.dropna()
data = data['3water_enh']
#print(data)
if len(data) < 2:
df_i = [nm, damname, np.nan, np.nan, totalsto]
df_i =
|
pd.Series(df_i, index=df_new.columns)
|
pandas.Series
|
'''
eval_pretrained_sklearn_binary_classifier.py
Usage
-----
$ python eval_pretrained_sklearn_binary_classifier.py \
--dataset_path [path] \
--pretrained_clf_path [path] \
[optional args]
Optional arguments
------------------
--dataset_path DATASET_PATH
Path to folder containing:
*.npy files: X_train, y_train, P_train
*.txt files: X_colnames.txt, y_colnames.txt
--pretrained_clf_path OUTPUT_PATH
Path to folder holding output from this evaluator.
Includes:
* clf_<id>_.dump : loadable clf object
* clf_<id>_callback_train.csv : perf metrics
'''
from __future__ import print_function
import numpy as np
import pandas as pd
import datetime
import sys
import os
import argparse
import itertools
import time
import scipy.sparse
from collections import OrderedDict
from distutils.dir_util import mkpath
from sklearn.externals import joblib
from sklearn.metrics import roc_curve, auc, roc_auc_score, accuracy_score
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import Normalizer, Binarizer
from sklearn.pipeline import Pipeline
from pc_toolbox.utils_io import (
load_csr_matrix, pprint, config_pprint_logging,
load_list_of_strings_from_txt,
load_list_of_unicode_from_txt,
)
from train_and_eval_sklearn_binary_classifier import (
make_constructor_and_evaluator_funcs,
ThresholdClassifier,
)
def read_args_from_stdin_and_run():
    ''' Main executable function to evaluate a pretrained classifier.
Post Condition
--------------
AUC and other eval info printed to stdout.
Trained classifier saved ???.
'''
if not sys.stdin.isatty():
for line in sys.stdin.readlines():
line = line.strip()
sys.argv.extend(line.split(' '))
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset_path',
default='/tmp/',
type=str,
help="Path to folder containing:" +
" *.npy files: X_train, y_train, P_train"
" *.txt files: X_colnames.txt and y_colnames.txt")
parser.add_argument(
'--pretrained_clf_path',
default='/tmp/',
type=str,
help="Path to folder to hold output from classifier. Includes:" +
" perf_metric*.txt files: auc_train.txt & auc_test.txt" +
" settings.txt: description of all settings to reproduce.")
parser.add_argument(
'--split_names',
default='test')
parser.add_argument(
'--split_nicknames',
default='evaltest')
parser.add_argument(
'--features_path',
default='/tmp/',
type=str,
help="Path to folder with SSAMfeat*.npy files")
parser.add_argument(
'--target_arr_name',
default='Y',
type=str,
)
parser.add_argument(
'--target_names',
default='all',
type=str,
help='Name of response/intervention to test.' +
' To try specific interventions, write names separated by commas.' +
' To try all interventions, use special name "all"')
parser.add_argument(
'--seed_bootstrap',
default=42,
type=int,
help='Seed for bootstrap')
parser.add_argument(
'--n_bootstraps',
default=5000,
type=int,
help='Number of samples for bootstrap conf. intervals')
parser.add_argument(
'--bootstrap_stratify_pos_and_neg',
default=True,
type=int,
help='Whether to stratify examples or not')
args, unk_list = parser.parse_known_args()
arg_dict = vars(args)
dataset_path = arg_dict['dataset_path']
assert os.path.exists(arg_dict['pretrained_clf_path'])
output_path = arg_dict['pretrained_clf_path']
clf_opts = list()
    # Read back the plain-text settings file saved alongside the pretrained classifier
    # so we can exactly reproduce its configuration
with open(os.path.join(output_path, 'settings.txt'), 'r') as f:
for line in f.readlines():
line = line.strip()
clf_opts.append(line.split(' = '))
clf_opts = dict(clf_opts)
feat_path_list = [
arg_dict['dataset_path'],
arg_dict['features_path']]
pprint('[run_classifier says:] Loading dataset ...')
start_time = time.time()
feature_arr_names = clf_opts['feature_arr_names'].split(',')
pprint('feature_arr_names:')
feat_colnames_by_arr = OrderedDict()
for feat_arr_name in feature_arr_names:
pprint(feat_arr_name)
cur_feat_colnames = None
for feat_path in feat_path_list:
colname_fpath = os.path.join(
feat_path,
feat_arr_name + '_colnames.txt')
if os.path.exists(colname_fpath):
cur_feat_colnames = \
[unicode(feat_arr_name + ":") + s
for s in load_list_of_unicode_from_txt(colname_fpath)]
break
feat_colnames_by_arr[feat_arr_name] = cur_feat_colnames
target_arr_name = arg_dict['target_arr_name']
all_target_names = load_list_of_strings_from_txt(os.path.join(
arg_dict['dataset_path'],
target_arr_name + '_colnames.txt'))
target_names = arg_dict['target_names']
if target_names == 'all':
target_names = all_target_names
target_cols = np.arange(len(all_target_names)).tolist()
else:
target_names = target_names.split(',')
target_cols = list()
for name in target_names:
assert name in all_target_names
target_cols.append(all_target_names.index(name))
datasets_by_split = dict()
split_nicknames = arg_dict['split_nicknames'].split(',')
split_names = arg_dict['split_names'].split(',')
for nickname, split_name in zip(split_nicknames,split_names):
datasets_by_split[nickname] = dict()
split_dataset = datasets_by_split[nickname]
# Load Y
dense_fpath = os.path.join(
dataset_path,
target_arr_name + "_%s.npy" % split_name)
y = np.asarray(np.load(dense_fpath), order='C', dtype=np.int32)
if y.ndim < 2:
y = y[:,np.newaxis]
assert y.ndim == 2
assert y.shape[1] == len(all_target_names)
split_dataset['y'] = y[:, target_cols]
assert split_dataset['y'].shape[1] == len(target_cols)
# Load X
x_list = list()
for feat_arr_name in feature_arr_names:
x_cur = None
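            # Try each candidate folder and split name in turn: prefer a sparse CSR dump
            # (<feat_arr_name>_csr_<split>.npz) and fall back to a dense <feat_arr_name>_<split>.npy array.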
def fpath_generator():
for feat_path in feat_path_list:
for sname in [nickname, split_name]:
dense_fpath = os.path.join(
feat_path, feat_arr_name + "_" + sname + ".npy")
sparse_fpath = os.path.join(
feat_path, feat_arr_name + "_csr_" + sname + ".npz")
yield dense_fpath, sparse_fpath
ds_path_list = [pair for pair in fpath_generator()]
for ii, (dense_fpath, sparse_fpath) in enumerate(ds_path_list):
try:
if os.path.exists(sparse_fpath):
x_cur = load_csr_matrix(sparse_fpath)
assert np.all(np.isfinite(x_cur.data))
break
else:
x_cur = np.asarray(
np.load(dense_fpath),
order='C', dtype=np.float64)
if x_cur.ndim < 2:
x_cur = np.atleast_2d(x_cur).T
assert np.all(np.isfinite(x_cur))
break
except IOError as e:
if ii == len(ds_path_list) - 1:
# Couldn't find desired file in any feat_path
raise e
else:
# Try the next feat_path in the list
pass
if x_cur is not None:
if feat_colnames_by_arr[feat_arr_name] is not None:
feat_dim = len(feat_colnames_by_arr[feat_arr_name])
assert x_cur.shape[1] == feat_dim
else:
# Add dummy colnames
feat_dim = x_cur.shape[1]
n_sig_digits = np.maximum(
3, int(np.ceil(np.log10(feat_dim))))
fmt_str = "%s_%0" + str(n_sig_digits) + "d"
feat_colnames_by_arr[feat_arr_name] = [
fmt_str % (feat_arr_name, fid)
for fid in range(feat_dim)]
x_list.append(x_cur)
if isinstance(x_list[0], np.ndarray):
split_dataset['x'] = np.hstack(x_list)
else:
split_dataset['x'] = scipy.sparse.hstack(x_list, format='csr')
assert split_dataset['x'].ndim == 2
assert split_dataset['x'].shape[0] == split_dataset['y'].shape[0]
assert (
isinstance(split_dataset['x'], np.ndarray)
or isinstance(split_dataset['x'], scipy.sparse.csr_matrix)
)
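        # Added note (not part of the original pipeline): the branch above keys the
        # stacking off the type of the first feature block only, roughly
        #     np.hstack([A, B])                          # when x_list[0] is dense
        #     scipy.sparse.hstack([A, B], format='csr')  # when x_list[0] is sparse
        # so the code implicitly assumes every block shares the first block's type.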
if split_name == split_names[0]:
# Flatten feat colnames into single list
feat_colnames = sum(feat_colnames_by_arr.values(), [])
assert isinstance(feat_colnames, list)
assert len(feat_colnames) == split_dataset['x'].shape[1]
print('y colnames: %s' % ' '.join(target_names))
if len(feat_colnames) > 10:
print('x colnames: %s ... %s' % (' '.join(feat_colnames[:5]), ' '.join(feat_colnames[-5:])))
else:
print('x colnames: %s' % ' '.join(feat_colnames))
print('---- %5s dataset summary' % split_name)
print('%9d total examples' % y.shape[0])
print('y : %d x %d targets' % split_dataset['y'].shape)
print('x : %d x %d features' % split_dataset['x'].shape)
for c in xrange(len(target_names)):
y_c = split_dataset['y'][:,c]
print('target %s : frac pos %.3f' % (target_names[c], np.mean(y_c)))
print(' %6d pos examples' % np.sum(y_c == 1))
print(' %6d neg examples' % np.sum(y_c == 0))
elapsed_time = time.time() - start_time
print('[run_classifier says:] dataset loaded after %.2f sec.' % elapsed_time)
n_cols = len(target_names)
for c in xrange(n_cols):
print('[eval_pretrained_classifier says:] eval for target %s' % target_names[c])
eval_pretrained_clf(
classifier_name=clf_opts['classifier_name'],
classifier_path=arg_dict['pretrained_clf_path'],
datasets_by_split=datasets_by_split,
y_col_id=c,
y_orig_col_id=all_target_names.index(target_names[c]),
y_col_name=target_names[c],
feat_colnames=feat_colnames,
output_path=arg_dict['pretrained_clf_path'],
seed_bootstrap=arg_dict['seed_bootstrap'],
n_bootstraps=arg_dict['n_bootstraps'],
bootstrap_stratify_pos_and_neg=arg_dict['bootstrap_stratify_pos_and_neg'],
)
elapsed_time = time.time() - start_time
print('[eval_pretrained_classifier says:] target %s completed after %.2f sec' % (target_names[c], elapsed_time))
def eval_pretrained_clf(
classifier_path='/tmp/',
classifier_name='logistic_regression',
datasets_by_split=None,
verbose=True,
feat_colnames=None,
y_col_id=0,
y_orig_col_id=0,
y_col_name='',
output_path='/tmp/',
seed_bootstrap=42,
n_bootstraps=5000,
bootstrap_stratify_pos_and_neg=True,
):
start_time = time.time()
(make_classifier, score_classifier, calc_best_idx,
make_clf_report, make_csv_row_dict, make_interp_report) = \
make_constructor_and_evaluator_funcs(
classifier_name,
n_bootstraps=n_bootstraps,
seed_bootstrap=seed_bootstrap,
bootstrap_stratify_pos_and_neg=bootstrap_stratify_pos_and_neg)
# Read classifier obj from disk
clf_path = os.path.join(
classifier_path,
'clf_%d_object.dump' % (y_orig_col_id))
best_clf = joblib.load(clf_path)
if os.path.exists(output_path):
n_keys = len(datasets_by_split.keys())
for ss, split in enumerate(datasets_by_split.keys()):
csv_fpath = os.path.join(
output_path,
'clf_%d_callback_%s.csv' % (y_orig_col_id, split))
row_dict = make_csv_row_dict(
best_clf,
datasets_by_split[split]['x'],
datasets_by_split[split]['y'][:, y_col_id],
y_col_name,
split,
classifier_name)
csv_df =
|
pd.DataFrame([row_dict])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 11:30:17 2019
@author: max
"""
#!/usr/bin/env python3
import os
import sys
#import seaborn as sns
import numpy as np
import pandas as pd
import scipy.stats as stats
#import matplotlib.pyplot as plt
import argparse
import plotly
#import plotly.plotly as py
import plotly.graph_objs as go
#init_notebook_mode(connected=True)
#import statsmodels.api as sm
#from xattr import xattr
import time
#import subprocess
from plotly import __version__
#from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
print(__version__) # requires version >= 1.9.0
#from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.formula.api import ols
sys.path.append(os.path.realpath(__file__))
import load_data as exp
import statistics
#from load_data import KnockdownFeatures_class as kd
'''
This script takes a list of folders and a list of knockdowns as input.
These are used as input to the load_data.py module.
This script contains several statistical functions to analyze the data object
created by load_data.py.
It can be run from the command line and gives the option to run a Bonferroni corrected
t-test to compare the knockdowns with the respective control and to print figures for all features
with significance annotations. Figures can either be based on raw data or on the z_score.
In addition it gives the option to print .csv files of median feature values to be fed
into the PCA analysis app.
Dependencies:
KnockdownFeatures_class.py
load_data.py
'''
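# =============================================================================
# Rough sketch of the Bonferroni correction mentioned above, for orientation only;
# calc_Bonferroni (defined elsewhere in this script) is the actual implementation.
# With m knockdown-vs-control comparisons per feature, each two-sample t-test
# (e.g. stats.ttest_ind, already imported above) is judged against alpha/m:
#
#     p_values = [stats.ttest_ind(kd_values, ctrl_values)[1]
#                 for kd_values in knockdown_groups]   # hypothetical variables
#     alpha_corrected = 0.05 / len(p_values)
#     significant = [p < alpha_corrected for p in p_values]
# =============================================================================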
#%%
#add the paths to the experiment folders
# =============================================================================
# path=['/Users/max/Desktop/Office/test/data_test/SiRNA_31/segmented/']
# #add the knockdowns you want to load
# knockdowns=['CTRL', 'ARHGAP17', 'DOCK10', 'ITSN1']
# =============================================================================
def parseArguments():
# Define the parser and read arguments
parser = argparse.ArgumentParser(description='a function including various statistical tools to be applied to the data objects.')
parser.add_argument('-d','--dir', nargs='+', help='add the directories with spaces between them', required=True)
parser.add_argument('-k','--kd', nargs='+', help='add the knockdown folder names with spaces between them', required=True)
parser.add_argument('-t','--TSNE', help='set True for TSNE output, \'z_score\' for TSNE output as z_score \
\'long\' for long format csv with z_score values. leave empty to skip this output', required=False)
    parser.add_argument('-f','--figures', help='set True for figure printing of raw data, z_score for figure printing of z_scores, featureplot for featureplots. Leave empty to skip this output', required=False)
args = parser.parse_args()
return(args)
#%%
# =============================================================================
# def boxplot(feature, value):
# #makes a boxplot of the values from one feature, grouped by knockdown
# ax=sns.catplot(x='experiment', y=value, hue='KD',\
# data=data.grouped_features[feature], kind='box')
# sig, alpha=calc_Bonferroni(feature)
# plot_median=data.grouped_features[feature].groupby(['experiment', 'KD'])[value].median()
# nobs=[sig[x][1] for x in sig]
# axes = ax.axes.flatten()
# axes[0].set_xlabel(feature)
# pos=range(len(nobs))
# sns.FacetGrid.set_xticklabels(ax, nobs)
# plt.show()
# # ax=ax.get_figure()
# plt.close()
# return ax
# =============================================================================
#axes[1].set_title("External")
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
        print('Error: could not create directory ' + directory)
def median(a, l, r):
    # returns the index of the (lower) median position between l and r, e.g.
    # median(a, 0, 9) -> 4; note that the array argument a itself is never inspected
    n = r - l + 1
    n = (n + 1) // 2 - 1
    return n + l
def featureplot(KD, value):
#to_tag=False
# =============================================================================
# DEFAULT_PLOTLY_COLORS=['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
# 'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
# 'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
# 'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
# 'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
# =============================================================================
    #if the wide_feature attribute does not already exist on data, call
    #pca_feature_data() and pca_attribute_data()
if hasattr(data, 'wide_feature')==False:
data.pca_feature_data(value=value)
data.pca_attribute_data()
#creates a variable of data to plot
if hasattr(data, 'KD_plot_data')==False:
KD_plot_data=data.wide_feature
#adds the column for the knockdowns from the attribute_data
KD_plot_data['KD']=data.wide_attribute['knockdown']
#melts it to long format
KD_plot_data=pd.melt(KD_plot_data, id_vars='KD')
data.KD_plot_data=KD_plot_data.rename(columns={'variable':'feature', 'value':value})
to_plot=data.KD_plot_data[(data.KD_plot_data['KD']==KD)]
# =============================================================================
# z_score_mask=(data.grouped_features[feature]['KD']!='CTRL')
# #excluding the control from plots showing the z_score
# if value == 'z_score':
# x_data=list(data.grouped_features[feature][z_score_mask].groupby(['experiment', 'KD']).groups.keys())
# y_index=data.grouped_features[feature][z_score_mask].groupby(['experiment', 'KD'])[value]
# else:
# =============================================================================
#gets the keys to the groups for indexing
x_data=list(to_plot.groupby(['feature']).groups.keys())
#gets a group object the groups are referring to
y_index=to_plot.groupby(['feature'])[value]
#y_data=data.grouped_features[feature].iloc[list(y_index.groups[x_data[0]])]['value']
#y_data=data.grouped_features[feature].groupby(['experiment', 'KD']).groups[x_data[1]]
traces=[]
#Q3=[]
#rescale_values=[]
#lower_rescale_values=[]
#colour_dict={}
# =============================================================================
# for enum, kd in enumerate(data.knockdowns):
# if enum >= len(DEFAULT_PLOTLY_COLORS):
# enum=0
# #making a colour dictionary, to give each box its own colour based on the knockdown group
# if kd not in colour_dict.keys():
# colour_dict.update({kd:DEFAULT_PLOTLY_COLORS[enum]})
# =============================================================================
#sig, alpha=calc_Bonferroni(feature)
#https://stackoverflow.com/questions/26536899/how-do-you-add-labels-to-a-plotly-boxplot-in-python
for enum, xd in enumerate(x_data):
#rescale_values.append(to_plot.loc[list(y_index.groups[xd])][value].std()+to_plot.loc[list(y_index.groups[xd])][value].median())
#lower_rescale_values.append(-1*(to_plot.loc[list(y_index.groups[xd])][value].std())-to_plot.loc[list(y_index.groups[xd])][value].median())
#Q3.append(IQR(list(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value]), len(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value])))
traces.append(go.Box(
#list(y_index.groups[xd]) applies the index of one group to the grouped dataframe to obtain
            # a list of indices for that group. This list of indices is used to index the dataframe, and obtain
#the value column of it.
y=to_plot.loc[list(y_index.groups[xd])][value],
name=str(xd),
            #boxpoints=False: do not draw the individual points next to the box
boxpoints=False,
#boxpoint='all',
jitter=0.5,
whiskerwidth=0.2,
marker=dict(
size=2,
#color=colour_dict[xd[1]]
),
line=dict(width=1),
))
# =============================================================================
# if value=='z_score':
# lower_limit=3*statistics.median(lower_rescale_values)
# else:
# lower_limit=0
# upper_limit=4*statistics.median(rescale_values)
# =============================================================================
layout = go.Layout(
boxgap=0,
boxgroupgap=0,
title=KD,
autosize=True,
yaxis=dict(
#autorange=True,
showgrid=True,
zeroline=True,
dtick=5,
gridcolor='rgb(0, 0, 0)',
gridwidth=1,
zerolinecolor='rgb(0, 0, 0)',
zerolinewidth=2,
range=[-5, 5]
# automargin=True,
),
# =============================================================================
# margin=dict(
# l=40,
# r=30,
# b=80,
# t=3*max(Q3),
# ),
# =============================================================================
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)',
showlegend=False
)
fig = go.Figure(data=traces, layout=layout)
#counts the number of observations for each group
# =============================================================================
# count=to_plot.groupby(['feature'])[value].count()
# for enum, xd in enumerate(x_data):
# sig_index=xd[0]+xd[1]
# #gets the number of observations for the current box
# n=str(count[xd])
#
#
# #getting the title for each column the following way:
# #getting the sig_index by concatonating the two strings of xd
# #and using this as the key for the bonferrony corrected t_test
# #to obtain the second value, which is the p value
# try:
# p=round(sig[sig_index][1], 4)
# #adds a star if the p value is significant
# if p < alpha:
# p=str(p)
# p=p+'*'
# #marks the plot as being significant
# to_tag=True
# p=str(p)
# #exception, if no p value exists (i.e. for control)
# except:
# p=''
#
# fig['layout']['annotations']+=tuple([dict(
# #positions on x axis based on current box
# x=enum,
# #positions text based on y axis based on the median of current box
# y=to_plot.groupby(['feature'])[value].median(),
# yref='y',
# xref='x',
# text='p: {}<br>n: {}'.format('NA', n),
# showarrow=True,
# #determines the length of the arrow for the annotation text
# arrowhead=0,
# ax=0,
# ay=-10
# )])
# =============================================================================
# =============================================================================
# if to_tag==True:
# #saves the plot in a different folder, if one or more groups show significance
# sig_folder=os.path.join(path[0], 'significant')
# createFolder(sig_folder)
# file='{}/{}.html'.format(sig_folder,KD)
# else:
# =============================================================================
file='{}{}.html'.format(path[0],KD)
plotly.offline.plot(fig, filename = file, image='svg', auto_open=False)
return fig
def loop_featureplot(value):
'''
creates a graph for each knockdown
'''
for k in data.knockdowns:
featureplot(k, value)
time.sleep(1)
def pyplot(feature, value):
to_tag=False
DEFAULT_PLOTLY_COLORS=['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
z_score_mask=(data.grouped_features[feature]['KD']!='CTRL')
#excluding the control from plots showing the z_score
if value == 'z_score':
x_data=list(data.grouped_features[feature][z_score_mask].groupby(['experiment', 'KD']).groups.keys())
y_index=data.grouped_features[feature][z_score_mask].groupby(['experiment', 'KD'])[value]
else:
#gets the keys to the groups for indexing
x_data=list(data.grouped_features[feature].groupby(['experiment', 'KD']).groups.keys())
#gets a group object the groups are referring to
y_index=data.grouped_features[feature].groupby(['experiment', 'KD'])[value]
#y_data=data.grouped_features[feature].iloc[list(y_index.groups[x_data[0]])]['value']
#y_data=data.grouped_features[feature].groupby(['experiment', 'KD']).groups[x_data[1]]
traces=[]
#Q3=[]
rescale_values=[]
lower_rescale_values=[]
colour_dict={}
for enum, kd in enumerate(knockdowns):
if enum >= len(DEFAULT_PLOTLY_COLORS):
enum=0
#making a colour dictionary, to give each box its own colour based on the knockdown group
if kd not in colour_dict.keys():
colour_dict.update({kd:DEFAULT_PLOTLY_COLORS[enum]})
sig, alpha=calc_Bonferroni(feature)
#https://stackoverflow.com/questions/26536899/how-do-you-add-labels-to-a-plotly-boxplot-in-python
for enum, xd in enumerate(x_data):
rescale_values.append(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].std()+data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].median())
lower_rescale_values.append(-1*(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].std())-data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].median())
#Q3.append(IQR(list(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value]), len(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value])))
traces.append(go.Box(
#list(y_index.groups[xd]) applies the index of one group to the grouped dataframe to obtain
            # a list of indices for that group. This list of indices is used to index the dataframe, and obtain
#the value column of it.
y=data.grouped_features[feature].loc[list(y_index.groups[xd])][value],
name=str(xd),
#adds the points for each value next to the box
boxpoints='all',
#boxpoint='all',
jitter=0.5,
whiskerwidth=0.2,
marker=dict(
size=2,
color=colour_dict[xd[1]]
),
line=dict(width=1),
))
if value=='z_score':
lower_limit=-8
upper_limit=8
else:
lower_limit=0
upper_limit=4*statistics.median(rescale_values)
layout = go.Layout(
title=feature,
autosize=True,
yaxis=dict(
#autorange=True,
showgrid=True,
zeroline=True,
dtick=5,
gridcolor='rgb(255, 255, 255)',
gridwidth=1,
zerolinecolor='rgb(255, 255, 255)',
zerolinewidth=2,
range=[lower_limit, upper_limit]
# automargin=True,
),
# =============================================================================
# margin=dict(
# l=40,
# r=30,
# b=80,
# t=3*max(Q3),
# ),
# =============================================================================
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)',
showlegend=True
)
fig = go.Figure(data=traces, layout=layout)
#counts the number of observations for each group
count=data.grouped_features[feature].groupby(['experiment', 'KD'])[value].count()
for enum, xd in enumerate(x_data):
sig_index=xd[0]+xd[1]
#gets the number of observations for the current box
n=str(count[xd])
        #getting the title for each column the following way:
        #getting the sig_index by concatenating the two strings of xd
        #and using this as the key for the Bonferroni corrected t_test
        #to obtain the second value, which is the p value
try:
p=round(sig[sig_index][1], 4)
#adds a star if the p value is significant
if p < alpha:
p=str(p)
p=p+'*'
#marks the plot as being significant
to_tag=True
p=str(p)
#exception, if no p value exists (i.e. for control)
except:
p=''
fig['layout']['annotations']+=tuple([dict(
#positions on x axis based on current box
x=enum,
#positions text based on y axis based on the median of current box
y=data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].median(),
yref='y',
xref='x',
text='p: {}<br>n: {}'.format(p, n),
showarrow=True,
#determines the length of the arrow for the annotation text
arrowhead=0,
ax=0,
ay=-10
)])
if to_tag==True:
#saves the plot in a different folder, if one or more groups show significance
sig_folder=os.path.join(path[0], 'significant')
createFolder(sig_folder)
file='{}/{}.html'.format(sig_folder,feature)
else:
file='{}{}.html'.format(path[0],feature)
plotly.offline.plot(fig, filename = file, image='svg', auto_open=False)
return fig, x_data
def loop_graph(function, value):
'''
creates a graph for each feature
'''
for f in data.features:
function(f, value)
time.sleep(1)
#%%
#data.grouped_features[feature].boxplot('z_score', by='KD', figsize=(12, 8))
#either computes the MAD (robust==True),
#or the standard deviation(robust==False)
def MAD_robust(x, robust=True):
if robust==True:
med=np.median(x)
dif=[np.abs(i-med) for i in x]
return np.median(dif)
else:
return np.std(x)
#either computes the median (robust==True), or the mean (robust==False)
def Mean_robust(x, robust=True):
if robust==True:
return np.median(x)
else:
return np.mean(x)
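# =============================================================================
# Worked example (illustration only): for x = [1, 2, 2, 3, 14]
#     MAD_robust(x)  -> median(|x - 2|) = median([1, 0, 0, 1, 12]) = 1.0
#     np.std(x)      -> ~6.6
#     Mean_robust(x) -> 2.0 while np.mean(x) -> 4.4
# so the robust estimators above are far less sensitive to the outlier 14.
# =============================================================================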
def calc_mean_features():
'''
calculates the mean values of each feature grouped by timepoint and by experiment
excluding the ctrl
'''
mean_features=[]
for f in data.features:
temp=pd.DataFrame()
temp=data.grouped_features[f][data.grouped_features[f]['KD']!='CTRL'].groupby(['timepoint', 'experiment', 'KD'], as_index=False).agg({'value':[MAD_robust, Mean_robust]})
temp['feature']=f
mean_features.append(temp)
mean_features = pd.concat(mean_features, axis=0, sort=True)
mean_features.columns = ["_".join(x) for x in mean_features.columns.ravel()]
mean_features=mean_features.reset_index(drop=True)
return mean_features
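# =============================================================================
# Note on the column flattening above (added for clarity): after .agg() the columns
# form a two-level MultiIndex, so the "_".join turns e.g. ('value', 'MAD_robust')
# into 'value_MAD_robust', while single-level columns such as 'feature' come out
# with a trailing underscore ('feature_').
# =============================================================================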
def calc_mean_ctrl(all_features=False):
'''
calculates the mean values of each feature grouped by timepoint and by experiment
only for the ctrl
'''
#all features==False used for standard z_score. Only calculates the mean and standard deviation for the control
if all_features==False:
mean_ctrl=[]
for f in data.features:
temp=pd.DataFrame()
temp=data.grouped_features[f][data.grouped_features[f]['KD']=='CTRL'].groupby(['experiment'], as_index=False).agg({'value':[MAD_robust, Mean_robust]})
#temp=data.grouped_features[f][data.grouped_features[f]['KD']=='CTRL'].groupby(['timepoint', 'experiment'], as_index=False).agg({'value':[MAD_robust, Mean_robust]})
temp['feature']=f
mean_ctrl.append(temp)
mean_ctrl = pd.concat(mean_ctrl, axis=0, sort=True)
mean_ctrl.columns = ["_".join(x) for x in mean_ctrl.columns.ravel()]
mean_ctrl=mean_ctrl.reset_index(drop=True)
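    # =============================================================================
    # Added note (assumption): the standard z_score is presumably computed elsewhere
    # from these per-experiment control statistics, roughly as
    #     z = (value - value_Mean_robust_ctrl) / value_MAD_robust_ctrl
    # which is why only the CTRL rows are aggregated in this branch.
    # =============================================================================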
#if all features==True used for internal z_score, computes the mean and standard deviation
#for all knockdowns
if all_features==True:
mean_ctrl=[]
for k in data.knockdowns:
for f in data.features:
temp=pd.DataFrame()
temp=data.grouped_features[f][data.grouped_features[f]['KD']==k].groupby(['timepoint', 'experiment'], as_index=False).agg({'value':[MAD_robust, Mean_robust]})
temp['feature']=f
temp['knockdown']=k
mean_ctrl.append(temp)
mean_ctrl =
|
pd.concat(mean_ctrl, axis=0, sort=True)
|
pandas.concat
|
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
# month abbreviations used by the annual/quarterly range tests below (added so MONTHS is defined)
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
          'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
# offsets
self.assertEqual(frequencies.get_freq_code(offsets.Day()),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
(frequencies.get_freq('D'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
(frequencies.get_freq('M'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
(frequencies.get_freq('M'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
(frequencies.get_freq('M'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.Week()),
(frequencies.get_freq('W'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
(frequencies.get_freq('W'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
(frequencies.get_freq('W'), -2))
# monday is weekday=0
self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
(frequencies.get_freq('W-TUE'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
(frequencies.get_freq('W-MON'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
(frequencies.get_freq('W-FRI'), -2))
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
#All of these dates are on same day of week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-DEC')
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-NOV')
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-OCT')
def test_infer_freq_tz(self):
freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
self.assertEqual(idx.inferred_freq, 'H')
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00',
'2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00', '2014-07-07 16:00',
'2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
'2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
'2014-07-08 15:00', '2014-07-08 16:00'])
self.assertEqual(idx.inferred_freq, 'BH')
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
self.assertEqual(rng.inferred_freq, '-1A-JAN')
    def test_non_datetimeindex2(self):
        # renamed from test_non_datetimeindex to avoid silently shadowing the
        # earlier test of the same name defined in this class
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
self.assertEqual(result, rng.inferred_freq)
def test_invalid_index_types(self):
# test all index types
for i in [ tm.makeIntIndex(10),
tm.makeFloatIndex(10),
tm.makePeriodIndex(10) ]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not
|
is_platform_windows()
|
pandas.compat.is_platform_windows
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.ix[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
# tst that cacher updates
original = Series([1, 2, np.nan], name='A')
expected = Series([1, 2], dtype=original.dtype, name='A')
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
assert_series_equal(df2['A'], original)
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
# bad input
self.assertRaises(ValueError, self.frame.dropna, how='foo')
self.assertRaises(TypeError, self.frame.dropna, how=None)
# non-existent column - 8303
self.assertRaises(KeyError, self.frame.dropna, subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9]])
cp = df.copy()
result = df.dropna(how='all', axis=[0, 1])
result2 = df.dropna(how='all', axis=(0, 1))
expected = df.dropna(how='all').dropna(how='all', axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(df, cp)
inp = df.copy()
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
def test_fillna(self):
self.tsframe.ix[:5, 'A'] = nan
self.tsframe.ix[-5:, 'A'] = nan
zero_filled = self.tsframe.fillna(0)
self.assertTrue((zero_filled.ix[:5, 'A'] == 0).all())
padded = self.tsframe.fillna(method='pad')
self.assertTrue(np.isnan(padded.ix[:5, 'A']).all())
self.assertTrue((padded.ix[-5:, 'A'] == padded.ix[-5, 'A']).all())
# mixed type
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
self.assertRaises(ValueError, self.tsframe.fillna)
self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
mf.ix[-10:, 'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype=dict(C=None))
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype=dict(C=None))
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
df.x.fillna(method=m, inplace=1)
df.x.fillna(method=m)
# with different dtype (GH3386)
df = DataFrame([['a', 'a', np.nan, 'a'], [
'b', 'b', np.nan, 'b'], ['c', 'c', np.nan, 'c']])
result = df.fillna({2: 'foo'})
expected = DataFrame([['a', 'a', 'foo', 'a'],
['b', 'b', 'foo', 'b'],
['c', 'c', 'foo', 'c']])
assert_frame_equal(result, expected)
df.fillna({2: 'foo'}, inplace=True)
assert_frame_equal(df, expected)
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
assert_frame_equal(result, expected)
# with datelike
# GH 6344
df = DataFrame({
'Date': [pd.NaT, Timestamp("2014-1-1")],
'Date2': [Timestamp("2013-1-1"), pd.NaT]
})
expected = df.copy()
expected['Date'] = expected['Date'].fillna(df.ix[0, 'Date2'])
result = df.fillna(value={'Date': df['Date2']})
assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = df.get_dtype_counts().sort_values()
expected = Series({'object': 5})
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = result.get_dtype_counts().sort_values()
expected = Series({'int64': 5})
assert_series_equal(result, expected)
# empty block
df = DataFrame(index=lrange(3), columns=['A', 'B'], dtype='float64')
result = df.fillna('nan')
expected = DataFrame('nan', index=lrange(3), columns=['A', 'B'])
assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame(dict(A=[1, np.nan], B=[1., 2.]))
for v in ['', 1, np.nan, 1.0]:
expected = df.replace(np.nan, v)
result = df.fillna(v)
assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
# GH 7095
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=date_range('20130110', periods=3))
self.assert_frame_equal(result, expected)
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), pd.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), '?'],
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=pd.date_range('20130110', periods=3))
self.assert_frame_equal(result, expected)
def test_ffill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.ffill(),
self.tsframe.fillna(method='ffill'))
def test_bfill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.bfill(),
self.tsframe.fillna(method='bfill'))
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
df = DataFrame(np.random.randn(10, 4))
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(value=0)
self.assertIsNot(expected, df)
df.fillna(value=0, inplace=True)
assert_frame_equal(df, expected)
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(method='ffill')
self.assertIsNot(expected, df)
df.fillna(method='ffill', inplace=True)
assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]})
result = df.fillna({'a': 0, 'b': 5})
expected = df.copy()
expected['a'] = expected['a'].fillna(0)
expected['b'] = expected['b'].fillna(5)
assert_frame_equal(result, expected)
# it works
result = df.fillna({'a': 0, 'b': 5, 'd': 7})
# Series treated same as dict
result = df.fillna(df.max())
expected = df.fillna(df.max().to_dict())
assert_frame_equal(result, expected)
# disable this for now
with assertRaisesRegexp(NotImplementedError, 'column by column'):
df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
# GH 8377
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
# df2 may have different index and columns
df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
'b': [50, 60, 70, 80, 90],
'foo': ['bar'] * 5},
index=list('VWXuZ'))
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame({'a': [nan, 1, 2, nan, 40],
'b': [1, 2, 3, nan, 90],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
assert_frame_equal(result, expected)
def test_fillna_columns(self):
df = DataFrame(np.random.randn(10, 10))
df.values[:, ::2] = np.nan
result = df.fillna(method='ffill', axis=1)
expected = df.T.fillna(method='pad').T
assert_frame_equal(result, expected)
df.insert(6, 'foo', 5)
result = df.fillna(method='ffill', axis=1)
expected = df.astype(float).fillna(method='ffill', axis=1)
assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
with assertRaisesRegexp(ValueError, 'ffil'):
self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
# list
self.assertRaises(TypeError, self.frame.fillna, [1, 2])
# tuple
self.assertRaises(TypeError, self.frame.fillna, (1, 2))
# frame with series
self.assertRaises(ValueError, self.frame.iloc[:, 0].fillna,
self.frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.rand(20, 5)
df = DataFrame(index=lrange(20), columns=cols, data=data)
filled = df.fillna(method='ffill')
self.assertEqual(df.columns.tolist(), filled.columns.tolist())
def test_fill_corner(self):
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
filled = self.mixed_frame.fillna(value=0)
self.assertTrue((filled.ix[5:20, 'foo'] == 0).all())
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
# TODO(wesm): unused?
result = empty_float.fillna(value=0) # noqa
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = DataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
assert_frame_equal(res, exp)
class TestDataFrameInterpolate(tm.TestCase, TestData):
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
|
assert_frame_equal(result, expected)
|
pandas.util.testing.assert_frame_equal
|
import unittest
import numpy as np
import pandas as pd
import scipy.stats as st
from os import path, getcwd
from ..graphs import GraphGroupScatter
from ..data import Vector
from ..analysis.exc import NoDataError
from ..data import UnequalVectorLengthError
class MyTestCase(unittest.TestCase):
@property
def save_path(self):
if getcwd().split('/')[-1] == 'test':
return './images/'
elif getcwd().split('/')[-1] == 'sci_analysis':
if path.exists('./setup.py'):
return './sci_analysis/test/images/'
else:
return './test/images/'
else:
            return './'
def test_1_scatter_two_groups_default(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
save_to='{}test_group_scatter_1'.format(self.save_path)))
def test_2_scatter_two_groups_no_fit(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], fit=False,
save_to='{}test_group_scatter_2'.format(self.save_path)))
def test_3_scatter_two_groups_no_points(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array =
|
pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 31 18:04:31 2017
@author: kcarnold
"""
import numpy as np
import pandas as pd
from suggestion import clustering
#%%
sents = clustering.filter_reasonable_length_sents(clustering.get_all_sents())
#%%
rs = np.random.RandomState(0)
N = 500
indices = np.random.choice(len(sents), N, replace=False)
picked_sents = [sents[i] for i in indices]
#%%
|
pd.DataFrame({'idx': indices, 'sent': picked_sents})
|
pandas.DataFrame
|
import streamlit as st
from PIL import Image
import pickle
import numpy as np
import librosa
import pywt
from sklearn.decomposition import PCA
import pandas as pd
clf=pickle.load(open('model1.pkl','rb'))
df1=[]
@st.cache(suppress_st_warning=True)
def predict(normal,deep,nide,nedi):
audio_datat=[]
labelst=[]
saprtt=[]
path1t=[]
audio_filet=None
labelst.append(0)
audio_filet,srt=librosa.load(normal)
audio_datat.append(audio_filet)
saprtt.append(srt)
labelst.append(1)
audio_filet,srt=librosa.load(deep)
audio_datat.append(audio_filet)
saprtt.append(srt)
labelst.append(2)
audio_filet,srt=librosa.load(nide)
audio_datat.append(audio_filet)
saprtt.append(srt)
labelst.append(3)
audio_filet,srt=librosa.load(nedi)
audio_datat.append(audio_filet)
saprtt.append(srt)
featurest =np.empty((0,160))
pca=PCA(n_components=1)
scalest=np.arange(1,161)
for ind in range(len(audio_datat)):
print('.',end='')
coefft,freqst = pywt.cwt(audio_datat[ind],scalest,'morl')
featurest =np.vstack([featurest,pca.fit_transform(coefft).flatten()])
X_testt=featurest
y_pred = clf.predict(X_testt)
y_pred1= clf.predict_proba(X_testt)
df=
|
pd.DataFrame(y_pred1,labelst)
|
pandas.DataFrame
|
"""
<NAME>017
PanCancer Classifier
tcga_util.py
Usage: For import only
"""
def get_args():
"""
Get arguments for the main pancancer classifier script
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--genes',
help='Comma separated string of HUGO gene symbols')
parser.add_argument('-t', '--diseases', default='Auto',
help='Comma sep string of TCGA disease acronyms. '
'If no arguments are passed, filtering will '
'default to options given in --filter_count and '
'--filter_prop.')
parser.add_argument('-f', '--folds', default='5', type=int,
help='Number of cross validation folds to perform')
parser.add_argument('-d', '--drop', action='store_true',
help='Decision to drop input genes from X matrix')
parser.add_argument('-u', '--copy_number', action='store_true',
help='Supplement Y matrix with copy number events')
parser.add_argument('-c', '--filter_count', default=15, type=int,
help='Min number of mutations in diseases to include')
parser.add_argument('-p', '--filter_prop', default=0.05, type=float,
help='Min proportion of positives to include disease')
parser.add_argument('-n', '--num_features', default=8000, type=int,
help='Number of MAD genes to include in classifier')
parser.add_argument('-a', '--alphas', default='0.1,0.15,0.2,0.5,0.8,1',
help='the alphas for parameter sweep')
parser.add_argument('-l', '--l1_ratios', default='0,0.1,0.15,0.18,0.2,0.3',
help='the l1 ratios for parameter sweep')
parser.add_argument('-b', '--alt_genes', default='None',
help='alternative genes to test performance')
parser.add_argument('-s', '--alt_diseases', default="Auto",
help='The alternative diseases to test performance')
parser.add_argument('-i', '--alt_filter_count', default=15, type=int,
help='Min number of mutations in disease to include')
parser.add_argument('-r', '--alt_filter_prop', default=0.05, type=float,
help='Min proportion of positives to include disease')
parser.add_argument('-o', '--alt_folder', default='Auto',
help='Provide an alternative folder to save results')
parser.add_argument('-v', '--remove_hyper', action='store_true',
help='Remove hypermutated samples')
parser.add_argument('-k', '--keep_intermediate', action='store_true',
help='Keep intermediate ROC values for plotting')
parser.add_argument('-x', '--x_matrix', default='raw',
help='Filename of features to use in model')
parser.add_argument('-e', '--shuffled', action='store_true',
help='Shuffle the input gene exprs matrix alongside')
parser.add_argument('--shuffled_before_training', action='store_true',
help='Shuffle the gene exprs matrix before training')
parser.add_argument('-m', '--no_mutation', action='store_false',
help='Remove mutation data from y matrix')
parser.add_argument('-z', '--drop_rasopathy', action='store_true',
help='Decision to drop rasopathy genes from X matrix')
parser.add_argument('-q', '--drop_expression', action='store_true',
help='Decision to drop gene expression values from X')
parser.add_argument('-j', '--drop_covariates', action='store_true',
help='Decision to drop covariate information from X')
args = parser.parse_args()
return args
def get_threshold_metrics(y_true, y_pred, drop_intermediate=False,
disease='all'):
"""
Retrieve true/false positive rates and auroc/aupr for class predictions
Arguments:
y_true - an array of gold standard mutation status
y_pred - an array of predicted mutation status
disease - a string that includes the corresponding TCGA study acronym
Output:
dict of AUROC, AUPR, pandas dataframes of ROC and PR data, and cancer-type
"""
import pandas as pd
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.metrics import precision_recall_curve, average_precision_score
roc_columns = ['fpr', 'tpr', 'threshold']
pr_columns = ['precision', 'recall', 'threshold']
if drop_intermediate:
roc_items = zip(roc_columns,
roc_curve(y_true, y_pred, drop_intermediate=False))
else:
roc_items = zip(roc_columns, roc_curve(y_true, y_pred))
roc_df = pd.DataFrame.from_dict(dict(roc_items))
prec, rec, thresh = precision_recall_curve(y_true, y_pred)
pr_df =
|
pd.DataFrame.from_records([prec, rec])
|
pandas.DataFrame.from_records
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 13:57:33 2020
@author: <NAME>
"""
# A problem with ARIMA is that it does not support seasonal data, that is, a time series with a repeating cycle.
# ARIMA expects data that is either not seasonal or has the seasonal component removed, e.g. seasonally adjusted via methods such as seasonal differencing.
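# --- Hedged illustrative sketch (not part of the original module) -----------------
# One common way to remove the seasonal component mentioned above is seasonal
# differencing, y_t - y_{t-s}. The 12-step lag below assumes monthly data with a
# yearly cycle; it is an example choice, not something fixed by this module. The
# helper is kept import-free on purpose: it only uses pandas Series methods.
def _seasonal_difference_example(series, season_length=12):
    # series: a pandas Series indexed by time; season_length: assumed cycle length.
    # .diff(season_length) subtracts the value one full season earlier; the first
    # season_length entries become NaN and are dropped before modelling.
    return series.diff(season_length).dropna()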
from timeseries.modules.config import ORIG_DATA_PATH, SAVE_PLOTS_PATH, SAVE_MODELS_PATH, \
DATA, MONTH_DATA_PATH, MODELS_PATH, SAVE_RESULTS_PATH, SAVE_PLOTS_RESULTS_PATH_BASE
from timeseries.modules.dummy_plots_for_theory import save_fig, set_working_directory
from timeseries.modules.load_transform_data import load_transform_excel
# from timeseries.modules.sophisticated_prediction import create_dict_from_monthly
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tqdm import tqdm
import warnings
import time
import glob
import os
import datetime
from statsmodels.tsa.stattools import adfuller # dickey fuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.arima.model import ARIMA, ARIMAResults
from statsmodels.tsa.statespace.sarimax import SARIMAX, SARIMAXResults
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from sklearn.metrics import mean_squared_error
def input_ar_ma(model_name):
print('\nPlease insert the AR and MA orders, which you can read from the PACF and ACF plots!\nIf you would like to stop entering values, type in <stop>!')
while True:
try:
nr = input('AR-order (PACF)\tMA-order (ACF):\n')
if 'stop' in nr:
break
nr1, nr2 = nr.split()
nr1, nr2 = int(nr1), int(nr2)
if model_name in ['SARIMAX']:
try:
nr_sari = input('Seasonal\nAR-order (PACF)\tMA-order (ACF)\tSeasonality:\n')
if 'stop' in nr_sari:
break
nr3, nr4, nr5 = nr_sari.split()
nr3, nr4, nr5 = int(nr3), int(nr4), int(nr5)
return {'AR':nr1, 'MA':nr2, 'SAR':nr3, 'SMA':nr4, 'S':nr5}
break
except ValueError:
print('\nYou did not provide three numbers.\nPlease insert three numbers and no Strings!\nFormat: <nr> <nr> <nr>')
else:
return {'AR':nr1, 'MA':nr2}
break
except ValueError:
print('\nYou did not provide two numbers.\nPlease insert two numbers and no Strings!\nFormat: <nr> <nr>')
def get_stationarity(timeseries, given_model, window, save_plots, print_results = False):
# rolling statistics
rolling_mean = timeseries.rolling(window=window).mean()
rolling_std = timeseries.rolling(window=window).std()
# rolling statistics plot
original = plt.plot(timeseries, color='blue', label='Original')
mean = plt.plot(rolling_mean, color='red', label='Rolling Mean')
std = plt.plot(rolling_std, color='black', label='Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation for windowsize ' + str(window) )
if save_plots:
save_fig(name = given_model + '_mean_deviation_window_' + str(window), path_img=SAVE_PLOTS_PATH)
plt.show(block = False)
# Dickey–Fuller test:
result = adfuller(timeseries)
stationary = False
if result[1] <= 0.05:
stationary = True
result = {'ADF Statistic':result[0], 'p-value':result[1], 'other_first': result[2],
'other_second':result[3], 'Critical Values':{'1%':result[4]['1%'],
'5%':result[4]['5%'],
'10%':result[4]['10%']},
'stationary': stationary}
if print_results:
print('ADF Statistic: {}'.format(result['ADF Statistic']))
print('p-value: {}'.format(result['p-value']))
print('Critical Values:')
for key, value in result['Critical Values'].items():
print('\t{}: {}'.format(key, value))
print('Stationary: {}'.format(stationary))
return result
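# --- Hedged stand-alone sketch (not part of the original module) ------------------
# The decision rule coded in get_stationarity above is the usual ADF reading: a
# p-value <= 0.05 rejects the unit-root null, so the series is treated as stationary.
# The helper below isolates just that rule; the 0.05 default mirrors the module's
# choice and the import is kept local so the sketch stays self-contained.
def _adf_is_stationary_example(values, alpha=0.05):
    from statsmodels.tsa.stattools import adfuller
    p_value = adfuller(values)[1]  # adfuller returns (statistic, p-value, ...)
    return p_value <= alpha
# e.g. white noise (np.random.randn(500)) is usually judged stationary, while a
# random walk (np.cumsum(np.random.randn(500))) usually is not.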
def plot_acf_pacf(series, given_model, save_plots, rolling_window):
if len(series.values)*0.5 >= 60: # big value for seasonal AR and MA detection
lags_length = 60
else:
lags_length = len(series.values)*0.5 - 5 # -5 because otherwise lags_length is too long to display
fig, (ax1,ax2) = plt.subplots(2,1, sharex=True)
fig = plot_pacf(series, zero = True, lags = lags_length, method = 'OLS', title ='Partial correlogram to find out AR value for '+ given_model +' window ' + str(rolling_window), ax = ax1, markerfacecolor='black', color = 'black')
fig = plot_acf(series, zero = True, lags = lags_length, title ='Correlogram to find out MA value for ' + given_model +' window ' + str(rolling_window), ax = ax2, markerfacecolor='black', color = 'black')
plt.show(block= False)
if save_plots:
save_fig(name = given_model + '_acf_pacf_' + str(rolling_window), path_img=SAVE_PLOTS_PATH, fig = fig)
def decompose_and_plot(dependent_var, given_model, index, rolling_window, save_dec, save_acf, save_stat, print_results = False):
series = pd.Series(dependent_var.values, index=index)
decomposition = seasonal_decompose(series)
decomposition.resid.dropna(inplace=True)
figure = decomposition.plot()
if save_dec:
save_fig(name = 'decompose_window_' + str(rolling_window), path_img=SAVE_PLOTS_PATH, fig = figure)
plot_acf_pacf(series, given_model, save_acf, rolling_window)
plt.figure(2)
dickey_fuller_results = get_stationarity(decomposition.observed, given_model = given_model, window = rolling_window, save_plots = save_stat, print_results = print_results)
return decomposition
def split(data, diff_faktor, rel_faktor):
values = pd.DataFrame(data.values)
dataframe = None
if diff_faktor != 0:
for i in range(1,diff_faktor+1):
dataframe = pd.concat([dataframe,values.shift(i)], axis= 1)
dataframe = pd.concat([dataframe, values], axis = 1)
else:
dataframe = values
X = dataframe.values
train, test = X[1:int(len(X)*rel_faktor)], X[int(len(X)*rel_faktor):]
if diff_faktor != 0:
train_X, train_y = train[:,:diff_faktor], train[:,diff_faktor]
test_X, test_y = test[:,:diff_faktor], test[:,diff_faktor]
else:
train_X, train_y = None, train
test_X, test_y = None, test
return {'Train':train, 'Test':test, 'Train_X':train_X , 'Train_y':train_y , 'Test_X': test_X, 'Test_y':test_y}
def compare_models(given_model, data, diff_faktor, rolling_window, forecast_one_step = False):
if forecast_one_step:
name_supl = '_one_step'
else:
name_supl = ''
print('\n', given_model, ' Model started:')
subresults = pd.DataFrame(columns = ['Predicted', 'Expected'])
result = {'Used Model':None, 'Model':None, 'MSE':None, 'RMSE':None, 'Orders':None}
decomposition = decompose_and_plot(dependent_var=data, given_model = given_model + name_supl, index=data.index, rolling_window=rolling_window, save_dec = True, save_acf=True, save_stat=True, print_results = True)
if given_model in ['persistance','SARIMAX']:
splitted_data = split(data = decomposition.observed, diff_faktor= 0, rel_faktor = 0.9 )
if given_model == 'SARIMAX':
order_dict = input_ar_ma(given_model)
else:
order_dict = None
elif given_model in ['ARIMA', 'ARMA']:
diff_df = decomposition.observed.diff(diff_faktor)
diff_df.dropna(inplace=True)
print('\nAfter differencing:')
get_stationarity(diff_df, given_model = given_model + name_supl, window= rolling_window, save_plots=True, print_results = True) # proove data is now stationary
plot_acf_pacf(diff_df,given_model = given_model + name_supl, save_plots=True, rolling_window=diff_faktor)
splitted_data = split(data = diff_df, diff_faktor= 0, rel_faktor = 0.9 )
order_dict = input_ar_ma(given_model)
result['Orders'] = order_dict
history = [x for x in splitted_data['Train']]
predictions = list()
if forecast_one_step:
test_length = 1
splited_length = len(splitted_data['Test'])
else:
test_length = len(splitted_data['Test'])
splited_length = 1
for i in tqdm(range(splited_length)):
# predict
warnings.filterwarnings('ignore')
if given_model == 'persistance':
model_fit = None
yhat = history[-test_length:]
elif given_model == 'ARIMA':
model_fit = ARIMA(history, order=(order_dict['AR'],1,order_dict['MA'])).fit()
yhat = model_fit.forecast(test_length)
elif given_model == 'ARMA':
model_fit = ARMA(history, order=(order_dict['AR'],order_dict['MA'])).fit(disp=0)
yhat = model_fit.forecast(test_length)[0]
elif given_model == 'SARIMAX':
model_fit = SARIMAX(history, order = (order_dict['AR'],1,order_dict['MA']),
seasonal_order=(order_dict['SAR'],1,order_dict['SMA'],order_dict['S']),
enforce_stationarity=True, enforce_invertibility = True).fit(disp = 0)
yhat = model_fit.forecast(test_length)
predictions.append(yhat)
if forecast_one_step:
# observation
obs = splitted_data['Test'][i]
history.append(obs)
subresults.loc[i] = [yhat,obs]
else:
obs = splitted_data['Test']
obs = [x for x in obs]
subresults = [yhat,obs]
# print('>Predicted={}, Expected={}'.format(yhat, obs))
result['Model'] = model_fit
result['Used Model'] = given_model
if given_model == 'persistance':
predictions = sum(predictions, [])
else:
if not forecast_one_step:
predictions = predictions[0]
result['MSE'] = np.round(mean_squared_error(splitted_data['Test'][:,0], predictions, squared = True),2)
result['RMSE'] = np.round(mean_squared_error(splitted_data['Test'][:,0], predictions, squared = False),2)
print('RMSE: %.3f' % result['RMSE'])
warnings.filterwarnings('default')
return [result, subresults]
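# --- Hedged sketch (illustration only, not the project's code) --------------------
# compare_models uses a one-step walk-forward loop when forecast_one_step is True:
# forecast one step, then append the real observation to the history before the next
# forecast. The naive persistence forecast (repeat the last observed value) is shown
# because it needs no fitting; a fitted model's .forecast(1) could be substituted at
# the marked line.
def _walk_forward_persistence_example(train, test):
    history = list(train)
    predictions = []
    for obs in test:
        yhat = history[-1]      # <- e.g. model_fit.forecast(1)[0] for a real model
        predictions.append(yhat)
        history.append(obs)     # reveal the true value before the next step
    return predictions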
def monthly_aggregate(data_frame, combined):
if not combined:
try:
data_frame.index = data_frame['Verkaufsdatum']
data_frame = data_frame.loc[:,data_frame.columns != 'Verkaufsdatum']
except:
data_frame.index = data_frame['date']
data_frame = data_frame.loc[:,data_frame.columns != 'date']
result_df = pd.DataFrame(index = set(data_frame.index.to_period('M')), columns = data_frame.columns)
for year_month in tqdm(set(result_df.index)):
result_df.loc[year_month] = data_frame.loc[data_frame.index.to_period('M') == year_month].sum()
result_df.index.name = 'date'
return result_df.sort_index()
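# Hedged note (illustration only): for a frame that already has a DatetimeIndex, the
# loop in monthly_aggregate is essentially a period-based groupby; a compact
# equivalent sketch:
def _monthly_sum_example(df):
    # df is assumed to have a DatetimeIndex; values are summed per calendar month.
    out = df.groupby(df.index.to_period('M')).sum()
    out.index.name = 'date'
    return out.sort_index()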
def combine_dataframe(data_frame_with_all_data, monthly= False, output_print = False):
head_names = ['Einzel Menge in ST', '4Fahrt Menge in ST', 'Tages Menge in ST', 'Gesamt Menge in ST']
df1 = data_frame_with_all_data[0]
df1.index = df1['Verkaufsdatum']
result_df = pd.DataFrame(columns = head_names)
result_df[list(set(head_names) - set(['Tages Menge in ST']))] = df1[list(set(head_names) - set(['Tages Menge in ST']))]
result_df['Tages Menge in ST'] = np.zeros(result_df.shape[0], dtype = int)
for df in tqdm(data_frame_with_all_data[1:]):
df.index = df['Verkaufsdatum']
temp_df = df
for time_stamp in set(df1.index):
if time_stamp not in set(df.index):
dic = {dict_key : 0 for dict_key in df.columns[1:]}
dic['Verkaufsdatum'] = time_stamp
temp_df = temp_df.append(dic, ignore_index = True)
temp_df.index = temp_df['Verkaufsdatum']
for name in head_names:
try:
result_df[name] = result_df[name] + temp_df[name]
except:
if output_print:
print('This header is not present in temp "{}" \n'.format(name))
pass
# insert new column
result_df['Gesamt Menge in ST calc'] = result_df[['Einzel Menge in ST', '4Fahrt Menge in ST', 'Tages Menge in ST']].sum(axis = 1)
if monthly:
print('\nMonthly data aggregation:')
time.sleep(0.3)
monthly_df = monthly_aggregate(result_df, combined = True)
return monthly_df, result_df
return result_df
def create_dict_from_monthly(monthly_given_list, monthly_names_given_list, agg_monthly_list,
agg_monthly_names_list, combined = False):
monthly_given_dict = {name:data for name, data in zip(monthly_names_given_list, monthly_given_list)}
agg_monthly_dict = {name:data for name, data in zip(agg_monthly_names_list,agg_monthly_list)}
monthly_dict_copy = {}
for dic in tqdm(agg_monthly_dict):
for dic1 in agg_monthly_dict:
if dic != dic1 and dic.split('_')[1] == dic1.split('_')[1]:
used_columns = remove_unimportant_columns(agg_monthly_dict[dic].columns, ['Verkaufsdatum','Tages Wert in EUR','Einzel Wert in EUR','4Fahrt Wert in EUR', 'Gesamt Wert in EUR'])
used_columns1 = remove_unimportant_columns(agg_monthly_dict[dic1].columns, ['Verkaufsdatum','Tages Wert in EUR','Einzel Wert in EUR','4Fahrt Wert in EUR', 'Gesamt Wert in EUR'])
temp = agg_monthly_dict[dic][used_columns].merge(agg_monthly_dict[dic1][used_columns1], left_index = True, right_index = True)
temp['Gesamt Menge in ST'] = temp[['Gesamt Menge in ST_x','Gesamt Menge in ST_y']].sum(axis=1)
monthly_dict_copy[dic.split('_')[1]] = temp.drop(['Gesamt Menge in ST_x','Gesamt Menge in ST_y'], axis = 1)
lis = list()
for nr,column in enumerate(monthly_dict_copy[dic.split('_')[1]].columns):
lis.append(column.split()[0])
monthly_dict_copy[dic.split('_')[1]].columns = lis
final_dict = {}
for monthly_name, monthly_data in tqdm(monthly_given_dict.items()):
einzel = monthly_data[(monthly_data['PGR'] == 200)]
fahrt4 = einzel[einzel[einzel.columns[1]].str.contains('4-Fahrten|4 Fahrten', regex=True)]
einzel = einzel[einzel[einzel.columns[1]].str.contains('4-Fahrten|4 Fahrten', regex=True) == False]
tages = monthly_data[(monthly_data['PGR'] == 300)]
final_df = pd.DataFrame([tages.sum(axis=0, numeric_only = True)[2:],
einzel.sum(axis=0, numeric_only = True)[2:],
fahrt4.sum(axis=0, numeric_only = True)[2:]],
index=['Tages', 'Einzel', '4Fahrt'])
final_df = final_df.T
las = list()
for year_month in final_df.index:
las.append(datetime.datetime.strptime(year_month, '%Y%m'))
final_df.index = las
final_df.index = final_df.index.to_period('M')
final_df['Gesamt'] = final_df.sum(axis = 1)
final_dict[monthly_name] = pd.concat([final_df, monthly_dict_copy[monthly_name].loc[
pd.Period(max(final_df.index)+1):, : ]])
if combined:
tages = list()
einzel = list()
fahrt_4 = list()
gesamt = list()
final = pd.DataFrame()
for key in final_dict.keys():
tages.append( final_dict[key][final_dict[key].columns[0]])
einzel.append( final_dict[key][final_dict[key].columns[1]])
fahrt_4.append( final_dict[key][final_dict[key].columns[2]])
gesamt.append( final_dict[key][final_dict[key].columns[3]])
final['Tages'] =
|
pd.DataFrame(tages)
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
def test_shift_32bit_take(self, frame_or_series):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = obj.shift(1, "2H")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = offsets.BDay()
shifted = obj.shift(5, freq=offset)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset)
tm.assert_equal(unshifted, obj)
shifted2 = obj.shift(5, freq="B")
tm.assert_equal(shifted, shifted2)
unshifted = obj.shift(0, freq=offset)
tm.assert_equal(unshifted, obj)
d = obj.index[0]
shifted_d = d + offset * 5
if frame_or_series is DataFrame:
tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False)
else:
tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d])
def test_shift_with_periodindex(self, frame_or_series):
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
if frame_or_series is DataFrame:
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
else:
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_equal(shifted2, shifted3)
tm.assert_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=offsets.BDay())
tm.assert_equal(shifted5, shifted4)
def test_shift_other_axis(self):
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
def test_shift_named_axis(self):
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical1(self, frame_or_series):
# GH#9416
obj = frame_or_series(["a", "b", "c", "d"], dtype="category")
rt = obj.shift(1).shift(-1)
tm.assert_equal(obj.iloc[:-1], rt.dropna())
def get_cat_values(ndframe):
# For Series we could just do ._values; for DataFrame
# we may be able to do this if we ever have 2D Categoricals
return ndframe._mgr.arrays[0]
cat = get_cat_values(obj)
sp1 = obj.shift(1)
tm.assert_index_equal(obj.index, sp1.index)
assert np.all(get_cat_values(sp1).codes[:1] == -1)
assert np.all(cat.codes[:-1] == get_cat_values(sp1).codes[1:])
sn2 = obj.shift(-2)
tm.assert_index_equal(obj.index, sn2.index)
assert np.all(get_cat_values(sn2).codes[-2:] == -1)
assert np.all(cat.codes[2:] == get_cat_values(sn2).codes[:-2])
tm.assert_index_equal(cat.categories, get_cat_values(sp1).categories)
tm.assert_index_equal(cat.categories, get_cat_values(sn2).categories)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_categorical_fill_value(self, frame_or_series):
ts = frame_or_series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = frame_or_series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = r"Cannot setitem on a Categorical with a new category \(f\)"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_fill_value(self, frame_or_series):
# GH#24128
dti = date_range("1/1/2000", periods=5, freq="H")
ts = frame_or_series([1.0, 2.0, 3.0, 4.0, 5.0], index=dti)
exp = frame_or_series([0.0, 1.0, 2.0, 3.0, 4.0], index=dti)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_equal(result, exp)
exp = frame_or_series([0.0, 0.0, 1.0, 2.0, 3.0], index=dti)
result = ts.shift(2, fill_value=0.0)
tm.assert_equal(result, exp)
ts = frame_or_series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert tm.get_dtype(res) == tm.get_dtype(ts)
# retain integer dtype
obj = frame_or_series([1, 2, 3, 4, 5], index=dti)
exp = frame_or_series([0, 1, 2, 3, 4], index=dti)
result = obj.shift(1, fill_value=0)
tm.assert_equal(result, exp)
exp = frame_or_series([0, 0, 1, 2, 3], index=dti)
result = obj.shift(2, fill_value=0)
tm.assert_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self, using_array_manager):
# GH#35488
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support
def test_shift_axis1_multiple_blocks_with_int_fill(self):
# GH#42719
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([-1, -1, 0, 1], axis=1)
expected.iloc[:, :2] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(-2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([2, 3, -1, -1], axis=1)
expected.iloc[:, -2:] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame, frame_or_series):
# TODO(2.0): remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=
|
offsets.BDay()
|
pandas.offsets.BDay
|
# ©<NAME>, @brianruizy
# Created: 03-15-2020
import datetime
import platform
import pandas as pd
# Datasets scraped can be found in the following URL's:
# https://github.com/CSSEGISandData/COVID-19
# https://github.com/owid/covid-19-data/tree/master/public/data
# Different styles in zero-padding in date depend on operating systems
if platform.system() == 'Linux':
STRFTIME_DATA_FRAME_FORMAT = '%-m/%-d/%y'
elif platform.system() == 'Windows':
STRFTIME_DATA_FRAME_FORMAT = '%#m/%#d/%y'
else:
STRFTIME_DATA_FRAME_FORMAT = '%-m/%-d/%y'
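# Hedged illustration (the date 2020-01-22 is assumed purely for demonstration): both
# the glibc/macOS '%-m/%-d/%y' and the Windows '%#m/%#d/%y' drop the zero padding,
# giving '1/22/20' to match the CSSE time-series column headers, whereas the portable
# '%m/%d/%y' would give '01/22/20'.
_example_column_header = datetime.date(2020, 1, 22).strftime(STRFTIME_DATA_FRAME_FORMAT)  # '1/22/20'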
def daily_report(date_string=None):
# Reports aggregated data, dating as far back as 01-22-2020
# If passing arg, must use above date formatting '01-22-2020'
report_directory = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
if date_string is None:
yesterday = datetime.date.today() - datetime.timedelta(days=2)
file_date = yesterday.strftime('%m-%d-%Y')
else:
file_date = date_string
df = pd.read_csv(report_directory + file_date + '.csv')
return df
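# Hedged usage sketch: per the comments above, a specific day can be requested with an
# '01-22-2020' style string, while calling daily_report() with no argument fetches the
# most recent available report. The 'Confirmed' column name is an assumption about the
# upstream CSV layout rather than something this module guarantees.
def _daily_report_example():
    df_jan22 = daily_report('01-22-2020')
    return df_jan22['Confirmed'].sum()  # assumes a 'Confirmed' column exists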
def daily_confirmed():
# returns the daily reported cases for respective date,
# segmented globally and by country
df = pd.read_csv('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/new_cases.csv')
return df
def daily_deaths():
# returns the daily reported deaths for respective date,
df = pd.read_csv('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/new_deaths.csv')
return df
def confirmed_report():
# Returns time series version of total cases confirmed globally
df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
return df
def deaths_report():
# Returns time series version of total deaths globally
df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
return df
def recovered_report():
# Return time series version of total recoveries globally
df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
return df
def realtime_growth(date_string=None, weekly=False, monthly=False):
"""[summary]: consolidates all reports, to create time series of statistics.
Columns excluded with list comp. are: ['Province/State','Country/Region','Lat','Long'].
Args:
date_string: must use following date formatting '4/12/20'.
weekly: bool, returns df for last 8 weeks
monthly: bool, returns df for last 3 months
Returns:
[growth_df] -- [growth in series]
"""
df1 = confirmed_report()[confirmed_report().columns[4:]].sum()
df2 = deaths_report()[deaths_report().columns[4:]].sum()
df3 = recovered_report()[recovered_report().columns[4:]].sum()
growth_df = pd.DataFrame([])
growth_df['Confirmed'], growth_df['Deaths'], growth_df['Recovered'] = df1, df2, df3
growth_df.index = growth_df.index.rename('Date')
yesterday =
|
pd.Timestamp('now')
|
pandas.Timestamp
|
from pandas import DataFrame
from .description import description as description__
from .params import (
construct_params as params__,
construct_index_params as index_params__,
)
def construct(
data: dict = description__,
params: dict = params__,
index_params: dict = index_params__,
) -> DataFrame:
return
|
DataFrame.from_dict(data, **params)
|
pandas.DataFrame.from_dict
|
import os
import argparse
import pandas as pd
from datetime import datetime
import random
def process_basic_data(args, partition, episode_dirname='basic'):
df = None
df_path = os.path.join(args.output_path, partition + '_listfile.csv')
input_dir = os.path.join(args.root_path, partition)
patient_dirnames = list(filter(str.isdigit, os.listdir(input_dir)))
try:
output_dir = os.path.join(args.output_path, partition)
episode_outdir = os.path.join(output_dir, episode_dirname)
os.makedirs(episode_outdir)
except FileExistsError:
pass
for patient_index, patient in enumerate(patient_dirnames):
patient_dir = os.path.join(input_dir, patient)
ts_fnames = list(filter(lambda x: x.find("timeseries") != -1,
os.listdir(patient_dir)))
for ts_fname in ts_fnames:
lb_fname = ts_fname.replace("_timeseries", "")
label_df = pd.read_csv(os.path.join(patient_dir, lb_fname))
ts_df = pd.read_csv(os.path.join(patient_dir, ts_fname))
# Quality check ---------------------------
if len(label_df) == 0 or len(ts_df) == 0:
print('Empty label df or ts df', patient, ts_fname)
continue
assert len(label_df) == 1
label_df = label_df.to_dict(orient='records')[0]
los = label_df['Length of Stay'] * 24 # length of stay in hours
if pd.isnull(los):
print("length of stay is missing", patient, ts_fname)
continue
elif label_df["Mortality"] == pd.isnull(label_df["Deathtime"]):
print('Mismatched mortality label and deathtime', patient_dir)
continue
# Copy over time series data --------------
ts_df.to_csv(os.path.join(episode_outdir, patient + "_" + ts_fname))
# Collect time invariant information ------
rel_death_hours = None
if not pd.isnull(label_df["Deathtime"]) and not
|
pd.isnull(label_df["Intime"])
|
pandas.isnull
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
import os
from scipy import stats
from tqdm import tqdm
import mdtraj as md
########################################################
def get_3drobot_native(data_flag):
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
pdb_list = pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv')['pdb'].values
energy_native = []
for pdb_id in pdb_list:
df = pd.read_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv')
energy_native.append(df['loss'].values[0])
energy_native = np.array(energy_native)
print(energy_native, np.mean(energy_native), np.min(energy_native), np.max(energy_native), np.std(energy_native))
def plot_3drobot(data_flag):
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
# pdb_list = pd.read_csv('pdb_local_rot.txt')['pdb'].values
# pdb_list = pd.read_csv('pdb_profile_diff.txt')['pdb'].values
# pdb_list = pd.read_csv(f'{root_dir}/pdb_profile_diff_match.txt')['pdb'].values
pdb_list = pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv')['pdb'].values
# data_flag = 'exp005_v2'
# data_flag = 'exp5'
# data_flag = 'exp6'
# data_flag = 'exp12'
# data_flag = 'exp14'
# data_flag = 'exp17'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
# data_flag = 'exp50'
# data_flag = 'exp50_relax'
# data_flag = 'exp49'
# data_flag = 'exp49_relax'
# data_flag = 'exp54'
# data_flag = 'exp61'
# data_flag = 'rosetta'
# data_flag = 'rosetta_relax'
# data_flag = 'rosetta_cen'
# if not os.path.exists(f'{root_dir}/fig_3drobot_{data_flag}'):
# os.system(f'mkdir -p {root_dir}/fig_3drobot_{data_flag}')
correct = 0
rank = []
for pdb_id in pdb_list:
df = pd.read_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv')
decoy_name = df['NAME'].values
assert(decoy_name[0] == 'native.pdb')
ind = (df['loss'] != 999)
loss = df['loss'][ind].values
rmsd = df['RMSD'][ind].values
if np.argmin(loss) == 0:
correct += 1
num = np.arange(loss.shape[0]) + 1
rank_i = num[np.argsort(loss) == 0][0]
rank.append(rank_i)
if rank_i > 1:
print(pdb_id, rmsd[np.argmin(loss)])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
pl.plot([rmsd[0]], [loss[0]], 'rs', markersize=12)
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
# pl.savefig(f'{root_dir}/fig_3drobot_{data_flag}/{pdb_id}_score.pdf')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_score.pdf')
pl.close(fig)
rank = np.array(rank)
print(rank)
fig = pl.figure()
pl.hist(rank, bins=np.arange(21)+0.5)
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/rank.pdf')
pl.close(fig)
########################################################
def plot_casp11_loss():
# pdb_list = pd.read_csv('pdb_list_new.txt')['pdb'].values
pdb_list = pd.read_csv('pdb_no_need_copy_native.txt')['pdb'].values
flist = pd.read_csv('list_casp11.txt')['fname'].values
casp_dict = {x.split('#')[1][:5]: x.split('_')[0] for x in flist}
df_tm = pd.read_csv('casp11_decoy.csv')
tm_score_dict = {x: y for x, y in zip(df_tm['Target'], df_tm['Decoys'])}
# data_flag = 'exp3_v2'
# data_flag = 'exp5'
# data_flag = 'exp7'
# data_flag = 'exp13'
# data_flag = 'exp15'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
data_flag = 'exp61'
if not os.path.exists(f'fig_casp11_{data_flag}'):
os.system(f'mkdir fig_casp11_{data_flag}')
correct = 0
rank = []
tm_score = []
for pdb_id in pdb_list:
data_path = f'data_casp11_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['NAME'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
tm_score.append(tm_score_dict[pdb_id])
loss = df['loss'].values
num = np.arange(loss.shape[0])
i = (decoy_name == f'{pdb_id}.native.pdb')
if num[i] == np.argmin(loss):
# print(num.shape[0] - num[i])
correct += 1
rank.append(num[np.argsort(loss) == num[i]][0] + 1)
fig = pl.figure()
pl.plot(num, loss, 'bo')
i = (decoy_name == f'{pdb_id}.Zhang-Server_model1.pdb')
pl.plot([num[i]], [loss[i]], 'g^', markersize=12, label='zhang')
i = (decoy_name == f'{pdb_id}.QUARK_model1.pdb')
pl.plot([num[i]], [loss[i]], 'c*', markersize=12, label='quark')
i = (decoy_name == f'{pdb_id}.native.pdb')
pl.plot([num[i]], [loss[i]], 'rs', markersize=12, label='native')
pdb_id = casp_dict[pdb_id]
pl.title(f'{pdb_id}')
pl.xlabel('num')
pl.ylabel('energy score')
pl.savefig(f'fig_casp11_{data_flag}/{pdb_id}_score.pdf')
pl.close(fig)
rank = np.array(rank)
tm_score = np.array(tm_score)
pl.figure()
pl.hist(rank, bins=np.arange(21)+0.5)
# pl.figure()
# pl.plot(tm_score, rank, 'bo')
a = (rank <= 5)
b = (rank > 5)
pl.figure()
pl.hist(tm_score[a], bins=np.arange(9)*0.1+0.2, label='rank=1 or 2', histtype='stepfilled')
pl.hist(tm_score[b], bins=np.arange(9)*0.1+0.2, label='rank>10', histtype='step')
pl.xlabel('Best TM-score in decoys')
pl.ylabel('Num')
pl.legend(loc=2)
########################################################
def plot_casp11(data_flag):
# plot RMSD vs. loss for CASP11
root_dir = '/home/hyang/bio/erf/data/decoys/casp11'
pdb_list = pd.read_csv(f'{root_dir}/casp11_rmsd/casp11_rmsd.txt')['pdb']
flist = pd.read_csv(f'{root_dir}/list_casp11.txt')['fname'].values
casp_dict = {x.split('#')[1][:5]: x.split('_')[0] for x in flist}
# data_flag = 'exp3_v2'
# data_flag = 'exp5'
# data_flag = 'exp7'
# data_flag = 'exp13'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
# data_flag = 'exp61'
for pdb_id in pdb_list:
data_path = f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['NAME'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
loss = df['loss'].values
df2 =
|
pd.read_csv(f'{root_dir}/casp11_rmsd/{pdb_id}_rmsd.csv')
|
pandas.read_csv
|
"""
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
from pandas._libs.tslibs.ccalendar import get_firstbday, get_lastbday
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import roll_qtrday
from pandas import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_last_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_get_last_bday(dt, exp_week_day, exp_last_day):
assert dt.weekday() == exp_week_day
assert get_lastbday(dt.year, dt.month) == exp_last_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_get_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert get_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shift_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert liboffsets.shift_month(dt, months, day_opt=day_opt) == expected
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(1, "start", Timestamp("1929-06-01")),
(-3, "end", Timestamp("1929-02-28")),
(25, None,
|
Timestamp("1931-06-5")
|
pandas.Timestamp
|
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
    idx + Timestamp('2011-01-01')
"""Unit tests for Caravel"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import doctest
import imp
import os
import unittest
from mock import Mock, patch
from flask import escape
from flask_appbuilder.security.sqla import models as ab_models
import caravel
from caravel import app, db, models, utils, appbuilder
from caravel.models import DruidCluster
os.environ['CARAVEL_CONFIG'] = 'tests.caravel_test_config'
app.config['TESTING'] = True
app.config['CSRF_ENABLED'] = False
app.config['SECRET_KEY'] = 'thisismyscretkey'
app.config['WTF_CSRF_ENABLED'] = False
app.config['PUBLIC_ROLE_LIKE_GAMMA'] = True
BASE_DIR = app.config.get("BASE_DIR")
cli = imp.load_source('cli', BASE_DIR + "/bin/caravel")
class CaravelTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(CaravelTestCase, self).__init__(*args, **kwargs)
self.client = app.test_client()
utils.init(caravel)
admin = appbuilder.sm.find_user('admin')
if not admin:
appbuilder.sm.add_user(
'admin', 'admin', 'user', '<EMAIL>',
appbuilder.sm.find_role('Admin'),
password='<PASSWORD>')
gamma = appbuilder.sm.find_user('gamma')
if not gamma:
appbuilder.sm.add_user(
'gamma', 'gamma', 'user', '<EMAIL>',
appbuilder.sm.find_role('Gamma'),
password='<PASSWORD>')
utils.init(caravel)
def login_admin(self):
resp = self.client.post(
'/login/',
data=dict(username='admin', password='<PASSWORD>'),
follow_redirects=True)
assert 'Welcome' in resp.data.decode('utf-8')
def login_gamma(self):
resp = self.client.post(
'/login/',
data=dict(username='gamma', password='<PASSWORD>'),
follow_redirects=True)
assert 'Welcome' in resp.data.decode('utf-8')
def setup_public_access_for_dashboard(self, dashboard_name):
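# Grant the Public role every 'datasource_access' permission whose view menu name
# contains the given name, so anonymous users can view that dashboard's datasources.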
public_role = appbuilder.sm.find_role('Public')
perms = db.session.query(ab_models.PermissionView).all()
for perm in perms:
if (perm.permission.name == 'datasource_access' and
perm.view_menu and dashboard_name in perm.view_menu.name):
appbuilder.sm.add_permission_role(public_role, perm)
class CoreTests(CaravelTestCase):
def __init__(self, *args, **kwargs):
# Load examples first, so that we set up proper permission-view relations
# for all example data sources.
self.load_examples()
super(CoreTests, self).__init__(*args, **kwargs)
self.table_ids = {tbl.table_name: tbl.id for tbl in (
db.session
.query(models.SqlaTable)
.all()
)}
def setUp(self):
pass
def tearDown(self):
pass
def load_examples(self):
cli.load_examples(load_test_data=True)
def test_save_slice(self):
self.login_admin()
slice_id = (
db.session.query(models.Slice.id)
.filter_by(slice_name="Energy Sankey")
.scalar())
copy_name = "Test Sankey Save"
tbl_id = self.table_ids.get('energy_usage')
url = "/caravel/explore/table/{}/?viz_type=sankey&groupby=source&groupby=target&metric=sum__value&row_limit=5000&where=&having=&flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id={}&slice_name={}&collapsed_fieldsets=&action={}&datasource_name=energy_usage&datasource_id=1&datasource_type=table&previous_viz_type=sankey"
db.session.commit()
resp = self.client.get(
url.format(tbl_id, slice_id, copy_name, 'save'),
follow_redirects=True)
assert copy_name in resp.data.decode('utf-8')
resp = self.client.get(
url.format(tbl_id, slice_id, copy_name, 'overwrite'),
follow_redirects=True)
assert 'Energy' in resp.data.decode('utf-8')
def test_slices(self):
# Testing by running all the examples
self.login_admin()
Slc = models.Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [
(slc.slice_name, slc.slice_url),
(slc.slice_name, slc.viz.json_endpoint),
(slc.slice_name, slc.viz.csv_endpoint),
]
for name, url in urls:
print("Slice: " + name)
self.client.get(url)
def test_dashboard(self):
self.login_admin()
urls = {}
for dash in db.session.query(models.Dashboard).all():
urls[dash.dashboard_title] = dash.url
for title, url in urls.items():
assert escape(title) in self.client.get(url).data.decode('utf-8')
def test_doctests(self):
modules = [utils]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_misc(self):
assert self.client.get('/health').data.decode('utf-8') == "OK"
assert self.client.get('/ping').data.decode('utf-8') == "OK"
def test_shortner(self):
self.login_admin()
data = "//caravel/explore/table/1/?viz_type=sankey&groupby=source&groupby=target&metric=sum__value&row_limit=5000&where=&having=&flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name=Energy+Sankey&collapsed_fieldsets=&action=&datasource_name=energy_usage&datasource_id=1&datasource_type=table&previous_viz_type=sankey"
resp = self.client.post('/r/shortner/', data=data)
assert '/r/' in resp.data.decode('utf-8')
def test_save_dash(self):
self.login_admin()
dash = db.session.query(models.Dashboard).filter_by(slug="births").first()
data = """{"positions":[{"slice_id":"131","col":8,"row":8,"size_x":2,"size_y":4},{"slice_id":"132","col":10,"row":8,"size_x":2,"size_y":4},{"slice_id":"133","col":1,"row":1,"size_x":2,"size_y":2},{"slice_id":"134","col":3,"row":1,"size_x":2,"size_y":2},{"slice_id":"135","col":5,"row":4,"size_x":3,"size_y":3},{"slice_id":"136","col":1,"row":7,"size_x":7,"size_y":4},{"slice_id":"137","col":9,"row":1,"size_x":3,"size_y":3},{"slice_id":"138","col":5,"row":1,"size_x":4,"size_y":3},{"slice_id":"139","col":1,"row":3,"size_x":4,"size_y":4},{"slice_id":"140","col":8,"row":4,"size_x":4,"size_y":4}],"css":"None","expanded_slices":{}}"""
url = '/caravel/save_dash/{}/'.format(dash.id)
resp = self.client.post(url, data=dict(data=data))
assert "SUCCESS" in resp.data.decode('utf-8')
def test_gamma(self):
self.login_gamma()
resp = self.client.get('/slicemodelview/list/')
print(resp.data.decode('utf-8'))
assert "List Slice" in resp.data.decode('utf-8')
resp = self.client.get('/dashboardmodelview/list/')
assert "List Dashboard" in resp.data.decode('utf-8')
def test_public_user_dashboard_access(self):
# Try access before adding appropriate permissions.
resp = self.client.get('/slicemodelview/list/')
data = resp.data.decode('utf-8')
assert '<a href="/tablemodelview/edit/3">birth_names</a>' not in data
resp = self.client.get('/dashboardmodelview/list/')
data = resp.data.decode('utf-8')
assert '<a href="/caravel/dashboard/births/">' not in data
resp = self.client.get('/caravel/explore/table/3/', follow_redirects=True)
data = resp.data.decode('utf-8')
assert "You don't seem to have access to this datasource" in data
self.setup_public_access_for_dashboard('birth_names')
# Try access after adding appropriate permissions.
resp = self.client.get('/slicemodelview/list/')
data = resp.data.decode('utf-8')
assert '<a href="/tablemodelview/edit/3">birth_names</a>' in data
resp = self.client.get('/dashboardmodelview/list/')
data = resp.data.decode('utf-8')
assert '<a href="/caravel/dashboard/births/">' in data
resp = self.client.get('/caravel/dashboard/births/')
data = resp.data.decode('utf-8')
assert '[dashboard] Births' in data
resp = self.client.get('/caravel/explore/table/3/')
data = resp.data.decode('utf-8')
assert '[explore] birth_names' in data
# Confirm that public doesn't have access to other datasets.
resp = self.client.get('/slicemodelview/list/')
data = resp.data.decode('utf-8')
assert '<a href="/tablemodelview/edit/2">wb_health_population</a>' not in data
resp = self.client.get('/dashboardmodelview/list/')
data = resp.data.decode('utf-8')
assert '<a href="/caravel/dashboard/world_health/">' not in data
resp = self.client.get('/caravel/explore/table/2/', follow_redirects=True)
data = resp.data.decode('utf-8')
assert "You don't seem to have access to this datasource" in data
SEGMENT_METADATA = [{
"id": "some_id",
"intervals": [ "2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z" ],
"columns": {
"__time": {
"type": "LONG", "hasMultipleValues": False,
"size": 407240380, "cardinality": None, "errorMessage": None },
"dim1": {
"type": "STRING", "hasMultipleValues": False,
"size": 100000, "cardinality": 1944, "errorMessage": None },
"dim2": {
"type": "STRING", "hasMultipleValues": True,
"size": 100000, "cardinality": 1504, "errorMessage": None },
"metric1": {
"type": "FLOAT", "hasMultipleValues": False,
"size": 100000, "cardinality": None, "errorMessage": None }
},
"aggregators": {
"metric1": {
"type": "longSum",
"name": "metric1",
"fieldName": "metric1" }
},
"size": 300000,
"numRows": 5000000
}]
GB_RESULT_SET = [
{
"version": "v1",
"timestamp": "2012-01-01T00:00:00.000Z",
"event": {
"name": 'Canada',
"sum__num": 12345678,
}
},
{
"version": "v1",
"timestamp": "2012-01-01T00:00:00.000Z",
"event": {
"name": 'USA',
"sum__num": 12345678 / 2,
}
},
]
class DruidTests(CaravelTestCase):
"""Testing interactions with Druid"""
def __init__(self, *args, **kwargs):
super(DruidTests, self).__init__(*args, **kwargs)
@patch('caravel.models.PyDruid')
def test_client(self, PyDruid):
self.login_admin()
instance = PyDruid.return_value
instance.time_boundary.return_value = [
{'result': {'maxTime': '2016-01-01'}}]
instance.segment_metadata.return_value = SEGMENT_METADATA
cluster = (
db.session
.query(DruidCluster)
.filter_by(cluster_name='test_cluster')
.first()
)
if cluster:
db.session.delete(cluster)
db.session.commit()
cluster = DruidCluster(
cluster_name='test_cluster',
coordinator_host='localhost',
coordinator_port=7979,
broker_host='localhost',
broker_port=7980,
metadata_last_refreshed=datetime.now())
db.session.add(cluster)
cluster.get_datasources = Mock(return_value=['test_datasource'])
cluster.refresh_datasources()
datasource_id = cluster.datasources[0].id
db.session.commit()
resp = self.client.get('/caravel/explore/druid/{}/'.format(datasource_id))
assert "[test_cluster].[test_datasource]" in resp.data.decode('utf-8')
nres = [
list(v['event'].items()) + [('timestamp', v['timestamp'])]
for v in GB_RESULT_SET]
nres = [dict(v) for v in nres]
import pandas as pd
df = pd.DataFrame(nres)
from enum import Enum
from typing import List
import numpy as np
import pandas as pd
class AggregationMode(str, Enum):
"""Enum for different aggregation modes."""
mean = "mean"
max = "max"
min = "min"
median = "median"
AGGREGATION_FN = {
AggregationMode.mean: np.mean,
AggregationMode.max: np.max,
AggregationMode.min: np.min,
AggregationMode.median: np.median,
}
def mrmr(
relevance_table: pd.DataFrame,
regressors: pd.DataFrame,
top_k: int,
relevance_aggregation_mode: str = AggregationMode.mean,
redundancy_aggregation_mode: str = AggregationMode.mean,
atol: float = 1e-10,
) -> List[str]:
"""
Maximum Relevance and Minimum Redundancy feature selection method.
Here relevance for each regressor is calculated as the per-segment aggregation of the relevance
values in relevance_table. The redundancy term for the regressor is calculated as a mean absolute correlation
between this regressor and other ones. The correlation between the two regressors is an aggregated pairwise
correlation for the regressors values in each segment.
Parameters
----------
relevance_table:
dataframe of shape n_segment x n_exog_series with relevance table, where ``relevance_table[i][j]``
contains relevance of j-th ``df_exog`` series to i-th df series
regressors:
dataframe with regressors in etna format
top_k:
num of regressors to select; if there are not enough regressors, then all will be selected
relevance_aggregation_mode:
the method for relevance values per-segment aggregation
redundancy_aggregation_mode:
the method for redundancy values per-segment aggregation
atol:
the absolute tolerance to compare the float values
Returns
-------
selected_features: List[str]
list of ``top_k`` selected regressors, sorted by their importance
"""
relevance_aggregation_fn = AGGREGATION_FN[AggregationMode(relevance_aggregation_mode)]
redundancy_aggregation_fn = AGGREGATION_FN[AggregationMode(redundancy_aggregation_mode)]
relevance = relevance_table.apply(relevance_aggregation_fn).fillna(0)
all_features = relevance.index.to_list()
selected_features: List[str] = []
not_selected_features = all_features.copy()
redundancy_table = pd.DataFrame(np.inf, index=all_features, columns=all_features)
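# --- Illustrative sketch, not part of the original source ----------------------
# A minimal, self-contained version of the greedy selection step described in the
# mrmr docstring above: pick the most relevant regressor first, then repeatedly pick
# the candidate with the best (relevance - mean redundancy to already selected) score.
# The names `relevance` / `features` and the plain absolute-correlation redundancy
# are assumptions for illustration, not the etna implementation.
def greedy_mrmr_sketch(relevance: pd.Series, features: pd.DataFrame, top_k: int) -> List[str]:
    selected: List[str] = []
    candidates = list(relevance.index)
    corr = features.corr().abs()  # pairwise absolute correlation used as redundancy
    for _ in range(min(top_k, len(candidates))):
        if not selected:
            best = relevance.loc[candidates].idxmax()  # first pick: max relevance
        else:
            redundancy = corr.loc[candidates, selected].mean(axis=1)
            best = (relevance.loc[candidates] - redundancy).idxmax()
        selected.append(best)
        candidates.remove(best)
    return selected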
import numpy as np
import pandas as pd
from timeflux.core.node import Node
class ConcatResult(Node):
""" Concat p1 and p2 score and add the diff
[{score_p1: (float), score_p2: (float), diff_p1_p2: (float)]
Attributes:
i_p1 (port): score player_1, expects Dataframe
i_p2 (port): score player_2, expects Dataframe
o (port): Dataframe
"""
def __init__(self):
pass
def update(self):
if not (self.i_p1.ready() & self.i_p2.ready()):
return
p1 = pd.DataFrame(self.i_p1.data).rename(columns={"score": "score_p1"})
p2 = pd.DataFrame(self.i_p2.data).rename(columns={"score": "score_p2"})
diff = p1['score_p1'].values[0] - p2['score_p2'].values[0]
result = pd.DataFrame([{'diff_p1_p2': diff}])
frames = [p1, p2, result]
self.o.data = pd.concat(frames, axis=1)
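# --- Illustrative usage sketch, not part of the original source ----------------
# Shows what update() computes, using plain DataFrames in place of the node's
# i_p1 / i_p2 ports (an assumption for illustration; a real timeflux graph would
# feed the ports instead).
if __name__ == "__main__":
    p1 = pd.DataFrame([{"score": 0.75}]).rename(columns={"score": "score_p1"})
    p2 = pd.DataFrame([{"score": 0.5}]).rename(columns={"score": "score_p2"})
    diff = p1["score_p1"].values[0] - p2["score_p2"].values[0]
    out = pd.concat([p1, p2, pd.DataFrame([{"diff_p1_p2": diff}])], axis=1)
    print(out)  # columns: score_p1, score_p2, diff_p1_p2 -> 0.75, 0.5, 0.25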
'''
This script implements a Wasserstein GAN, following the feedback GAN paper.
To start with the simplest situation, let's consider generating immunogenic
epitopes for HLA-A0201: 2046 positive instances were collected and are used as
the real samples. Let's see if it works; if it does, we can scale up to more challenging tasks.
Stay tuned!
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# build the model
class ResBlock(nn.Module):
def __init__(self,hidden): # hidden means the number of filters
super(ResBlock,self).__init__()
self.res_block = nn.Sequential(
nn.ReLU(True), # in_place = True
nn.Conv1d(hidden,hidden,kernel_size=3,padding=1),
nn.ReLU(True),
nn.Conv1d(hidden,hidden,kernel_size=3,padding=1),
)
def forward(self,input): # input [N, hidden, seq_len]
output = self.res_block(input)
return input + 0.3*output # [N, hidden, seq_len] doesn't change anything
class Generator(nn.Module):
def __init__(self,hidden,seq_len,n_chars,batch_size):
super(Generator,self).__init__()
self.fc1 = nn.Linear(128,hidden*seq_len)
self.block = nn.Sequential(
ResBlock(hidden),
ResBlock(hidden),
ResBlock(hidden),
ResBlock(hidden),
ResBlock(hidden),
)
self.conv1 = nn.Conv1d(hidden,n_chars,kernel_size=1)
self.hidden = hidden
self.seq_len = seq_len
self.n_chars = n_chars
self.batch_size = batch_size
def forward(self,noise): # noise [batch,128]
output = self.fc1(noise) # [batch,hidden*seq_len]
output = output.view(-1,self.hidden,self.seq_len) # [batch,hidden,seq_len]
output = self.block(output) # [batch,hidden,seq_len]
output = self.conv1(output) # [batch,n_chars,seq_len]
'''
To understand the following step, you have to understand how torch.view actually works: it basically
allocates every entry into the resultant tensor of the shape you specify, line by line, then layer by layer.
Also, contiguous() makes sure the memory is contiguous after the transpose, so the tensor behaves the same as
one created from scratch.
'''
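# Shape walk-through (assuming the values set in __main__ below: batch_size=64,
# hidden=128, seq_len=10, n_chars=21):
#   conv1 output      -> [64, 21, 10]
#   transpose(1, 2)   -> [64, 10, 21]   (strides change, memory no longer contiguous)
#   contiguous()      -> same shape, memory re-laid out as if freshly created
#   view(64*10, 21)   -> one row per sequence position, ready for the softmax below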
output = output.transpose(1,2) # [batch,seq_len,n_chars]
output = output.contiguous()
output = output.view(self.batch_size*self.seq_len,self.n_chars)
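# Gumbel-softmax gives a differentiable, approximately one-hot distribution over the
# 21 characters at each position, so gradients can flow back through the sampling step.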
output = F.gumbel_softmax(output,tau=0.75,hard=False) # github code tau=0.5, paper tau=0.75 [batch*seq_len,n_chars]
output = output.view(self.batch_size,self.seq_len,self.n_chars) # [batch,seq_len,n_chars]
return output
class Discriminator(nn.Module):
def __init__(self,hidden,n_chars,seq_len):
super(Discriminator,self).__init__()
self.block = nn.Sequential(
ResBlock(hidden),
ResBlock(hidden),
ResBlock(hidden),
ResBlock(hidden),
ResBlock(hidden),
)
self.conv1 = nn.Conv1d(n_chars,hidden,1)
self.fc = nn.Linear(seq_len*hidden,1)
self.hidden = hidden
self.n_chars = n_chars
self.seq_len = seq_len
def forward(self,input): # input [N,seq_len,n_chars]
output = input.transpose(1,2) # input [N, n_chars, seq_len]
output = output.contiguous()
output = self.conv1(output) # [N,hidden,seq_len]
output = self.block(output) # [N, hidden, seq_len]
output = output.view(-1,self.seq_len*self.hidden) # [N, hidden*seq_len]
output = self.fc(output) # [N,1]
return output
# define dataset
class real_dataset_class(torch.utils.data.Dataset):
def __init__(self,raw,seq_len,n_chars): # raw is a ndarray ['ARRRR','NNNNN']
self.raw = raw
self.seq_len = seq_len
self.n_chars = n_chars
self.post = self.process()
def process(self):
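# One-hot encode each peptide over the 21-letter alphabet ('-' is the gap symbol);
# 9-mers get a '-' inserted after the 4th residue so every sequence has length seq_len.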
result = torch.empty(len(self.raw),self.seq_len,self.n_chars) # [N,seq_len,n_chars]
amino = 'ARNDCQEGHILKMFPSTWYV-'
identity = torch.eye(self.n_chars)
for i in range(len(self.raw)):
pep = self.raw[i]
if len(pep) == 9:
pep = pep[0:4] + '-' + pep[4:]
inner = torch.empty(len(pep),self.n_chars)
for p in range(len(pep)):
inner[p] = identity[amino.index(pep[p].upper()), :]
encode = inner # [seq_len,n_chars]
result[i] = encode
return result
def __getitem__(self,index):
return self.post[index]
def __len__(self):
return self.post.shape[0]
# auxiliary function during training GAN
def sample_generator(batch_size):
noise = torch.randn(batch_size,128).to(device) # [N, 128]
generated_data = G(noise) # [N, seq_len, n_chars]
return generated_data
def calculate_gradient_penalty(real_data,fake_data,lambda_=10):
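# WGAN-GP gradient penalty: interpolate uniformly between real and fake samples and
# penalize lambda_ * (||grad_x D(x_interp)||_2 - 1)^2 so the critic stays ~1-Lipschitz.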
alpha = torch.rand(batch_size,1,1).to(device)
alpha = alpha.expand_as(real_data) # [N,seq_len,n_chars]
interpolates = alpha * real_data + (1-alpha) * fake_data # [N,seq_len,n_chars]
interpolates = torch.autograd.Variable(interpolates,requires_grad=True)
disc_interpolates = D(interpolates)
# below, grad function will return a tuple with length one, so only take [0], it will be a tensor of shape inputs, gradient wrt each input
gradients = torch.autograd.grad(outputs=disc_interpolates,inputs=interpolates,grad_outputs=torch.ones(disc_interpolates.size()),create_graph=True,retain_graph=True)[0]
gradients = gradients.contiguous().view(batch_size,-1) # [N, seq_len*n_chars]
gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12) # [N,]
gradient_penalty = lambda_* ((gradients_norm - 1) ** 2).mean() # []
return gradient_penalty
def discriminator_train(real_data):
D_optimizer.zero_grad()
fake_data = sample_generator(batch_size) # generate a mini-batch of fake data
d_fake_pred = D(fake_data) # what's the prediction you get via discriminator
d_fake_error = d_fake_pred.mean() # compute mean, return a scalar value
d_real_pred = D(real_data) # what's the prediction you get for real data via discriminator
d_real_error = d_real_pred.mean() # compute mean
gradient_penalty = calculate_gradient_penalty(real_data,fake_data) # calculate gradient penalty
d_error_total = d_fake_error - d_real_error + gradient_penalty # [] # total error, you want to minimize this, so you hope fake image be more real
w_dist = d_real_error - d_fake_error
d_error_total.backward()
D_optimizer.step()
return d_fake_error,d_real_error,gradient_penalty, d_error_total, w_dist
def generator_train():
G_optimizer.zero_grad()
g_fake_data = sample_generator(batch_size)
dg_fake_pred = D(g_fake_data)
g_error_total = -torch.mean(dg_fake_pred)
g_error_total.backward()
G_optimizer.step()
return g_error_total
# processing function from previous code
def peptide_data_aaindex(peptide,after_pca): # return numpy array [10,12,1]
length = len(peptide)
if length == 10:
encode = aaindex(peptide,after_pca)
elif length == 9:
peptide = peptide[:5] + '-' + peptide[5:]
encode = aaindex(peptide,after_pca)
encode = encode.reshape(encode.shape[0], encode.shape[1], -1)
return encode
def dict_inventory(inventory):
dicA, dicB, dicC = {}, {}, {}
dic = {'A': dicA, 'B': dicB, 'C': dicC}
for hla in inventory:
type_ = hla[4] # A,B,C
first2 = hla[6:8] # 01
last2 = hla[8:] # 01
try:
dic[type_][first2].append(last2)
except KeyError:
dic[type_][first2] = []
dic[type_][first2].append(last2)
return dic
def rescue_unknown_hla(hla, dic_inventory):
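# Fallback for HLA alleles missing from the inventory: keep the gene (A/B/C), then pick
# the numerically closest allele within the same 2-digit group if that group exists,
# otherwise the first allele of the numerically closest group.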
type_ = hla[4]
first2 = hla[6:8]
last2 = hla[8:]
big_category = dic_inventory[type_]
#print(hla)
if big_category.get(first2) is not None:
small_category = big_category.get(first2)
distance = [abs(int(last2) - int(i)) for i in small_category]
optimal = min(zip(small_category, distance), key=lambda x: x[1])[0]
return 'HLA-' + str(type_) + '*' + str(first2) + str(optimal)
else:
small_category = list(big_category.keys())
distance = [abs(int(first2) - int(i)) for i in small_category]
optimal = min(zip(small_category, distance), key=lambda x: x[1])[0]
return 'HLA-' + str(type_) + '*' + str(optimal) + str(big_category[optimal][0])
def hla_data_aaindex(hla_dic,hla_type,after_pca): # return numpy array [34,12,1]
try:
seq = hla_dic[hla_type]
except KeyError:
hla_type = rescue_unknown_hla(hla_type,dic_inventory)
seq = hla_dic[hla_type]
encode = aaindex(seq,after_pca)
encode = encode.reshape(encode.shape[0], encode.shape[1], -1)
return encode
def construct_aaindex(ori,hla_dic,after_pca):
series = []
for i in range(ori.shape[0]):
peptide = ori['peptide'].iloc[i]
hla_type = ori['HLA'].iloc[i]
immuno = np.array(ori['immunogenicity'].iloc[i]).reshape(1,-1) # [1,1]
'''
If 'classfication': ['immunogenicity']
If 'regression': ['potential']
'''
encode_pep = peptide_data_aaindex(peptide,after_pca) # [10,12]
encode_hla = hla_data_aaindex(hla_dic,hla_type,after_pca) # [46,12]
series.append((encode_pep, encode_hla, immuno))
return series
def hla_df_to_dic(hla):
dic = {}
for i in range(hla.shape[0]):
col1 = hla['HLA'].iloc[i] # HLA allele
col2 = hla['pseudo'].iloc[i] # pseudo sequence
dic[col1] = col2
return dic
def aaindex(peptide,after_pca):
amino = 'ARNDCQEGHILKMFPSTWYV-'
matrix = np.transpose(after_pca) # [12,21]
encoded = np.empty([len(peptide), 12]) # (seq_len,12)
for i in range(len(peptide)):
query = peptide[i]
if query == 'X': query = '-'
query = query.upper()
encoded[i, :] = matrix[:, amino.index(query)]
return encoded
# post utils functions
def inverse_transform(hard): # [N,seq_len]
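# Map each row of argmax indices back to an amino-acid string using the same
# 21-letter alphabet as the one-hot encoding (the last symbol '-' is the gap).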
amino = 'ARNDCQEGHILKMFPSTWYV-'
result = []
for row in hard:
temp = ''
for col in row:
aa = amino[col]
temp += aa
result.append(temp)
return result
if __name__ == '__main__':
batch_size = 64
lr = 0.0001
num_epochs = 100
seq_len = 10
hidden = 128
n_chars = 21
d_steps = 10
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = pd.read_csv('wassGAN/gan_a0201.csv')
raw = data['peptide'].values
real_dataset = real_dataset_class(raw,seq_len,n_chars)
# training
G = Generator(hidden,seq_len,n_chars,batch_size).to(device)
D = Discriminator(hidden,n_chars,seq_len).to(device)
G_optimizer = torch.optim.Adam(G.parameters(),lr=lr,betas=(0.5,0.9)) # usually should be (0.9,0.999), (momentum,RMSprop)
D_optimizer = torch.optim.Adam(D.parameters(),lr=lr,betas=(0.5,0.9))
counter = 0
c_epoch = 0
array1,array2,array3,array4,array5 = [],[],[],[],[]
for epoch in range(num_epochs):
'''
The way I understand this training process:
first train the discriminator (critic) to tell fake from real data, i.e. to maximize the estimated
Wasserstein distance between them, while the generator's parameters stay constant.
Then train the generator, which adapts to produce samples that look more real.
The end goal is to generate, not to discriminate.
'''
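# Note: with d_steps = 10, the generator is updated once for every 10 critic
# (discriminator) mini-batches, the usual WGAN practice of training the critic more often.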
d_fake_losses, d_real_losses, grad_penalties = [],[],[]
G_losses, D_losses, W_dist = [],[],[]
real_dataloader = torch.utils.data.DataLoader(real_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
for mini_batch in real_dataloader:
d_fake_err,d_real_err,gradient_penalty,d_error_total,w_dist = discriminator_train(mini_batch)
grad_penalties.append(gradient_penalty.detach().cpu().numpy())
d_real_losses.append(d_real_err.detach().cpu().numpy())
d_fake_losses.append(d_fake_err.detach().cpu().numpy())
D_losses.append(d_error_total.detach().cpu().numpy())
W_dist.append(w_dist.detach().cpu().numpy())
if counter % d_steps == 0:
g_err = generator_train()
G_losses.append(g_err.detach().cpu().numpy())
counter += 1
summary_string = 'Epoch{0}/{1}: d_real_loss-{2:.2f},d_fake_loss-{3:.2f},d_total_loss-{4:.2f},G_total_loss-{5:.2f},W_dist-{6:.2f}'\
.format(epoch+1,num_epochs,np.mean(d_real_losses),np.mean(d_fake_losses),np.mean(D_losses),np.mean(G_losses),np.mean(W_dist))
print(summary_string)
array1.append(np.mean(d_real_losses))
array2.append(np.mean(d_fake_losses))
array3.append(np.mean(D_losses))
array4.append(np.mean(G_losses))
array5.append(np.mean(W_dist))
if epoch % 20 == 19:
total = []
for i in range(16):
generation = sample_generator(64).detach().cpu().numpy() # [N,seq_len,n_chars]
hard = np.argmax(generation, axis=2) # [N,seq_len]
pseudo = inverse_transform(hard)
df = pd.DataFrame({'peptide': pseudo, 'HLA': ['HLA-A*0201' for i in range(len(pseudo))],
'immunogenicity': [1 for i in range(len(pseudo))]})
total.append(df)
df_all = pd.concat(total)
df_all.to_csv('/Users/ligk2e/Desktop/df_all_epoch{}.csv'.format(epoch + 1), index=None)
c_epoch += 1
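# Hedged illustration (added; not part of the original script) of the critic update that
# discriminator_train() is assumed to perform under the WGAN-GP objective. The helper
# gradient_penalty() and the function below are hypothetical stand-ins, shown only to make
# the docstring above concrete; the real training functions are defined elsewhere in this script.
# def wgan_gp_critic_step(real_batch, lambda_gp=10.0):
#     fake_batch = sample_generator(real_batch.size(0)).detach()
#     d_real = D(real_batch).mean()
#     d_fake = D(fake_batch).mean()
#     gp = lambda_gp * gradient_penalty(D, real_batch, fake_batch)  # hypothetical helper
#     loss = d_fake - d_real + gp    # minimizing this maximizes d_real - d_fake
#     D_optimizer.zero_grad(); loss.backward(); D_optimizer.step()
#     return d_fake, d_real, gp, loss, d_real - d_fake  # last term ~ Wasserstein estimate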
# visualization: plot the training history/log
ax0 = plt.subplot(5,1,1)
ax0.plot(np.arange(num_epochs),array1)
ax0.set_ylabel('d_real_losses')
ax1 = plt.subplot(5,1,2)
ax1.plot(np.arange(num_epochs),array2)
ax1.set_ylabel('d_fake_losses')
ax2 = plt.subplot(5,1,3)
ax2.plot(np.arange(num_epochs),array3)
ax2.set_ylabel('D_losses')
ax3 = plt.subplot(5,1,4)
ax3.plot(np.arange(num_epochs),array4)
ax3.set_ylabel('G_losses')
ax4 = plt.subplot(5,1,5)
ax4.plot(np.arange(num_epochs),array5)
ax4.set_ylabel('W_dist')
# visualization: t-SNE embedding
from sklearn.manifold import TSNE
after_pca = np.loadtxt('../immuno/immuno2/data/after_pca.txt')
hla = pd.read_csv('../immuno/immuno2/data/hla2paratopeTable_aligned.txt', sep='\t')
hla_dic = hla_df_to_dic(hla)
inventory = list(hla_dic.keys())
dic_inventory = dict_inventory(inventory)
dataset = construct_aaindex(data, hla_dic, after_pca)
X = np.empty((len(dataset), 12 * 10))
for i, (x, y, _) in enumerate(dataset):
x = x.reshape(-1)
X[i, :] = x
X_embedded = TSNE(n_components=2).fit_transform(X)
fig,ax = plt.subplots()
ax.scatter(X_embedded[:,0],X_embedded[:,1],color='b')
total = []
for i in range(16):
generation = sample_generator(64).detach().cpu().numpy() # [N,seq_len,n_chars]
hard = np.argmax(generation,axis=2) # [N,seq_len]
pseudo = inverse_transform(hard)
df = pd.DataFrame({'peptide':pseudo,'HLA':['HLA-A*0201' for i in range(len(pseudo))],'immunogenicity':[1 for i in range(len(pseudo))]})
test_dataset = construct_aaindex(df, hla_dic, after_pca)
test_X = np.empty((len(test_dataset), 12 * 10))
for i, (x, y, _) in enumerate(test_dataset):
x = x.reshape(-1)
test_X[i, :] = x
total.append(test_X)
test_X = np.concatenate(total,axis=0)
coalesed = np.concatenate([X,test_X],axis=0)
coalesed_embedded = TSNE(n_components=2).fit_transform(coalesed)
from itertools import repeat
fig,ax = plt.subplots()
ax.scatter(coalesed_embedded[:,0],coalesed_embedded[:,1],color=list(repeat('b',X.shape[0]))+list(repeat('r',test_X.shape[0])))
# compare total noise
total_noise = np.random.randn(1024,10,21)
hard = np.argmax(total_noise,axis=2)
pseudo = inverse_transform(hard)
df = pd.DataFrame({'peptide': pseudo, 'HLA': ['HLA-A*0201' for i in range(len(pseudo))],
'immunogenicity': [1 for i in range(len(pseudo))]})
df.to_csv('/Users/ligk2e/Desktop/df_noise.csv',index=None)
test_dataset = construct_aaindex(df, hla_dic, after_pca)
test_X = np.empty((len(test_dataset), 12 * 10))
for i, (x, y, _) in enumerate(test_dataset):
x = x.reshape(-1)
test_X[i, :] = x
coalesed = np.concatenate([X, test_X], axis=0)
coalesed_embedded = TSNE(n_components=2).fit_transform(coalesed)
from itertools import repeat
fig, ax = plt.subplots()
ax.scatter(coalesed_embedded[:, 0], coalesed_embedded[:, 1],
color=list(repeat('b', X.shape[0])) + list(repeat('r', test_X.shape[0])))
# subject the generated epitopes to immunogenicity prediction using the CNN model
total = []
for i in range(16):
generation = sample_generator(64).detach().cpu().numpy() # [N,seq_len,n_chars]
hard = np.argmax(generation,axis=2) # [N,seq_len]
pseudo = inverse_transform(hard)
df = pd.DataFrame({'peptide':pseudo,'HLA':['HLA-A*0201' for i in range(len(pseudo))],'immunogenicity':[1 for i in range(len(pseudo))]})
total.append(df)
df_all = pd.concat(total)
df_all.to_csv('/Users/ligk2e/Desktop/df_all.csv',index=None)
# here we perform a time-course inspection: take the random-noise baseline (already done)
# and the checkpoints saved at epochs 20, 40, 60, 80 and 100, then
# inspect their t-SNE distributions
# and their CNN immunogenicity predictions
df =
|
pd.read_csv('/Users/ligk2e/Desktop/immuno3/df/df_all_epoch100.csv')
|
pandas.read_csv
|
"""
This script transforms the Semeval Task 5: Hyperpartisan News Detection data
provided in XML format, to CSV format for easier use.
"""
import pandas as pd
import xml.etree.cElementTree as et
import numpy as np
gfiles = ["./ground-truth-training-byarticle-20181122.xml",
"./ground-truth-training-bypublisher-20181122.xml",
"./ground-truth-validation-bypublisher-20181122.xml"]
gdfCols = ["hyperpartisan", "id", "labeled-by", "url", "bias"]
files = ["./articles-training-byarticle-20181122.xml",
"./articles-training-bypublisher-20181122.xml",
"./articles-validation-bypublisher-20181122.xml"]
dfCols = ["id", "published-at", "title", "text"]
# Handles articles
for _file in files:
df = pd.DataFrame(columns=dfCols)
index = 1
i = []
p = []
ti = []
te = []
for node in et.parse(_file).getroot():
i.append(node.attrib.get("id"))
p.append(node.attrib.get("published-at"))
ti.append(node.attrib.get("title"))
if node.text is not None:
node.text = None
article = ""
for paragraph in node.itertext():
article += paragraph
te.append(article)
index += 1
if index % 100 == 0:
print(index)
df["id"], df["published-at"], df["title"], df["text"] = pd.Series(i), pd.Series(p), pd.Series(ti), pd.Series(te)
print(df.shape)
df.to_csv(_file[:-4] + ".csv")
# Handles ground truth
for gfile in gfiles:
df = pd.DataFrame(columns=gdfCols)
index = 1
h = []
i = []
l = []
u = []
b = []
for node in et.parse(gfile).getroot():
h.append(node.attrib.get("hyperpartisan"))
i.append(node.attrib.get("id"))
l.append(node.attrib.get("labeled-by"))
u.append(node.attrib.get("url"))
b.append(node.attrib.get("bias") if node.attrib.get("bias") is not None else "")
index += 1
if index % 100 == 0:
print(index)
df["hyperpartisan"], df["id"], df["labeled-by"], df["url"], df["bias"] = pd.Series(h), pd.Series(i), pd.Series(l), pd.Series(u), pd.Series(b)
print(df.shape)
df.to_csv(gfile[:-4] + ".csv")
# Merges
for i in range(len(files)):
print(i)
df_file, df_gfile = pd.read_csv(files[i][:-4] + ".csv", sep=','), pd.read_csv(gfiles[i][:-4] + ".csv", sep=',')
df =
|
pd.merge(df_file, df_gfile, on="id")
|
pandas.merge
|
import pandas as pd
import numpy as np
import requests
import time
import argparse
from tqdm import tqdm
from pyarrow import feather
def get_edit_history(
userid=None, user=None, latest_timestamp=None, earliest_timestamp=None, limit=None
):
"""For a particular user, pull their whole history of edits.
Args:
param1 (int): The first parameter.
param2 (str): The second parameter.
Returns:
bool: The return value. True for success, False otherwise.
"""
S = requests.Session()
S.headers.update(
{"User-Agent": "WikiRecs (<EMAIL>) One-time pull"}
)
URL = "https://en.wikipedia.org/w/api.php"
PARAMS = {
"action": "query",
"format": "json",
"ucnamespace": "0",
"list": "usercontribs",
"ucuserids": userid,
"ucprop": "title|ids|sizediff|flags|comment|timestamp",
"ucshow=": "!minor|!new",
}
if latest_timestamp is not None:
PARAMS["ucstart"] = latest_timestamp
if earliest_timestamp is not None:
PARAMS["ucend"] = earliest_timestamp
if user is not None:
PARAMS["ucuser"] = user
if userid is not None:
PARAMS["ucuserid"] = userid
PARAMS["uclimit"] = 500
R = S.get(url=URL, params=PARAMS)
DATA = R.json()
if "query" not in DATA:
print(DATA)
raise ValueError
USERCONTRIBS = DATA["query"]["usercontribs"]
all_ucs = USERCONTRIBS
i = 500
while i < 100000:
if "continue" not in DATA:
break
last_continue = DATA["continue"]
PARAMS.update(last_continue)
R = S.get(url=URL, params=PARAMS)
DATA = R.json()
USERCONTRIBS = DATA["query"]["usercontribs"]
all_ucs.extend(USERCONTRIBS)
i = i + 500
return all_ucs
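# Commented usage sketch (added; the id and timestamp values are hypothetical):
# edits = get_edit_history(userid=12345, earliest_timestamp="2020-01-01T00:00:00Z")
# print(len(edits), edits[0]["title"], edits[0]["timestamp"])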
def pull_edit_histories(
sampled_users_file,
edit_histories_file_pattern,
users_per_chunk,
earliest_timestamp,
start=0,
):
histories = []
cols = ["userid", "user", "pageid", "title", "timestamp", "sizediff"]
sampled_users = pd.read_csv(sampled_users_file)
sampled_users.loc[:, "userid"] = sampled_users.loc[:, "userid"].astype(int)
sampled_users = sampled_users.reset_index()
# Iterate through all the users in the list
for i, (user, userid) in tqdm(
iterable=enumerate(
zip(sampled_users["user"][start:], sampled_users["userid"][start:]),
start=start,
),
total=len(sampled_users),
initial=start,
):
# Get the history of edits for this userid
thehistory = get_edit_history(
userid=int(userid), earliest_timestamp=earliest_timestamp
)
# If no edits, skip
if len(thehistory) == 0:
continue
thehistory = pd.DataFrame(thehistory)
# Remove edits using automated tools by looking for the word "using" in the comments
try:
thehistory = thehistory[
np.invert(thehistory.comment.astype(str).str.contains("using"))
]
except AttributeError:
continue
if len(thehistory) == 0:
continue
histories.append(thehistory.loc[:, cols])
if np.mod(i, 50) == 0:
print(
"Most recent: {}/{} {} ({}) has {} edits".format(
i, len(sampled_users), user, int(userid), len(thehistory)
)
)
# Every users_per_chunk users, write the accumulated histories out to limit RAM usage
if np.mod(i, users_per_chunk) == 0:
feather.write_feather(
pd.concat(histories), edit_histories_file_pattern.format(i)
)
histories = []
# Get the last few users that don't make up a full chunk
feather.write_feather(
|
pd.concat(histories)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
UNESCO-IHE 2016
Contact: <EMAIL>
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet1
"""
import os
import numpy as np
import pandas as pd
import time
import xml.etree.ElementTree as ET
import subprocess
def create_sheet3(basin, period, units, data, output, template=False):
"""
Keyword arguments:
basin -- The name of the basin
period -- The period of analysis
units -- A list with the units of the data:
[<water consumption>, <land productivity>, <water productivity>]
data -- A csv file that contains the water data. The csv file has to
follow a specific format. A sample csv is available at:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output -- A list (length 2) with the output paths of the jpg files
for the two parts of the sheet
template -- A list (length 2) of the svg files of the sheet.
Use False (default) to use the standard svg files.
Example:
from wa.Sheets import *
create_sheet3(basin='Helmand', period='2007-2011',
units=['km3/yr', 'kg/ha/yr', 'kg/m3'],
data=[r'C:\Sheets\csv\Sample_sheet3_part1.csv',
r'C:\Sheets\csv\Sample_sheet3_part2.csv'],
output=[r'C:\Sheets\sheet_3_part1.jpg',
r'C:\Sheets\sheet_3_part2.jpg'])
"""
# Read table
df1 = pd.read_csv(data[0], sep=';')
df2 = pd.read_csv(data[1], sep=';')
# Data frames
df1c = df1.loc[df1.USE == "CROP"]
df1n = df1.loc[df1.USE == "NON-CROP"]
df2c = df2.loc[df2.USE == "CROP"]
df2n = df2.loc[df2.USE == "NON-CROP"]
# Read csv file part 1
crop_r01c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c01 = crop_r02c01 + crop_r03c01
crop_r01c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c02 = crop_r02c02 + crop_r03c02
crop_r01c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c03 = crop_r02c03 + crop_r03c03
crop_r01c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c04 = crop_r02c04 + crop_r03c04
crop_r01c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c05 = crop_r02c05 + crop_r03c05
crop_r01c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c06 = crop_r02c06 + crop_r03c06
crop_r01c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c07 = crop_r02c07 + crop_r03c07
crop_r01c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c08 = crop_r02c08 + crop_r03c08
crop_r01c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c09 = crop_r02c09 + crop_r03c09
crop_r01c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c10 = crop_r02c10 + crop_r03c10
crop_r01c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c11 = crop_r02c11 + crop_r03c11
crop_r01c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c12 = crop_r02c12 + crop_r03c12
noncrop_r01c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c01 = noncrop_r02c01 + noncrop_r03c01
noncrop_r01c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c02 = noncrop_r02c02 + noncrop_r03c02
crop_r01 = np.nansum([crop_r01c01, crop_r01c02, crop_r01c03,
crop_r01c04, crop_r01c05, crop_r01c06,
crop_r01c07, crop_r01c08, crop_r01c09,
crop_r01c10, crop_r01c11, crop_r01c12])
crop_r02 = np.nansum([crop_r02c01, crop_r02c02, crop_r02c03,
crop_r02c04, crop_r02c05, crop_r02c06,
crop_r02c07, crop_r02c08, crop_r02c09,
crop_r02c10, crop_r02c11, crop_r02c12])
crop_r03 = np.nansum([crop_r03c01, crop_r03c02, crop_r03c03,
crop_r03c04, crop_r03c05, crop_r03c06,
crop_r03c07, crop_r03c08, crop_r03c09,
crop_r03c10, crop_r03c11, crop_r03c12])
crop_r04 = crop_r02 + crop_r03
noncrop_r01 = np.nansum([noncrop_r01c01, noncrop_r01c02])
noncrop_r02 = np.nansum([noncrop_r02c01, noncrop_r02c02])
noncrop_r03 = np.nansum([noncrop_r03c01, noncrop_r03c02])
noncrop_r04 = noncrop_r02 + noncrop_r03
ag_water_cons = crop_r01 + crop_r04 + noncrop_r01 + noncrop_r04
# Read csv file part 2
# Land productivity
lp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
# Water productivity
wp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
# Calculations & modify svgs
if not template:
path = os.path.dirname(os.path.abspath(__file__))
svg_template_path_1 = os.path.join(path, 'svg', 'sheet_3_part1.svg')
svg_template_path_2 = os.path.join(path, 'svg', 'sheet_3_part2.svg')
else:
svg_template_path_1 = os.path.abspath(template[0])
svg_template_path_2 = os.path.abspath(template[1])
tree1 = ET.parse(svg_template_path_1)
tree2 = ET.parse(svg_template_path_2)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Titles
xml_txt_box = tree1.findall('''.//*[@id='basin']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree1.findall('''.//*[@id='period']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree1.findall('''.//*[@id='units']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 1: Agricultural water consumption (' + units[0] + ')'
xml_txt_box = tree2.findall('''.//*[@id='basin2']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree2.findall('''.//*[@id='period2']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree2.findall('''.//*[@id='units2']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 2: Land productivity (' + units[1] + ') and water productivity (' + units[2] + ')'
# Part 1
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c01']''')[0]
if not pd.isnull(crop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c02']''')[0]
if not pd.isnull(crop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c03']''')[0]
if not pd.isnull(crop_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c04']''')[0]
if not pd.isnull(crop_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c05']''')[0]
if not pd.isnull(crop_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c06']''')[0]
if not pd.isnull(crop_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c07']''')[0]
if not pd.isnull(crop_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c08']''')[0]
if not pd.isnull(crop_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c09']''')[0]
if not pd.isnull(crop_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c10']''')[0]
if not pd.isnull(crop_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c11']''')[0]
if not pd.isnull(crop_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c12']''')[0]
if not pd.isnull(crop_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01']''')[0]
if not pd.isnull(crop_r01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c01']''')[0]
if not pd.isnull(crop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c02']''')[0]
if not pd.isnull(crop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c03']''')[0]
if not pd.isnull(crop_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c04']''')[0]
if not pd.isnull(crop_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c05']''')[0]
if not pd.isnull(crop_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c06']''')[0]
if not pd.isnull(crop_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c07']''')[0]
if not pd.isnull(crop_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c08']''')[0]
if not pd.isnull(crop_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c09']''')[0]
if not pd.isnull(crop_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c10']''')[0]
if not pd.isnull(crop_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c11']''')[0]
if not pd.isnull(crop_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c12']''')[0]
if not pd.isnull(crop_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02']''')[0]
if not pd.isnull(crop_r02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c01']''')[0]
if not pd.isnull(crop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c02']''')[0]
if not pd.isnull(crop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c03']''')[0]
if not pd.isnull(crop_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c04']''')[0]
if not pd.isnull(crop_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c05']''')[0]
if not pd.isnull(crop_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c06']''')[0]
if not pd.isnull(crop_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c07']''')[0]
if not pd.isnull(crop_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c08']''')[0]
if not pd.isnull(crop_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c09']''')[0]
if not pd.isnull(crop_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c10']''')[0]
if not pd.isnull(crop_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c11']''')[0]
if not pd.isnull(crop_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c12']''')[0]
if not pd.isnull(crop_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03']''')[0]
if not pd.isnull(crop_r03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c01']''')[0]
if not pd.isnull(crop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c02']''')[0]
if not pd.isnull(crop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c03']''')[0]
if not pd.isnull(crop_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c04']''')[0]
if not pd.isnull(crop_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c05']''')[0]
if not pd.isnull(crop_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c06']''')[0]
if not pd.isnull(crop_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c07']''')[0]
if not pd.isnull(crop_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c08']''')[0]
if not pd.isnull(crop_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c09']''')[0]
if not pd.isnull(crop_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c10']''')[0]
if not pd.isnull(crop_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c11']''')[0]
if not pd.isnull(crop_r04c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c12']''')[0]
if not pd.isnull(crop_r04c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04']''')[0]
if not pd.isnull(crop_r04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c01']''')[0]
if not pd.isnull(noncrop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c02']''')[0]
if not pd.isnull(noncrop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01']''')[0]
if not pd.isnull(noncrop_r01) and noncrop_r01 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c01']''')[0]
if not pd.isnull(noncrop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c02']''')[0]
if not pd.isnull(noncrop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02']''')[0]
if not pd.isnull(noncrop_r02) and noncrop_r02 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c01']''')[0]
if not pd.isnull(noncrop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c02']''')[0]
if not pd.isnull(noncrop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03']''')[0]
if not pd.isnull(noncrop_r03) and noncrop_r03 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c01']''')[0]
if not pd.isnull(noncrop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c02']''')[0]
if not pd.isnull(noncrop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04']''')[0]
if not pd.isnull(noncrop_r04) and noncrop_r04 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
# Part 2
xml_txt_box = tree1.findall('''.//*[@id='ag_water_cons']''')[0]
if not pd.isnull(ag_water_cons):
xml_txt_box.getchildren()[0].text = '%.2f' % ag_water_cons
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c01']''')[0]
if not pd.isnull(lp_r01c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c02']''')[0]
if not pd.isnull(lp_r01c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c03']''')[0]
if not pd.isnull(lp_r01c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c04']''')[0]
if not pd.isnull(lp_r01c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c05']''')[0]
if not pd.isnull(lp_r01c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c06']''')[0]
if not pd.isnull(lp_r01c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c07']''')[0]
if not pd.isnull(lp_r01c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c08']''')[0]
if not pd.isnull(lp_r01c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c09']''')[0]
if not pd.isnull(lp_r01c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c10']''')[0]
if not pd.isnull(lp_r01c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c11']''')[0]
if not pd.isnull(lp_r01c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c12']''')[0]
if not pd.isnull(lp_r01c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c01']''')[0]
if not pd.isnull(lp_r02c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c02']''')[0]
if not pd.isnull(lp_r02c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c03']''')[0]
if not pd.isnull(lp_r02c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c04']''')[0]
if not pd.isnull(lp_r02c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c05']''')[0]
if not pd.isnull(lp_r02c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c06']''')[0]
if not pd.isnull(lp_r02c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c07']''')[0]
if not pd.isnull(lp_r02c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c08']''')[0]
if not pd.isnull(lp_r02c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c09']''')[0]
if not pd.isnull(lp_r02c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c10']''')[0]
if not pd.isnull(lp_r02c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c11']''')[0]
if not pd.isnull(lp_r02c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c12']''')[0]
if not pd.isnull(lp_r02c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c01']''')[0]
if not pd.isnull(lp_r03c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c02']''')[0]
if not pd.isnull(lp_r03c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c03']''')[0]
if not pd.isnull(lp_r03c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c04']''')[0]
if not pd.isnull(lp_r03c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c05']''')[0]
if not pd.isnull(lp_r03c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c06']''')[0]
if not pd.isnull(lp_r03c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c07']''')[0]
if not pd.isnull(lp_r03c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c08']''')[0]
if not pd.isnull(lp_r03c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c09']''')[0]
if not pd.isnull(lp_r03c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c10']''')[0]
if not pd.isnull(lp_r03c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c11']''')[0]
if not pd.isnull(lp_r03c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c12']''')[0]
if not pd.isnull(lp_r03c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c01']''')[0]
if not pd.isnull(lp_r04c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c02']''')[0]
if not pd.isnull(lp_r04c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c03']''')[0]
if not pd.isnull(lp_r04c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c04']''')[0]
if not pd.isnull(lp_r04c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c05']''')[0]
if not pd.isnull(lp_r04c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c06']''')[0]
if not pd.isnull(lp_r04c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c07']''')[0]
if not pd.isnull(lp_r04c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c08']''')[0]
if not pd.isnull(lp_r04c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c09']''')[0]
if not pd.isnull(lp_r04c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c10']''')[0]
if not pd.isnull(lp_r04c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c11']''')[0]
if not pd.isnull(lp_r04c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c12']''')[0]
if not pd.isnull(lp_r04c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c01']''')[0]
if not pd.isnull(wp_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c02']''')[0]
if not pd.isnull(wp_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c03']''')[0]
if not pd.isnull(wp_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c04']''')[0]
if not pd.isnull(wp_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c05']''')[0]
if not pd.isnull(wp_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c06']''')[0]
if not pd.isnull(wp_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c07']''')[0]
if not pd.isnull(wp_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c08']''')[0]
if not pd.isnull(wp_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c09']''')[0]
if not pd.isnull(wp_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c10']''')[0]
if not pd.isnull(wp_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c11']''')[0]
if not
|
pd.isnull(wp_r01c11)
|
pandas.isnull
|
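# Editorial sketch for the template-filling code in the record above (the helper name
# is an assumption, not part of the original script; `pd` and the parsed SVG tree are
# assumed to be in scope there): each repeated block looks up an element by id and
# writes a formatted value or '-', which could be collapsed into a single helper.
def _fill_text_box(tree, element_id, value, fmt='%.0f'):
    box = tree.findall(".//*[@id='%s']" % element_id)[0]
    # box[0] is the same first child the original reaches via getchildren()[0]
    box[0].text = fmt % value if not pd.isnull(value) else '-'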
from typing import Dict, List, Union
import pandas as pd
import requests
import typer
from huggingface_hub import HfApi, HfFolder
def delete_repos(repository_ids: List[str], auth_token: str, repo_type: str = "dataset") -> None:
typer.echo(f"Found {len(repository_ids)} repos to delete")
for repo_id in repository_ids:
org, name = repo_id.split("/")
HfApi().delete_repo(token=auth_token, organization=org, name=name, repo_type=repo_type)
typer.echo(f"Deleted repo: {repo_id}")
def is_time_between(begin_time: str, end_time: str, check_time: str = None) -> bool:
# Adapted from: https://stackoverflow.com/questions/10048249/how-do-i-determine-if-current-time-is-within-a-specified-range-using-pythons-da
# If check time is not given, default to current UTC time
begin_time = pd.to_datetime(begin_time).tz_localize("UTC")
end_time =
|
pd.to_datetime(end_time)
|
pandas.to_datetime
|
#!/usr/bin/env python
from netCDF4 import Dataset, num2date, date2num
import xarray as xr
import os
import glob
import pandas as pd
import shutil
from subprocess import call
def setup_netcdf(p0, pointspec,outpath='.', height=None,**netCDF_kwargs):
d0 = Dataset(p0, 'r')
dataVar = 'spec001_mr'
lats = d0.variables['latitude'][:]
lons = d0.variables['longitude'][:]
heights = d0.variables['height'][:]
rel_lat = d0.variables['RELLAT1'][:][pointspec]
rel_lon = d0.variables['RELLNG1'][:][pointspec]
ts = d0.variables['time'][:]
time_units = d0['time'].units
relcom = d0.variables['RELCOM'][pointspec][:]
dims = d0.dimensions
rel_com_str = ''
for char in relcom:
rel_com_str += char.decode('UTF-8')
rel_com_str = rel_com_str.strip()
sdate = num2date(0,time_units).strftime('%Y%m%d%H%M')
f_unit = d0[dataVar].units
f_longname = d0[dataVar].long_name
ind_receptor = d0.ind_receptor
version = d0.source
out_lat0 = d0.outlat0
out_lon0 = d0.outlon0
ind_source = d0.ind_source
ind_receptor = d0.ind_receptor
lout_step = d0.loutstep
lout_aver = d0.loutaver
lsub_grid = d0.lsubgrid
s_time = d0.ietime
s_date = d0.iedate
title = d0.title
if ind_receptor == 1:
f_name = 'Conc'
f_unit = 's'
elif ind_receptor == 3:
f_name = 'WetDep'
f_unit = 'm'
elif ind_receptor ==4:
f_name = 'DryDep'
f_unit = 'm'
else:
raise ValueError('Model settings not recognized, ind_receptor {}'.format(ind_receptor))
name_str = '_'.join(p0.split('/')[-1].split('_')[:2])
outFileName = outpath + '/' + '_'.join(rel_com_str.split(' ')) + name_str + sdate +'.nc'
try:
ncfile = Dataset(outFileName, 'w', format="NETCDF4")
except PermissionError:
# If the netCDF file already exists, delete the old one
os.remove(outFileName)
ncfile = Dataset(outFileName, 'w', format='NETCDF4')
lat_dim = ncfile.createDimension('lat', dims['latitude'].size)
lon_dim = ncfile.createDimension('lon', dims['longitude'].size)
height_dim = ncfile.createDimension('height',dims['height'].size)
point_dim = ncfile.createDimension('npoint',1)
#temporal dims
time_dim = ncfile.createDimension('time', None)
btime_dim = ncfile.createDimension('btime', dims['time'].size)
#Setup lon/lat (spatial variables)
lat = ncfile.createVariable('lat', 'f4', ('lat', ),**netCDF_kwargs)
lat.units = 'degrees_north'
lat.long_name = 'latitude'
lon = ncfile.createVariable('lon', 'f4', ('lon', ), **netCDF_kwargs)
lon.units = 'degrees_east'
lon.long_name = 'longitude'
height = ncfile.createVariable('height', 'i4',('height',),**netCDF_kwargs)
height.units = 'm'
height.long_name = 'height above ground'
#Set receptor location
rellat = ncfile.createVariable('RELLAT', 'f4', ('npoint',),**netCDF_kwargs)
rellat.units = 'degrees_north'
rellat.long_name = 'latitude_receptor'
rellon = ncfile.createVariable('RELLON', 'f4', ('npoint',), **netCDF_kwargs)
rellon.units = 'degrees_east'
rellon.long_name = 'longitude_receptor'
btime = ncfile.createVariable('btime', 'i4', ('btime',), **netCDF_kwargs)
btime.units = 'hours'
btime.long_name = 'time along backtrajectory'
btime[:] = ts/3600
lon[:] = lons
lat[:] = lats
height[:] = heights
rellat[:] = rel_lat
rellon[:] = rel_lon
time_var = ncfile.createVariable('time', 'f8', ('time',), **netCDF_kwargs)
time_var.units = ''
time_var.long_name = 'time along back trajectory'
field = ncfile.createVariable(f_name, 'f4', ('time', 'btime', 'height','lat', 'lon'), **netCDF_kwargs)
field.units = f_unit
field.spec_name = f_longname
field.long_name = 'SRR {}'.format(name_str)
# ncfile attributes
ncfile.title = title
ncfile.info = 'Sensitivity to emission from FLEXPART backwards simulation'
ncfile.version = version
# ncfile.concatenated = ','.join(ncfiles)
ncfile.dataVar = f_name
ncfile.ind_receptor = ind_receptor
ncfile.ind_source = ind_source
ncfile.outlon0 = out_lon0
ncfile.outlat0 = out_lat0
ncfile.sdate = s_date
ncfile.stime = s_time
ncfile.lsubgrid = lsub_grid
ncfile.close()
return {'path':outFileName, 'point': pointspec}
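# Summary of setup_netcdf above: it builds the skeleton output file with spatial dims
# (lat/lon/height), a single-receptor 'npoint' dim, an unlimited 'time' dim plus a
# fixed 'btime' axis (hours along the back-trajectory), copies the FLEXPART release
# point and global attributes, and returns the new file path together with the
# receptor index, which concat_output below consumes.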
def concat_output(ncfiles,outpath, n_processes = None, netCDF_kwargs={}):
ncfiles.sort()
receptors = xr.open_dataset(ncfiles[0]).numpoint.values
p0 = ncfiles[0]
outpaths = [setup_netcdf(p0, receptor) for receptor in receptors]
dt1 =
|
pd.to_datetime(ncfiles[0][-17:-3])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 20:48:37 2018
@author: elcok
"""
import os
import sys
import numpy as np
import geopandas as gpd
import pandas as pd
sys.path.append(os.path.join( '..'))
from scripts.functions import region_exposure,region_losses,poly_files,load_sample
from scripts.utils import load_config,download_osm_file
import country_converter as coco
cc = coco.CountryConverter()
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
pd.set_option('chained_assignment',None)
from multiprocessing import Pool,cpu_count
def all_countries_risk():
"""Function to estimate the risk for all countries consecutively.
"""
# specify country
countries = ['LU','CZ','CH','EE','LV','LT','PT','ES','AT','BE','DK','IE','NL','NO','SE','UK','PL','IT','FI','FR','DE']
for country in countries:
losses(country, parallel = False, event_set = True)
def all_countries_losses():
"""Function to estimate the losses for all countries consecutively.
"""
# specify country
countries = ['LU','CZ','CH','EE','LV','LT','PT','ES','AT','BE','DK','IE','NL','NO','SE','UK','PL','IT','FI','FR','DE']
for country in countries:
losses(country, parallel = True)
def all_countries_exposure():
"""Function to estimate the exposure for all countries consecutively.
"""
# specify country
countries = ['LU','CZ','CH','EE','LV','LT','PT','ES','AT','BE','DK','IE','NL','NO','SE','UK','PL','IT','FI','FR','DE']
for country in countries:
exposure(country, include_storms = True, parallel = False)
def exposure(country, include_storms = True, parallel = True,save=True):
"""
Creation of exposure table of the specified country.
Arguments:
*country* (string) -- ISO2 code of country to consider.
*include_storms* (bool) -- if set to False, it will only return a list of buildings and their characteristics (default: **True**).
*parallel* (bool) -- calculates all regions within a country parallel. Set to False if you have little capacity on the machine (default: **True**).
*save* (bool) -- boolean to decide whether you want to save the output to a csv file (default: **True**).
Returns:
*GeoDataframe* -- Geopandas dataframe with all buildings of the country and potential exposure to wind
"""
#make sure the country inserted is an ISO2 country name for the remainder of the analysis
#country = coco.convert(names=country, to='ISO2')
# get data path
data_path = load_config()['paths']['data']
# create country poly files
poly_files(data_path,country)
#download OSM file if it is not there yet:
download_osm_file(country)
#get list of regions for which we have poly files (should be all)
regions = os.listdir(os.path.join(data_path,country,'NUTS3_POLY'))
regions = [x.split('.')[0] for x in regions]
if include_storms:
storms = len(regions)*[True]
country_list = len(regions)*[country]
if parallel:
with Pool(cpu_count()-2) as pool:
country_table = pool.starmap(region_exposure,zip(regions,country_list,storms),chunksize=1)
else:
country_table = []
for region in regions:
country_table.append(region_exposure(region,country,True))
else:
storms = len(regions)*[False]
country_list = len(regions)*[country]
if parallel:
with Pool(cpu_count()-2) as pool:
country_table = pool.starmap(region_exposure,zip(regions,country_list,storms),chunksize=1)
else:
country_table = []
for region in regions:
country_table.append(region_exposure(region,country,True))
if save:
gdf_table = gpd.GeoDataFrame(
|
pd.concat(country_table)
|
pandas.concat
|
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
import pytest
import numpy as np
import numpy.ma as ma
import pandas as pd
import scipy as sp
import math
from itertools import repeat, chain
from ..bin import *
from ..bin import _process_column_initial, _encode_categorical_existing, _process_continuous
class StringHolder:
def __init__(self, internal_str):
self.internal_str = internal_str
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedStringHolder(StringHolder):
def __init__(self, internal_str):
StringHolder.__init__(self, internal_str)
class FloatHolder:
def __init__(self, internal_float):
self.internal_float = internal_float
def __float__(self):
return self.internal_float
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatHolder(FloatHolder):
def __init__(self, internal_float):
FloatHolder.__init__(self, internal_float)
class FloatAndStringHolder:
def __init__(self, internal_float, internal_str):
self.internal_float = internal_float
self.internal_str = internal_str
def __float__(self):
return self.internal_float
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatAndStringHolder(FloatAndStringHolder):
def __init__(self, internal_float, internal_str):
FloatAndStringHolder.__init__(self, internal_float, internal_str)
class NothingHolder:
# the result of calling str(..) includes the memory address, so the resulting strings won't be dependable categories
def __init__(self, internal_str):
self.internal_str = internal_str
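# Illustration of the comment above (demo only, not used by the tests): without a
# __str__ override, str(NothingHolder("abc")) falls back to the default object repr,
# e.g. '<...NothingHolder object at 0x0000019525E9FE48>', which embeds the memory
# address and therefore changes from run to run.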
def check_pandas_normal(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val1, val2], dtype=np.object_), dtype=dtype)
feature_types_given = ['nominal']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None)], feature_names_in, None))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c1 = {str(val1) : 1, str(val2) : 2}
X_cols = list(unify_columns(X, [(0, c1)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c2 = {str(val2) : 1, str(val1) : 2}
X_cols = list(unify_columns(X, [(0, c2)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
def check_pandas_missings(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
c1 = {str(val1) : 1, str(val2) : 2}
c2 = {str(val2) : 1, str(val1) : 2}
feature_types_given = ['nominal', 'nominal', 'nominal', 'nominal']
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None), (3, None)], feature_names_in, None))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(len(X_cols[1][2]) == 2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(len(X_cols[2][2]) == 2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(len(X_cols[3][2]) == 2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c1), (2, c1), (3, c1)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c1)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c1)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c2), (1, c2), (2, c2), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c2), (2, c1), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
def check_pandas_float(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, min_unique_continuous=0))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(X_cols[0][1][0] == np.float64(dtype(val2)))
assert(X_cols[0][1][1] == np.float64(dtype(val1)))
assert(X_cols[0][1][2] == np.float64(dtype(val1)))
assert(X_cols[1][0] == 'continuous')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is None)
assert(X_cols[1][1].dtype == np.float64)
assert(np.isnan(X_cols[1][1][0]))
assert(X_cols[1][1][1] == np.float64(dtype(val2)))
assert(X_cols[1][1][2] == np.float64(dtype(val1)))
assert(X_cols[2][0] == 'continuous')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is None)
assert(X_cols[2][1].dtype == np.float64)
assert(X_cols[2][1][0] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[2][1][1]))
assert(X_cols[2][1][2] == np.float64(dtype(val2)))
assert(X_cols[3][0] == 'continuous')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is None)
assert(X_cols[3][1].dtype == np.float64)
assert(X_cols[3][1][0] == np.float64(dtype(val2)))
assert(X_cols[3][1][1] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[3][1][2]))
def check_numpy_throws(dtype_src, val1, val2):
X = np.array([[val1, val2], [val1, val2]], dtype=dtype_src)
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
try:
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_process_continuous_float64():
vals, bad = _process_continuous(np.array([3.5, 4.5], dtype=np.float64), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([3.5, 4.5], dtype=np.float64)))
def test_process_continuous_float32():
vals, bad = _process_continuous(np.array([3.1, np.nan], dtype=np.float32), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 3.0999999046325684)
assert(np.isnan(vals[1]))
def test_process_continuous_int8():
vals, bad = _process_continuous(np.array([7, -9], dtype=np.int8), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([7, -9], dtype=np.float64)))
def test_process_continuous_uint16_missing():
vals, bad = _process_continuous(np.array([7], dtype=np.uint16), np.array([True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 7)
assert(np.isnan(vals[1]))
def test_process_continuous_bool():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([0, 1], dtype=np.float64)))
def test_process_continuous_bool_missing():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), np.array([True, False, True], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 0)
assert(np.isnan(vals[1]))
assert(vals[2] == 1)
def test_process_continuous_obj_simple():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5], dtype=np.float64)))
def test_process_continuous_obj_simple_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), np.array([True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 6)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(np.isnan(vals[5]))
def test_process_continuous_obj_hard():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), DerivedStringHolder("7.5"), FloatHolder(8.5), DerivedFloatHolder(9.5), FloatAndStringHolder(10.5, "88"), DerivedFloatAndStringHolder(11.5, "99")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5], dtype=np.float64)))
def test_process_continuous_obj_hard_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5")], dtype=np.object_), np.array([True, True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 7)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[6]))
def test_process_continuous_obj_hard_bad():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), "bad", StringHolder("bad2"), NothingHolder("bad3")], dtype=np.object_), np.array([True, True, True, True, True, True, True, False, True, True], dtype=np.bool_))
assert(len(bad) == 10)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] is None)
assert(bad[3] is None)
assert(bad[4] is None)
assert(bad[5] is None)
assert(bad[6] == "bad")
assert(bad[7] is None)
assert(bad[8] == "bad2")
assert(isinstance(bad[9], str))
assert(vals.dtype == np.float64)
assert(len(vals) == 10)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[7]))
def test_process_continuous_str_simple():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5], dtype=np.float64)))
def test_process_continuous_str_simple_missing():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), np.array([True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[2]))
def test_process_continuous_str_hard_bad():
vals, bad = _process_continuous(np.array(["1", "2.5", "bad"], dtype=np.unicode_), np.array([True, True, True, False], dtype=np.bool_))
assert(len(bad) == 4)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] == "bad")
assert(bad[3] is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 4)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[3]))
def test_process_column_initial_int_float():
# this test is tricky since np.unique treats int(4) and float(4.0) as equal, so a naive approach would return just "4"
encoded, c = _process_column_initial(np.array([4, 4.0], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["4"] == 1)
assert(c["4.0"] == 2)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_process_column_initial_float32_float64():
# np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 conversion leaves the lower mantissa
# bits all set to zero, and there is another float64 that is closer to "0.1" in float64 representation, so
# they aren't the same, but if we convert them to strings first they are identical. Strings are the
# ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
# will tend to separate the float32 and the float64 values since they aren't the same, but then serialize
# them to the same string. Our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
encoded, c = _process_column_initial(np.array([np.float32(0.1), np.float64(0.1)], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["0.1"] == 1)
assert(c["0.10000000149011612"] == 2)
assert(np.array_equal(encoded, np.array([c["0.10000000149011612"], c["0.1"]], dtype=np.int64)))
def test_process_column_initial_obj_obj():
encoded, c = _process_column_initial(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["def"] == 2)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_float64_nomissing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), None, 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], c["11.1"]], dtype=np.int64)))
def test_process_column_initial_float64_missing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], 0, c["11.1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str():
c = {"cd": 1, "ab": 2}
encoded, bad = _encode_categorical_existing(np.array(["ab", "cd"], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["ab"], c["cd"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_bool():
c = {"True": 1, "False": 2}
encoded, bad = _encode_categorical_existing(np.array([True, False], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["True"], c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_small():
c = {"-2": 1, "3": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64(3)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["3"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_big():
c = {"-2": 1, "18446744073709551615": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64("18446744073709551615")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["18446744073709551615"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_floats():
# np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 conversion leaves the lower mantissa
# bits all set to zero, and there is another float64 that is closer to "0.1" in float64 representation, so
# they aren't the same, but if we convert them to strings first they are identical. Strings are the
# ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
# will tend to separate the float32 and the float64 values since they aren't the same, but then serialize
# them to the same string. Our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
c = {"1.1": 1, "2.19921875": 2, "3.299999952316284": 3, "4.4": 4, "5.5": 5}
encoded, bad = _encode_categorical_existing(np.array([float(1.1), np.float16(2.2), np.float32(3.3), np.float64(4.4), np.longfloat(5.5)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1.1"], c["2.19921875"], c["3.299999952316284"], c["4.4"], c["5.5"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_int():
c = {"abc": 1, "1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", int(1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", float(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float64():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float64(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float32():
c = {"abc": 1, "1.100000023841858": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float32(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.100000023841858"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float():
# this test is tricky since np.unique treats int(4) and float(4) as equal, so a naive approach would return just "4"
c = {"4": 1, "4.0": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), 4.0], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float32():
# np.float64(np.float32(0.1)) != np.float64(0.1) since the float32 version has the lower mantissa
# bits all set to zero, and there is another float64 that is closer to "0.1" among float64s, so
# they aren't the same, but if we convert them to strings first they are identical. I tend to think
# of strings as the ultimate arbiter of categorical membership since strings are cross-platform.
# np.unique will tend to separate the float32 and the float64 values since they aren't the same, but then
# serialize them to the same string. Our model would have ["0.1", "0.1"] as the categories!!
c = {"4": 1, "0.10000000149011612": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), np.float32(0.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["0.10000000149011612"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_obj():
c = {"abc": 1, "def": 2}
encoded, bad = _encode_categorical_existing(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_encode_categorical_existing_str():
c = {"abc": 1, "def": 2, "ghi": 3}
encoded, bad = _encode_categorical_existing(np.array(["abc", "ghi", "def", "something"], dtype=np.unicode_), np.array([True, True, False, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, "something"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["abc"], c["ghi"], 0, c["def"], -1], dtype=np.int64)))
def test_encode_categorical_existing_int8():
c = {"5": 1, "0": 2, "-9": 3}
encoded, bad = _encode_categorical_existing(np.array([5, -9, 0, 0, -9, 5, 99], dtype=np.int8), np.array([True, True, True, False, True, True, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, None, None, None, "99"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["5"], c["-9"], c["0"], 0, c["0"], c["-9"], c["5"], -1], dtype=np.int64)))
def test_encode_categorical_existing_bool():
c = {"False": 1, "True": 2}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["False"], c["True"], 0, c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_bool_true():
c = {"True": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array(["False", None, None, "False"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([-1, c["True"], 0, -1], dtype=np.int64)))
def test_encode_categorical_existing_bool_false():
c = {"False": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, "True", None, None], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["False"], -1, 0, c["False"]], dtype=np.int64)))
def test_process_column_initial_choose_floatcategories():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2")], dtype=np.object_), None, None, 4)
assert(c["2.2"] == 1)
assert(c["2.200000047683716"] == 2)
assert(c["11.11"] == 3)
assert(np.array_equal(encoded, np.array([c["11.11"], c["2.2"], c["2.200000047683716"], c["2.2"], c["2.2"]], dtype=np.int64)))
def test_process_column_initial_choose_floats():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2"), 3.3, 3.3], dtype=np.object_), None, None, 3)
assert(c is None)
assert(np.array_equal(encoded, np.array([11.11, 2.2, 2.200000047683716, 2.2, 2.2, 3.3, 3.3], dtype=np.float64)))
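# Note on the two "choose" tests above: with min_unique_continuous=4 the column holds
# only 3 distinct float values (11.11, 2.2, 2.200000047683716), so it stays categorical;
# adding 3.3 yields 4 distinct values, meeting the threshold of 3, so the column is
# promoted to continuous and the category map comes back as None.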
def test_unify_columns_numpy1():
X = np.array([1, 2, 3])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"]], dtype=np.int64)))
def test_unify_columns_numpy2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_numpy_ignore():
X = np.array([["abc", None, "def"], ["ghi", "jkl", None]])
feature_types_given=['ignore', 'ignore', 'ignore']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, feature_types_given))
assert(3 == len(X_cols))
assert(X_cols[0][0] == 'ignore')
assert(X_cols[0][2] is None)
assert(X_cols[0][1] is None)
assert(np.array_equal(X_cols[0][3], np.array(["abc", "ghi"], dtype=np.object_)))
assert(X_cols[1][0] == 'ignore')
assert(X_cols[1][2] is None)
assert(X_cols[1][1] is None)
assert(np.array_equal(X_cols[1][3], np.array([None, "jkl"], dtype=np.object_)))
assert(X_cols[2][0] == 'ignore')
assert(X_cols[2][2] is None)
assert(X_cols[2][1] is None)
assert(np.array_equal(X_cols[2][3], np.array(["def", None], dtype=np.object_)))
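# Per the assertions above, an 'ignore' column keeps its raw values in slot 3 of the
# returned tuple while the encoded array (slot 1) and category map (slot 2) are None.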
def test_unify_columns_scipy():
X = sp.sparse.csc_matrix([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_dict1():
X = {"feature1" : [1], "feature2" : "hi", "feature3" : None}
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == 0)
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["hi"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["1"])
def test_unify_columns_dict2():
X = {"feature1" : [1, 4], "feature2" : [2, 5], "feature3" : [3, 6]}
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["3"], X_cols[0][2]["6"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["1"], X_cols[2][2]["4"]], dtype=np.int64)))
def test_unify_columns_list1():
X = [1, 2.0, "hi", None]
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_list2():
P1 = pd.DataFrame()
P1["feature1"] = pd.Series(np.array([1, None, np.nan], dtype=np.object_))
P2 = pd.DataFrame()
P2["feature1"] = pd.Series(np.array([1], dtype=np.float32))
P2["feature2"] = pd.Series(np.array([None], dtype=np.object_))
P2["feature3"] = pd.Series(np.array([np.nan], dtype=np.object_))
S1 = sp.sparse.csc_matrix([[1, 2, 3]])
S2 = sp.sparse.csc_matrix([[1], [2], [3]])
X = [np.array([1, 2, 3], dtype=np.int8), pd.Series([4.0, None, np.nan]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_), np.array([[1, 2, 3]], dtype=np.int8), np.array([[1], [2], [3]], dtype=np.int8), P1, P2, S1, S2]
X, n_samples = clean_X(X)
assert(n_samples == 16)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4.0"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1.0"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], 0, c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"], c["2"], c["2"], 0, 0, c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], 0, c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"], c["3"], c["3"], 0, 0, c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_tuple1():
X = (1, 2.0, "hi", None)
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_tuple2():
X = (np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_generator1():
X = (x for x in [1, 2.0, "hi", None])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_generator2():
X = (x for x in [np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_)])
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_pandas_normal_int8():
check_pandas_normal(np.int8, -128, 127)
def test_unify_columns_pandas_normal_uint8():
check_pandas_normal(np.uint8, 0, 255)
def test_unify_columns_pandas_normal_int16():
check_pandas_normal(np.int16, -32768, 32767)
def test_unify_columns_pandas_normal_uint16():
check_pandas_normal(np.uint16, 0, 65535)
def test_unify_columns_pandas_normal_int32():
check_pandas_normal(np.int32, -2147483648, 2147483647)
def test_unify_columns_pandas_normal_uint32():
check_pandas_normal(np.uint32, 0, 4294967295)
def test_unify_columns_pandas_normal_int64():
check_pandas_normal(np.int64, -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_normal_uint64():
check_pandas_normal(np.uint64, np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_normal_bool():
check_pandas_normal(np.bool_, False, True)
def test_unify_columns_pandas_missings_float64():
check_pandas_float(np.float64, -1.1, 2.2)
def test_unify_columns_pandas_missings_longfloat():
check_pandas_float(np.longfloat, -1.1, 2.2)
def test_unify_columns_pandas_missings_float32():
check_pandas_float(np.float32, -1.1, 2.2)
def test_unify_columns_pandas_missings_float16():
check_pandas_float(np.float16, -1.1, 2.2)
def test_unify_columns_pandas_missings_Int8Dtype():
check_pandas_missings(pd.Int8Dtype(), -128, 127)
def test_unify_columns_pandas_missings_UInt8Dtype():
check_pandas_missings(pd.UInt8Dtype(), 0, 255)
def test_unify_columns_pandas_missings_Int16Dtype():
check_pandas_missings(pd.Int16Dtype(), -32768, 32767)
def test_unify_columns_pandas_missings_UInt16Dtype():
check_pandas_missings(pd.UInt16Dtype(), 0, 65535)
def test_unify_columns_pandas_missings_Int32Dtype():
check_pandas_missings(pd.Int32Dtype(), -2147483648, 2147483647)
def test_unify_columns_pandas_missings_UInt32Dtype():
check_pandas_missings(pd.UInt32Dtype(), 0, 4294967295)
def test_unify_columns_pandas_missings_Int64Dtype():
check_pandas_missings(pd.Int64Dtype(), -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_missings_UInt64Dtype():
check_pandas_missings(pd.UInt64Dtype(), np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_missings_BooleanDtype():
check_pandas_missings(pd.BooleanDtype(), False, True)
def test_unify_columns_pandas_missings_str():
check_pandas_missings(np.object_, "abc", "def")
def test_unify_columns_pandas_missings_nice_str():
check_pandas_missings(np.object_, StringHolder("abc"), "def")
def test_unify_columns_pandas_missings_pure_ints():
check_pandas_missings(np.object_, 1, 2)
def test_unify_columns_pandas_missings_pure_floats():
check_pandas_missings(np.object_, 1.1, 2.2)
def test_unify_columns_pandas_missings_mixed_floats():
check_pandas_missings(np.object_, 1.1, "2.2")
def test_unify_columns_pandas_missings_mixed_floats2():
check_pandas_missings(np.object_, StringHolder("1.1"), "2.2")
def test_unify_columns_str_throw():
X = "abc"
try:
X, n_samples = clean_X(X)
assert(False)
except:
pass
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_int_throw():
X = 1
try:
X, n_samples = clean_X(X)
assert(False)
except:
pass
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_duplicate_colnames_throw():
X = pd.DataFrame()
X["0"] = [1, 2]
X[0] = [3, 4]
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_opaque_str_throw():
# this should fail since the default string conversion produces a string that is useless as a category, like:
# <interpret.glassbox.ebm.test.test_bin.NothingHolder object at 0x0000019525E9FE48>
check_numpy_throws(np.object_, NothingHolder("abc"), "def")
def test_unify_columns_list_throw():
check_numpy_throws(np.object_, ["abc", "bcd"], "def")
def test_unify_columns_tuple_throw():
check_numpy_throws(np.object_, ("abc", "bcd"), "def")
def test_unify_columns_set_throw():
check_numpy_throws(np.object_, {"abc", "bcd"}, "def")
def test_unify_columns_dict_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}, "def")
def test_unify_columns_keys_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.keys(), "def")
def test_unify_columns_values_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.values(), "def")
def test_unify_columns_range_throw():
check_numpy_throws(np.object_, range(1, 2), "def")
def test_unify_columns_generator_throw():
check_numpy_throws(np.object_, (x for x in [1, 2]), "def")
def test_unify_columns_ndarray_throw():
check_numpy_throws(np.object_, np.array([1, "abc"], dtype=np.object_), "def")
def test_unify_columns_pandas_obj_to_float():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), np.float32("6.6").item()], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(np.isnan(X_cols[0][1][0]))
assert(np.isnan(X_cols[0][1][1]))
assert(np.isnan(X_cols[0][1][2]))
assert(X_cols[0][1][3] == 0)
assert(X_cols[0][1][4] == -1)
assert(X_cols[0][1][5] == 2.2)
assert(X_cols[0][1][6] == -3.3)
assert(X_cols[0][1][7] == 4.3984375)
assert(X_cols[0][1][8] == -5.5)
assert(X_cols[0][1][9] == 6.5999999046325684) # python internal objects are float64
def test_unify_columns_pandas_obj_to_str():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), 5.6843418860808014e-14, "None", "nan"], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 12)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
# For "5.684341886080802e-14", we need to round the 16th digit up for this to be the shortest string since
# "5.684341886080801e-14" doesn't work
# https://www.exploringbinary.com/the-shortest-decimal-string-that-round-trips-may-not-be-the-nearest/
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["0"], c["-1"], c["2.2"], c["-3.3"], c["4.3984375"], c["-5.5"], c["5.684341886080802e-14"], c["None"], c["nan"]], dtype=np.int64)))
assert(np.array_equal(na, X_cols[0][1] == 0))
def test_unify_columns_pandas_categorical():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_ordinal():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=True))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'ordinal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_shorter():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "0"], dtype=pd.CategoricalDtype(categories=["a", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 5)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_equals():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_longer():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd", "in_categories"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_shorter():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "0"], dtype=pd.CategoricalDtype(categories=["0", "a"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 5)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_equals():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "bcd", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_longer1():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "in_categories", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_longer2():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["0", "a", "bcd", "in_categories"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_compressed_categories():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "bcd", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
# here we're combining the "a" category and the "0" category into a single one that tracks both.
# in JSON this can be expressed as the equivalent of [["a", "0"], "bcd"]
c = {"a": 1, "0": 1, "bcd": 2}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_feature_names_numpy1():
X = np.array([1, 2, 3])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_numpy2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_data_frame1():
X = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_data_frame2():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_scipy():
X = sp.sparse.csc_matrix([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_dict1():
X = {"feature1" : [1], "feature2" : [2], "feature3" : [3]}
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_dict2():
X = {"feature2" : [1, 4], "feature1" : [2, 5], "feature3" : [3, 6]}
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 2.0)
assert(X_cols[0][1][1] == 5.0)
assert(X_cols[1][1][0] == 1.0)
assert(X_cols[1][1][1] == 4.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_list1():
X = [1, 2, 3]
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_list2():
X = [pd.Series([1, 2, 3]), (4, 5, 6)]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_tuple1():
X = (1, 2, 3)
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_tuple2():
X = (np.array([1, 2, 3]), [4, 5, 6])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_feature_types1():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_feature_types2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_feature_types3():
X = np.array([[1, 3], [4, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given = ['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_pandas_feature_types1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_pandas_ignored_existing():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
feature_types_given=['continuous', 'ignore', 'continuous']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_pandas_feature_types3():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature1", "feature_0001", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_names1():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]))
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_names2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2])
assert(isinstance(feature_names_in, list))
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]))
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_pandas_names2():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2])
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_names1():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_names2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_pandas_names2():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_ignored_names1():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_ignored_names2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_ignored_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_ignored_pandas_names2():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_dropped_names1():
X = np.array([[1, 3], [4, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_dropped_names2():
X = np.array([[1, 3], [4, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_dropped_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_dropped_pandas_names2():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_nondropped2_names2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=[0, 2], feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "feature_0001", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_nondropped2_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "feature_0001", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_dropped2_names2():
X = np.array([[1, 3], [4, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=[0, 2], feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "feature_0001", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_dropped2_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "feature_0001", "2"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_keep_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_dropped3_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_dropped3_pandas_names2():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature1", "feature_0001", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_types_rearrange1_drop1():
X = pd.DataFrame()
X["width"] = [1, 4]
X["UNUSED"] = [2, 5]
X["length"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=["length", "SOMETHING", "width"], feature_types_given=feature_types_given)
assert(feature_names_in == ["length", "SOMETHING", "width"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 3.0)
assert(X_cols[0][1][1] == 6.0)
assert(X_cols[1][1][0] == 1.0)
assert(X_cols[1][1][1] == 4.0)
def test_unify_feature_names_types_rearrange1_drop2():
    X = pd.DataFrame()
import pandas as pd
import numpy as np
import math
from fastparquet import write
def proc_radon(df_radon):
data = df_radon[df_radon.state=='MN']
data = data.loc[:,['county','activity','floor']]
data['log_radon'] = data.activity.apply(lambda x: float(x.strip()))
data['log_radon'] = data['log_radon'].apply(lambda x: math.log(x) if x>0. else np.nan)
data = data.loc[:,['county','log_radon','floor']]
    data.floor = pd.to_numeric(data.floor)
from .event_processing import event_information
from .event_processing import process_event
from .event_processing import number_of_valid_triggerd_cameras
from ctapipe.io.eventsourcefactory import EventSourceFactory
from ctapipe.io.lsteventsource import LSTEventSource
from ctapipe.calib import CameraCalibrator
from ctapipe.reco.HillasReconstructor import TooFewTelescopesException
from ctapipe.io import SimTelEventSource
import pandas as pd
import fact.io
import eventio
from tqdm import tqdm
from pathlib import Path
def process_data(input_file,
config,
return_input_file=False):
event_source = EventSourceFactory.produce(
input_url=input_file.as_posix(),
max_events=config.n_events if config.n_events > 1 else None,
product='LSTEventSource',
)
calibrator = CameraCalibrator(
eventsource=event_source,
r1_product='NullR1Calibrator', ## needs to be replaced?
extractor_product=config.integrator,
)
df_runs = pd.DataFrame()
    array_events = pd.DataFrame()
#!/usr/bin/env python3
# python3.6
# ref link: https://www.jianshu.com/p/91c98585b79b
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import os,re
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from scipy import stats
import seaborn as sns
import argparse
def gene_dis(fi, prefix):
data = pd.read_table(fi, header=0)
###
if data.columns[0] != 'gene':
data.rename(columns={data.columns[0]:'gene'}, inplace=True)
print(data.columns)
data_melt = data.melt('gene', var_name='sample')
data_melt = data_melt.query('value>0')
data_melt.index = data_melt['gene']
#print(data)
#data_melt = data.melt(var_name='sample')
#print(data_melt)
#data_melt = data_melt.query('value>0')
#data_melt.index = data_melt['gene']
### Gene species by gene type
gene_type = {}
with open("/sibcb1/wuliganglab1/liuwei/genome/hisat2_index/Ensembl96_GRCh38_GRcm38.gene.type") as f:
for line in f:
line=line.strip().split("\t")
gene_type[line[1]] = line[2]
    gene_type = pd.Series(gene_type, name='gene_type', dtype="string")
""" A collection of utilities for the analysis of crawl databases """
import operator
import plyvel
import hashlib
import zlib
import glob
import gzip
import json
import os
import pandas as pd
import jsbeautifier
from urllib.parse import urlparse  # used by expand_params() below
from irlutils.url.crawl import domain_utils as du
# General ##########
def unique(seq):
# Not order preserving
return {}.fromkeys(seq).keys()
def sort_by_value(dct, reverse=True):
""" Sort a dictionary by value """
return sorted(dct.items(), key=operator.itemgetter(1), reverse=reverse)
def sort_by_length(dct, reverse=True):
""" Sort a dictionary by value length """
    return sorted(dct.items(), key=lambda x: len(x[1]), reverse=reverse)
def get_dct_subset(dct, keys):
""" Returns a subset of the dictionary, limited to the input keys """
return dict((k, dct[k]) for k in keys if k in dct)
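# Illustrative sketch (not part of the original utilities): how the small
# dict helpers above compose. The counts below are invented for demonstration.
def _example_dict_helpers():
    counts = {"a.com": 3, "b.com": 7, "c.com": 1}
    ranked = sort_by_value(counts)           # [('b.com', 7), ('a.com', 3), ('c.com', 1)]
    subset = get_dct_subset(counts, ["a.com", "missing"])  # {'a.com': 3}
    return ranked, subset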
# Analysis ##########
def get_ranked_sites(location, as_list=False):
"""
returns a dictionary of site rank from the
top-1m.csv file located at <location>
<as_list> (True) returns rank ordered list
(False) returns dict[url] = rank
"""
with open(location, 'r') as f:
sites = ['http://' + x.split(',')[1]
for x in f.read().strip().split('\n')]
if as_list:
return sites
site_rank = dict()
for i in range(len(sites)):
site_rank[sites[i]] = i + 1
return site_rank
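# Hedged usage sketch for get_ranked_sites(); "top-1m.csv" is an assumed local
# copy of an Alexa/Tranco-style "rank,domain" file, not something shipped here.
def _example_get_ranked_sites(csv_path="top-1m.csv"):
    site_rank = get_ranked_sites(csv_path)                 # dict: url -> rank
    top_sites = get_ranked_sites(csv_path, as_list=True)   # rank-ordered list of urls
    return site_rank, top_sites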
# Print utilities ##########
def expand_params(url):
purl = urlparse(url)
print("\n====================================")
print("Scheme: {}".format(purl.scheme))
print("Hostname: {}".format(purl.hostname))
print("Path: {}".format(purl.path))
print("Query Parameters:")
for item in purl.query.split('&'):
try:
key, value = item.split('=')
print(("\t {:<30} = {}".format(key, value)))
except ValueError:
print(("\t {}".format(item)))
print(purl.fragment)
def prettify(item):
""" creates a json representation of item and returns as string """
if type(item) == set:
item = list(item)
return json.dumps(item, indent=2,
separators=(',', ': '))
def pretty_print(item):
""" Pretty prints a json representation of item """
print(prettify(item))
def print_as_markdown_table(l, heading=None):
"""print(`l` as a markdown formatted table)
Parameters
----------
l : list of lists or list of tuples or pandas.Series
the list of data you want printed. All rows must be same length
heading : list
a list of column headings. Must be same width as items of l
"""
if type(l) != list and type(l) != pd.Series:
raise TypeError("only supports printing list or pandas.Series")
if type(l) == pd.Series:
new_l = list()
for key in l.keys():
            value = l[key]  # label lookup; Series.get_value() was removed in newer pandas
if type(value) != tuple:
value = (value,)
new_l.append(key + value)
l = new_l
output = ''
if heading is not None:
output += ' | '.join([str(x) for x in heading]) + '\n'
output += '-|-'.join(['-'*len(str(x)) for x in heading]) + '\n'
for item in l:
output += ' | '.join([str(x) for x in item]) + '\n'
print(output)
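# Minimal usage sketch for print_as_markdown_table(); rows and headings are
# invented for illustration.
def _example_markdown_table():
    rows = [("a.com", 10), ("b.com", 3)]
    print_as_markdown_table(rows, heading=["site", "count"])
    # prints:
    # site | count
    # -----|------
    # a.com | 10
    # b.com | 3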
# Dataframe ##########
def add_tp_col(df, col1, col2):
""" Add a third-party boolean column to dataframe `df`
Parameters
----------
df : pandas.DataFrame
the dataframe the third-party column will be added to
col1 : str
first column to retrieve a url from
col2 : str
second column to retrieve a url from
"""
df['is_tp'] = df[col1].apply(du.get_ps_plus_1) != \
df[col2].apply(du.get_ps_plus_1)
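# Hedged sketch for add_tp_col(): it marks a row as third-party when the two
# URL columns resolve to different registered domains (eTLD+1) via
# domain_utils. The column names and URLs below are assumptions.
def _example_add_tp_col():
    df = pd.DataFrame({
        "top_url": ["http://site.com/page"],
        "request_url": ["http://cdn.tracker.net/lib.js"],
    })
    add_tp_col(df, "top_url", "request_url")  # adds a boolean 'is_tp' column
    return df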
def read_cache_or_query_db(con, db_query, csv_file="", versioned=True):
"""Read from the cached CSV file or the database.
Cache the results to a CSV file. If `versioned` is True, the cache will
be tied to the `db_query`. If `db_query` changes, the cache will be
rebuilt."""
if csv_file != "" and versioned:
parts = csv_file.rsplit('.')
csv_file = "{}-{}.csv".format(parts[0], hashlib.md5(db_query).hexdigest())
used_cache = False
should_query_db = False
try:
results = pd.read_csv(csv_file, sep="\t", keep_default_na=False)
used_cache = True
except Exception:
should_query_db = True
if should_query_db:
results = pd.read_sql_query(db_query, con)
if csv_file and not used_cache: # only save if we queried the DB
results.to_csv(csv_file, sep='\t', encoding='utf-8',
index=False)
return results
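# Hedged usage sketch for read_cache_or_query_db(); 'crawl.sqlite', the query
# and the cache file name are placeholders for the crawl database at hand.
def _example_read_cache_or_query_db():
    import sqlite3
    con = sqlite3.connect("crawl.sqlite")
    return read_cache_or_query_db(
        con, "SELECT url, content_hash FROM http_responses",
        csv_file="responses.csv", versioned=True)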
# Sqlite ##########
def fetchiter(cursor, arraysize=10000):
""" Generator for cursor results """
while True:
rows = cursor.fetchmany(arraysize)
if rows == []:
break
for row in rows:
yield row
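# Hedged sketch: stream rows from a large table in chunks with fetchiter()
# instead of fetchall(). Table and column names are placeholders.
def _example_fetchiter(cursor):
    cursor.execute("SELECT visit_id, url FROM http_requests")
    for visit_id, url in fetchiter(cursor, arraysize=5000):
        pass  # process one row at a time without holding the full result set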
def list_placeholder(length, is_pg=False):
"""
Returns a (?,?,?,?...) string of the desired length
if is_pg, returns ({},{},{},....) instead.
"""
if is_pg:
return '(' + '{},'*(length-1) + '{})'
else:
return '(' + '?,'*(length-1) + '?)'
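# Concrete examples of list_placeholder() output (illustration, not original code):
#   list_placeholder(3)             -> '(?,?,?)'
#   list_placeholder(3, is_pg=True) -> '({},{},{})'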
def optimize_db(cursor):
""" Runs PRAGMA queries to make sqlite better """
cursor.execute("PRAGMA cache_size = -{}".format((0.1 * 10**7))) # 10 GB
# Store temp tables, indicies in memory
cursor.execute("PRAGMA temp_store = 2")
def insert_get_id(cursor, table, arguments, unique):
"""
Executes an INSERT OR IGNORE on a table where one column is unique
The resulting ID is grabbed avoiding an extra SELECT statement if possible
<table> - table name as string
<arguments> - a dict of fields to insert
<unique> - string or list of the fields that have a unique constraint
"""
cursor.execute("INSERT OR IGNORE INTO {} ({}) VALUES {}".format(
table, ','.join(arguments.keys()), list_placeholder(len(arguments)),
arguments.values()))
cursor.execute("SELECT last_insert_rowid(), changes();")
ret_id, insert = cursor.fetchone()
if not insert:
if type(unique) == str:
unique = [unique]
cursor.execute("SELECT id FROM {} WHERE {}".format(table, " AND ".join(map(lambda x: "{} = ?".format((x, unique), map(lambda x: arguments[x], unique))))))
ret_id = cursor.fetchone()[0]
return ret_id
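# Hedged usage sketch for insert_get_id(); the table and its unique column are
# placeholders and must already exist in the connected database.
# def _example_insert_get_id(cursor):
#     return insert_get_id(cursor, "site", {"url": "http://example.com"}, unique="url")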
def insert_ignore(cursor, table, arguments, unique, index_fn=None):
"""
Execute an INSERT OR IGNORE on a table with at least one unique column
The ID of the new INSERT (or current row) is returned.
<table> - table name as string
<arguments> - a dict of fields to insert of form
i.e. {<col1_name>:<col1_value>, <col2_name>:<col2_value>, ...}
<unique> - string or list of the fields that have a unique constraint
i.e. 'col1' or ['col1','col2']
<index_fn> - A function name if the table has a functional index. e.g 'MD5'
ONLY arguments' values should come from untrusted sources as the rest are
not sanitized.
"""
if type(unique) == str:
unique = [unique]
    if index_fn is not None:
        where_str = index_fn + "({}) = " + index_fn + "(%s)"
    else:
        where_str = "{} = %s"
query = "".join(( # makes substitutions more readable
"WITH s AS ( ",
"SELECT id ",
"FROM {} ".format(table),
"WHERE {} ".format(" AND ".join(map(lambda x: where_str.format(x, unique)))),
"), i AS ( ",
"INSERT INTO {} ({}) ".format(table, ','.join(arguments.keys())),
"SELECT {} ".format(",".join(["{}"]*len(arguments))),
"WHERE NOT EXISTS (SELECT 1 FROM s) ",
"RETURNING id ",
") ",
"SELECT id ",
"FROM i ",
"UNION ALL ",
"SELECT id ",
"FROM s "
))
    cursor.execute(query,
                   [arguments[x] for x in unique] + list(arguments.values()))
return cursor.fetchone()[0]
def get_distinct_content_hashes(cur, url):
cur.execute(
"SELECT content_hash FROM http_responses WHERE url = ?", (url,))
return set([x[0] for x in cur.fetchall()])
# LevelDB ##########
def get_leveldb(db_path, compression='snappy'):
"""
Returns an open handle for a leveldb database
with proper configuration settings.
"""
db = plyvel.DB(db_path,
lru_cache_size=10**9,
write_buffer_size=128*10**4,
bloom_filter_bits=128,
compression=compression)
return db
def get_url_content(url, sqlite_cur, ldb_con, beautify=True, visit_id=None):
"""Return javascript content for given url.
Parameters
----------
url : str
url to search content hash for
sqlite_cur : sqlite3.Cursor
cursor for crawl database
ldb_con : plyvel.DB
leveldb database storing javascript content
beautify : boolean
Control weather or not to beautify output
visit_id : int
(optional) `visit_id` of the page visit where this URL was loaded
"""
if visit_id is not None:
sqlite_cur.execute(
"SELECT content_hash FROM http_responses WHERE "
"visit_id = ? AND url = ? LIMIT 1;", (visit_id, url))
else:
sqlite_cur.execute(
"SELECT content_hash FROM http_responses WHERE url = ? LIMIT 1;",
(url,))
content_hash = sqlite_cur.fetchone()
if content_hash is None or len(content_hash) == 0:
print("Content hash not found for url {}".format( url))
return
return get_content(ldb_con, content_hash[0], beautify=beautify)
def get_content(db, content_hash, compression='snappy', beautify=True):
""" Returns decompressed content from javascript leveldb database """
    if content_hash is None:
        print("ERROR: content_hash can't be None...")
        return
    content_hash = bytes(content_hash, "utf-8")
content = db.get(content_hash)
if content is None:
print("ERROR: content hash: {} NOT FOUND".format(content_hash))
return
supported = ['snappy', 'none', 'gzip']
if compression not in supported:
print("Unsupported compression type {}. Only {} are the supported options.".format(compression, str(supported)))
return
elif compression == 'gzip':
try:
content = zlib.decompress(content, zlib.MAX_WBITS | 16)
except Exception:
try:
content = zlib.decompress(content)
except Exception:
print("Failed to decompress gzipped content...")
return
if beautify:
return jsbeautifier.beautify(str(content))
else:
return content
# ##### Page Source
def parse_src_frame(visit_id, url_hash, suffix, src_dict,
include_iframes, rv=None, parent=None):
"""Parse a frame from a page source dump into a flat return value"""
if rv is None:
rv = list()
document_url = src_dict['doc_url']
src = src_dict['source']
children = set()
for frame in src_dict['iframes'].values():
if include_iframes:
parse_src_frame(visit_id, url_hash, suffix, frame, include_iframes,
rv=rv, parent=document_url)
children.add(frame['doc_url'])
rv.append((visit_id, url_hash, suffix,
document_url, src, tuple(children), parent))
return
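# Sketch of the page-source dict shape parse_src_frame() expects, inferred
# from the fields accessed above (an assumption, not a formal schema):
#   {"doc_url": "...", "source": "<html>...", "iframes": {<name>: {same shape}}}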
def build_page_source_df(src_dir, visit_ids=None, include_iframes=False):
"""Build a dataframe from crawl page source directory
Output columns:
visit_id, tab_url_hash, suffix, document_url, page_source,
children (tuple of document URLs), parent (document URL)
"""
if not os.path.isdir(src_dir):
raise ValueError("{} not found".format(src_dir))
src_zips = glob.glob(os.path.join(src_dir, '*.json.gz'))
if len(src_zips) == 0:
raise ValueError(" {} contains no source files".format(src_dir))
sources = list()
for src_zip in src_zips:
basename = os.path.basename(src_zip)
visit_id, url_hash, suffix = basename.rsplit('.', 2)[0].split('-')[0:3]
visit_id = int(visit_id)
if visit_ids is not None and visit_id not in visit_ids:
continue
with gzip.open(src_zip, 'rb') as f:
try:
page_src = json.load(f)
except ValueError:
continue
# Flatten frames
parse_src_frame(visit_id, url_hash, suffix,
page_src, include_iframes, rv=sources)
    df = pd.DataFrame(sources)
# What is different in this kernel:
# - data preprocessing was modularised and hopefully made more clear, as repetitative actions were moved into a separate function
# - LightGBM hyperparameters were taken from my another kernel, where they were tuned to the `application` data subset only:
# https://www.kaggle.com/mlisovyi/lightgbm-hyperparameter-optimisation-lb-0-746
# - Check out the feature importance plot. It is VERY different from any other kernel.
# It is most likely related to the regularisation in the model. This will have to be studied
#
# What was borrowed in this kernel:
# This script is a fork of the awesome kernel by olivier, that insiper a lot of kernels on this competition:
# https://www.kaggle.com/ogrellier/good-fun-with-ligthgbm
# It also uses memory-footprint-reduction technique copied over from this very clear and useful kernel:
# https://www.kaggle.com/gemartin/load-data-reduce-memory-usage
# The tiny add-on to store OOF predictions on the training dataset was taken from this kernel:
# https://www.kaggle.com/tilii7/olivier-lightgbm-parameters-by-bayesian-opt/
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_curve, roc_curve, average_precision_score
from sklearn.model_selection import KFold, StratifiedKFold
from lightgbm import LGBMClassifier
import matplotlib.pyplot as plt
import seaborn as sns
import gc
PATH='~/.kaggle/competitions/home-credit-default-risk/'
def reduce_mem_usage(df):
""" iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
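# Hedged usage sketch for reduce_mem_usage(): downcasting a toy frame.
# The values are invented; the actual savings depend on the real data.
def _example_reduce_mem_usage():
    toy = pd.DataFrame({"small_int": [1, 2, 3], "flag": ["a", "b", "a"]})
    toy = reduce_mem_usage(toy)   # small_int -> int8, flag -> category
    return toy.dtypes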
def import_data(file):
"""create a dataframe and optimize its memory usage"""
df = pd.read_csv(file, parse_dates=True, keep_date_col=True)
df = reduce_mem_usage(df)
return df
def average_dummies(df, dummy_col, count_col, gb_col, preffix='', del_input=True):
print('DF shape : ', df.shape)
if dummy_col:
print('transform to dummies')
df = pd.concat([df, pd.get_dummies(df[dummy_col])], axis=1).drop(dummy_col, axis=1)
if count_col and gb_col:
print('Counting buros')
df_counts = df[[gb_col, count_col]].groupby(gb_col).count()
df[count_col] = df[gb_col].map(df_counts[count_col])
avg_df = None
if gb_col:
print('averaging ')
avg_df = df.groupby(gb_col).mean()
if preffix:
avg_df.columns = [preffix + f_ for f_ in avg_df.columns]
print(avg_df.head())
print(df.head())
if del_input:
print('Deleting input')
del df
gc.collect()
if avg_df is not None:
print(avg_df.head())
print(avg_df.columns.values)
return avg_df
elif not del_input:
return df
else:
return None
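# Hedged sketch of how average_dummies() is used below: one-hot encode a
# categorical column, count rows per SK_ID_CURR and average everything.
# The tiny frame is made up; column names mirror the Home Credit tables.
def _example_average_dummies():
    toy = pd.DataFrame({
        "SK_ID_CURR": [1, 1, 2],
        "SK_ID_PREV": [10, 11, 12],
        "NAME_CONTRACT_STATUS": ["Active", "Completed", "Active"],
    })
    return average_dummies(toy, dummy_col="NAME_CONTRACT_STATUS",
                           count_col="SK_ID_PREV", gb_col="SK_ID_CURR",
                           preffix="pos_")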
def build_model_input():
print('Read Bureau_Balance')
buro_bal = import_data(PATH+'/bureau_balance.csv')
avg_buro_bal = average_dummies(buro_bal, dummy_col='STATUS', count_col='MONTHS_BALANCE', gb_col='SK_ID_BUREAU', preffix='avg_buro_')
print('Read Bureau')
buro_full = import_data(PATH+'/bureau.csv')
buro_full = average_dummies(buro_full, dummy_col=['CREDIT_ACTIVE', 'CREDIT_CURRENCY', 'CREDIT_TYPE'], count_col=None, gb_col=None, preffix=None, del_input=False)
print('Merge with buro avg')
buro_full = buro_full.merge(right=avg_buro_bal.reset_index(), how='left', on='SK_ID_BUREAU', suffixes=('', '_bur_bal'))
avg_buro = average_dummies(buro_full, dummy_col=None, count_col='SK_ID_BUREAU', gb_col='SK_ID_CURR', preffix='avg_buro_', del_input=True)
print('Read prev')
prev = import_data(PATH+'/previous_application.csv')
prev_cat_features = [ f_ for f_ in prev.columns if prev[f_].dtype == 'object' ]
avg_prev = average_dummies(prev, dummy_col=prev_cat_features, count_col='SK_ID_PREV', gb_col='SK_ID_CURR', preffix='prev_')
print('Reading POS_CASH')
pos = import_data(PATH+'/POS_CASH_balance.csv')
avg_pos = average_dummies(pos, dummy_col='NAME_CONTRACT_STATUS', count_col='SK_ID_PREV', gb_col='SK_ID_CURR', preffix='pos_')
print('Reading CC balance')
cc_bal = import_data(PATH+'/credit_card_balance.csv')
avg_cc_bal = average_dummies(cc_bal, dummy_col='NAME_CONTRACT_STATUS', count_col='SK_ID_PREV', gb_col='SK_ID_CURR', preffix='cc_bal_')
print('Reading Installments')
inst = import_data(PATH+'/installments_payments.csv')
avg_inst = average_dummies(inst, dummy_col=None, count_col='SK_ID_PREV', gb_col='SK_ID_CURR', preffix='inst_')
print('Read data and test')
data = import_data(PATH+'/application_train.csv')
test = import_data(PATH+'/application_test.csv')
print('Shapes : ', data.shape, test.shape)
y = data['TARGET']
del data['TARGET']
categorical_feats = [ f for f in data.columns if data[f].dtype == 'object' ]
categorical_feats
for f_ in categorical_feats:
data[f_], indexer = pd.factorize(data[f_])
test[f_] = indexer.get_indexer(test[f_])
print('Merging')
data = data.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
del avg_buro, avg_prev, avg_pos, avg_cc_bal, avg_inst
gc.collect()
print('Done with data preparation')
return data, test, y
def train_model(data_, test_, y_, folds_):
oof_preds = np.zeros(data_.shape[0])
sub_preds = np.zeros(test_.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in data_.columns if f not in ['SK_ID_CURR']]
for n_fold, (trn_idx, val_idx) in enumerate(folds_.split(data_,y_)):
trn_x, trn_y = data_[feats].iloc[trn_idx], y_.iloc[trn_idx]
val_x, val_y = data_[feats].iloc[val_idx], y_.iloc[val_idx]
#clf = LGBMClassifier(
# n_estimators=4000,
# learning_rate=0.03,
# num_leaves=30,
# colsample_bytree=.8,
# subsample=.9,
# max_depth=7,
# reg_alpha=.1,
# reg_lambda=.1,
# min_split_gain=.01,
# min_child_weight=2,
# silent=-1,
# verbose=-1,
#)
clf = LGBMClassifier(max_depth=-1, random_state=314, silent=True, metric='None', n_jobs=4, n_estimators=50, learning_rate=0.03)
opt_parameters = {'colsample_bytree': 0.9234, 'min_child_samples': 399, 'min_child_weight': 0.1, 'num_leaves': 13, 'reg_alpha': 2, 'reg_lambda': 5, 'subsample': 0.855}
clf.set_params(**opt_parameters)
#clf.set_params(is_unbalance=True)
clf.fit(trn_x, trn_y,
eval_set= [(trn_x, trn_y), (val_x, val_y)],
eval_metric='auc', verbose=100, early_stopping_rounds=100 #30
)
oof_preds[val_idx] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test_[feats], num_iteration=clf.best_iteration_)[:, 1] / folds_.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
# api: pandas.concat
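# A minimal usage sketch for the two functions above. It assumes a scikit-learn
# StratifiedKFold, which matches the folds_.split(data_, y_) and folds_.n_splits calls
# in train_model; train_model is shown truncated above, so it is called here only for
# its side effects.
from sklearn.model_selection import StratifiedKFold

def example_run():
    data, test, y = build_model_input()
    folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=314)
    train_model(data, test, y, folds)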
import json
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import pickle
import datetime as dt
from shutil import copyfile
from typing import Optional
from sklearn.cluster import KMeans
from dl_portfolio.logger import LOGGER
from dl_portfolio.pca_ae import get_layer_by_name, heat_map, build_model
from dl_portfolio.data import drop_remainder, get_features
from dl_portfolio.train import fit, embedding_visualization, plot_history, create_dataset, build_model_input
from dl_portfolio.constant import LOG_DIR
from dl_portfolio.nmf.semi_nmf import SemiNMF
from dl_portfolio.nmf.convex_nmf import ConvexNMF
def run_ae(config, data, assets, log_dir: Optional[str] = None, seed: Optional[int] = None):
"""
:param config: config
:param log_dir: if given save the result in log_dir folder, if not given use LOG_DIR
:param seed: if given use specific seed
:return:
"""
random_seed = np.random.randint(0, 100)
if config.seed:
seed = config.seed
if seed is None:
seed = np.random.randint(0, 1000)
np.random.seed(seed)
tf.random.set_seed(seed)
LOGGER.debug(f"Set seed: {seed}")
if config.save:
if log_dir is None:
log_dir = LOG_DIR
run_iter = len(os.listdir(log_dir))
if config.model_name is not None and config.model_name != '':
subdir = f'm_{run_iter}_' + config.model_name + f'_seed_{seed}'
else:
subdir = f'm_{run_iter}_'
subdir = subdir + '_' + str(dt.datetime.timestamp(dt.datetime.now())).replace('.', '')
save_dir = f"{log_dir}/{subdir}"
os.makedirs(save_dir)
copyfile('./dl_portfolio/config/ae_config.py',
os.path.join(save_dir, 'ae_config.py'))
base_asset_order = assets.copy()
assets_mapping = {i: base_asset_order[i] for i in range(len(base_asset_order))}
for cv in config.data_specs:
LOGGER.debug(f'Starting with cv: {cv}')
if config.save:
save_path = f"{save_dir}/{cv}"
os.mkdir(f"{save_dir}/{cv}")
else:
save_path = None
LOGGER.debug(f'Assets order: {assets}')
if config.loss == 'weighted_mse':
# reorder columns
df_sample_weights = df_sample_weights[assets]
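# NOTE: df_sample_weights is not defined in this excerpt; it is presumably built
# earlier in the original module when config.loss == 'weighted_mse'.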
# Build model
input_dim = len(assets)
n_features = None
model, encoder, extra_features = build_model(config.model_type,
input_dim,
config.encoding_dim,
n_features=n_features,
extra_features_dim=1,
activation=config.activation,
batch_normalization=config.batch_normalization,
kernel_initializer=config.kernel_initializer,
kernel_constraint=config.kernel_constraint,
kernel_regularizer=config.kernel_regularizer,
activity_regularizer=config.activity_regularizer,
loss=config.loss,
uncorrelated_features=config.uncorrelated_features,
weightage=config.weightage)
if config.nmf_model is not None:
train_data, _, _, _, _, _ = get_features(data,
config.data_specs[cv]['start'],
config.data_specs[cv]['end'],
assets,
val_start=config.data_specs[cv]['val_start'],
test_start=config.data_specs[cv].get(
'test_start'),
rescale=config.rescale,
scaler=config.scaler_func['name'],
resample=config.resample,
**config.scaler_func.get('params',
{}))
LOGGER.info(f"Initilize weights with NMF model from {config.nmf_model}/{cv}")
assert config.model_type in ["ae_model"]
if config.model_type == "ae_model":
nmf_model = pickle.load(open(f'{config.nmf_model}/{cv}/model.p', 'rb'))
# Set encoder weights
weights = nmf_model.encoding.copy()
# Add small constant to avoid 0 weights at beginning of training
weights += 0.2
# Make it unit norm
weights = weights ** 2
weights /= np.sum(weights, axis=0)
weights = weights.astype(np.float32)
bias = model.layers[1].get_weights()[1]
model.layers[1].set_weights([weights, bias])
# Set decoder weights
weights = nmf_model.components.copy()
# Add small constant to avoid 0 weights at beginning of training
weights += 0.2
# Make it unit norm
weights = weights ** 2
weights /= np.sum(weights, axis=0)
weights = weights.T
weights = weights.astype(np.float32)
# Set bias to the difference between the training-data mean and the NMF reconstruction mean
F = nmf_model.transform(train_data)
bias = (np.mean(train_data) - np.mean(F.dot(nmf_model.components.T), 0))
model.layers[-1].set_weights([weights, bias])
elif config.model_type == "pca_ae_model":
nmf_model = pickle.load(open(f'{config.nmf_model}/{cv}/model.p', 'rb'))
# Set encoder weights
weights = nmf_model.components.copy()
# Add small constant to avoid 0 weights at beginning of training
weights += 0.2
# Make it unit norm
weights = weights ** 2
weights /= np.sum(weights, axis=0)
weights = weights.astype(np.float32)
bias = model.layers[1].get_weights()[1]
model.layers[1].set_weights([weights, bias])
# Set decoder weights
layer_weights = model.layers[-1].get_weights()
weights = nmf_model.components.copy()
# Add small constant to avoid 0 weights at beginning of training
weights += 0.2
# Make it unit norm
weights = weights ** 2
weights /= np.sum(weights, axis=0)
weights = weights.astype(np.float32)
# Set bias to the difference between the training-data mean and the NMF reconstruction mean
F = nmf_model.transform(train_data)
bias = (np.mean(train_data) - np.mean(F.dot(nmf_model.components.T), 0))
layer_weights[0] = bias
layer_weights[1] = weights
model.layers[-1].set_weights(layer_weights)
# LOGGER.info(model.summary())
# Create dataset:
shuffle = False
if config.resample is not None:
if config.resample.get('when', None) != 'each_epoch':
train_dataset, val_dataset = create_dataset(data, assets,
config.data_specs[cv],
config.model_type,
batch_size=config.batch_size,
rescale=config.rescale,
scaler_func=config.scaler_func,
resample=config.resample,
loss=config.loss,
df_sample_weights=df_sample_weights if config.loss == 'weighted_mse' else None
)
else:
shuffle = True
# Set extra loss parameters
if shuffle:
model, history = fit(model,
None,
config.epochs,
config.learning_rate,
callbacks=config.callbacks,
val_dataset=None,
extra_features=n_features is not None,
save_path=f"{save_path}" if config.save else None,
shuffle=True,
cv=cv,
data=data,
assets=assets,
config=config)
else:
model, history = fit(model,
train_dataset,
config.epochs,
config.learning_rate,
loss=config.loss,
callbacks=config.callbacks,
val_dataset=val_dataset,
extra_features=n_features is not None,
save_path=f"{save_path}" if config.save else None,
shuffle=False)
if config.save:
# tensorboard viz
if config.model_type != 'ae_model2':
embedding_visualization(model, assets, log_dir=f"{save_path}/tensorboard/")
LOGGER.debug(f"Loading weights from {save_path}/model.h5")
model.load_weights(f"{save_path}/model.h5")
plot_history(history, save_path=save_path, show=config.show_plot)
# Get results for later analysis
data_spec = config.data_specs[cv]
train_data, val_data, test_data, scaler, dates, features = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
rescale=config.rescale,
scaler=config.scaler_func['name'],
resample=config.resample,
**config.scaler_func.get('params',
{}))
LOGGER.debug(f'Train shape: {train_data.shape}')
LOGGER.debug(f'Validation shape: {val_data.shape}')
if features:
train_input = build_model_input(train_data, config.model_type, features=features['train'], assets=assets)
val_input = build_model_input(val_data, config.model_type, features=features['val'], assets=assets)
if test_data is not None:
test_input = build_model_input(test_data, config.model_type, features=features['test'],
assets=assets)
else:
train_input = build_model_input(train_data, config.model_type, features=None, assets=assets)
val_input = build_model_input(val_data, config.model_type, features=None, assets=assets)
if test_data is not None:
test_input = build_model_input(test_data, config.model_type, features=None, assets=assets)
## Get prediction
if n_features:
train_features = encoder.predict(train_input[0])
val_features = encoder.predict(val_input[0])
else:
train_features = encoder.predict(train_input)
val_features = encoder.predict(val_input)
train_features = pd.DataFrame(train_features, index=dates['train'])
LOGGER.info(f"Train features correlation:\n{train_features.corr()}")
val_features = pd.DataFrame(val_features, index=dates['val'])
LOGGER.info(f"Val features correlation:\n{val_features.corr()}")
val_prediction = model.predict(val_input)
val_prediction = scaler.inverse_transform(val_prediction)
val_prediction = pd.DataFrame(val_prediction, columns=assets, index=dates['val'])
## Get encoder weights
decoder_weights = None
if config.model_type in ['ae_model2', 'nl_pca_ae_model']:
encoder_layer1 = get_layer_by_name(name='encoder1', model=model)
encoder_weights1 = encoder_layer1.get_weights()[0]
encoder_layer2 = get_layer_by_name(name='encoder2', model=model)
encoder_weights2 = encoder_layer2.get_weights()[0]
encoder_weights = np.dot(encoder_weights1, encoder_weights2)
encoder_weights = pd.DataFrame(encoder_weights, index=assets)
heat_map(pd.DataFrame(encoder_weights1), show=True, vmin=0., vmax=1.)
heat_map(pd.DataFrame(encoder_weights2), show=True, vmin=0., vmax=1.)
heat_map(encoder_weights, show=True)
elif config.model_type == 'pca_ae_model':
encoder_layer = get_layer_by_name(name='encoder', model=model)
encoder_weights = encoder_layer.get_weights()
encoder_weights = pd.DataFrame(encoder_weights[0], index=assets)
# api: pandas.DataFrame
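# A standalone sketch of the weight preparation used above for the NMF-based
# initialization (NumPy only; the 0.2 offset mirrors the snippet, the helper name and
# shapes are illustrative).
import numpy as np

def nmf_init_weights(factor_matrix: np.ndarray) -> np.ndarray:
    """Shift away from exact zeros, square, and rescale each column to sum to one."""
    w = factor_matrix.copy() + 0.2
    w = w ** 2
    w /= np.sum(w, axis=0)
    return w.astype(np.float32)

# Example: prepare a (10 assets x 3 factors) encoder kernel from random factors.
w0 = nmf_init_weights(np.random.rand(10, 3))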
import training.train
import pandas as pd
import numpy as np
from skopt.space import Real, Categorical, Integer
from skopt.utils import use_named_args
from skopt import gp_minimize
def store_results(search_result, prior_names):
params = pd.DataFrame(search_result['x_iters'])
# api: pandas.DataFrame
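# A possible continuation of store_results (a sketch, not the original code): skopt's
# gp_minimize returns a dict-like OptimizeResult exposing 'x_iters' and 'func_vals';
# labelling the columns with prior_names is an assumption about the original intent.
def store_results_sketch(search_result, prior_names):
    params = pd.DataFrame(search_result['x_iters'], columns=prior_names)
    params['objective'] = search_result['func_vals']
    return params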
# -*- coding: utf-8 -*-
import click
import logging
import os
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import multiprocessing
import numpy as np
import pandas as pd
import src.helpers.third_party.preprocess as preprocess
from definitions import OUTPUT_DIR, TCE_TABLE_DIR, KEPLER_DATA_DIR
def generate_tce_data(tce_table):
# Initialise dataframes to populate with processed data
flattened_fluxes_df = pd.DataFrame()
folded_fluxes_df = pd.DataFrame()
globalbinned_fluxes_df = pd.DataFrame()
localbinned_fluxes_df = pd.DataFrame()
# Processing metrics
num_tces = len(tce_table)
processed_count = 0
failed_count = 0
# Iterate over every TCE in the table
for _, tce in tce_table.iterrows():
try:
# Process the TCE and retrieve the processed data.
flattened_flux, folded_flux, global_view, local_view = process_tce(tce)
# Append processed flux light curves for each TCE to output dataframes.
flattened_fluxes_df = flattened_fluxes_df.append(pd.Series(flattened_flux), ignore_index=True)
folded_fluxes_df = folded_fluxes_df.append(pd.Series(folded_flux), ignore_index=True)
globalbinned_fluxes_df = globalbinned_fluxes_df.append(pd.Series(global_view), ignore_index=True)
localbinned_fluxes_df = localbinned_fluxes_df.append(pd.Series(local_view), ignore_index=True)
# api: pandas.Series
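# DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0. A minimal
# sketch of an equivalent pattern for the loop above: collect each processed flux as a
# row and build the frame once (the helper name and list variables are illustrative).
import pandas as pd

def rows_to_frame(rows):
    """Build a DataFrame from a list of 1-D arrays, one row per processed TCE."""
    return pd.DataFrame([pd.Series(r) for r in rows]).reset_index(drop=True)

# e.g. flattened_fluxes_df = rows_to_frame(flattened_flux_rows)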