# Heap Maps A heat map is a two-dimensional representation of data in which values are represented by colors. A simple heat map provides an immediate visual summary of information. ``` from beakerx import * data = [[533.08714795974, 484.92105712087596, 451.63070008303896, 894.4451947886148, 335.44965728686225, 640.9424094527392, 776.2709495045433, 621.8819257981404, 793.2905673902735, 328.97078791524234, 139.26962328268513, 800.9314566259062, 629.0795214099808, 418.90954534196544, 513.8036215424278, 742.9834968485734, 542.9393528649774, 671.4256827205828, 507.1129322933082, 258.8238039352692, 581.0354187924672, 190.1830169180297, 480.461111816312, 621.621218137835, 650.6023460248642, 635.7577683708486, 605.5201537254429, 364.55368485516846, 554.807212844458, 526.1823154945637], [224.1432052432479, 343.26660237811336, 228.29828973027486, 550.3809606942758, 340.16890889700994, 214.05332637480836, 461.3159325548031, 471.2546571575069, 503.071081294441, 757.4281483575993, 493.82140462579406, 579.4302306011925, 459.76905409338497, 580.1282535427403, 378.8722877921564, 442.8806517248869, 573.9346962907078, 449.0587543606964, 383.50503527041144, 378.90761994599256, 755.1883447435789, 581.6815170672886, 426.56807864689773, 602.6727518023347, 555.6481983927658, 571.1201152862207, 372.24744704437876, 424.73180136220844, 739.9173564499195, 462.3257604373609], [561.8684320610753, 604.2859791599086, 518.3421287392559, 524.6887104615442, 364.41920277904774, 433.37737233751386, 565.0508404421712, 533.6030951907703, 306.68809206630397, 738.7229466356732, 766.9678519097575, 699.8457506281374, 437.0340850742263, 802.4400914789037, 417.38754410115075, 907.5825538527938, 521.4281410545287, 318.6109350534576, 435.8275858900637, 463.82924688853524, 533.4069709666686, 404.50516534982546, 332.6966202103611, 560.0346672408426, 436.9691072984075, 631.3453929454839, 585.1581992195356, 522.3209865675237, 497.57041075817443, 525.8867246757814], [363.4020792898871, 457.31257834906256, 333.21325206873564, 508.0466632081777, 457.1905718373847, 611.2168422907173, 515.2088862309242, 674.5569500790505, 748.0512665828364, 889.7281605626981, 363.6454276219251, 647.0396659692233, 574.150119779024, 721.1853645071792, 309.5388283799724, 450.51745569875845, 339.1271937333267, 630.6976744426033, 630.1571298446103, 615.0700456998867, 780.7843408745639, 205.13803869051543, 784.5916902014255, 498.10545868387925, 553.936345186856, 207.59216580556847, 488.12270849418735, 422.6667046886397, 292.1061953879919, 565.1595338825396], [528.5186504364794, 642.5542319036714, 563.8776991112292, 537.0271437681837, 430.4056097950834, 384.50193545472877, 693.3404035076994, 573.0278734604005, 261.2443087970927, 563.412635691231, 258.13860041989085, 550.150017102056, 477.70582135030617, 509.4311099345934, 661.3308013433317, 523.1175760654914, 370.29659041946326, 557.8704186019502, 353.66591951113645, 510.5389425077261, 469.11212447314324, 626.2863927887214, 318.5642686423241, 141.13900677851177, 486.00711121264453, 542.0075639686526, 448.7161764573215, 376.65492084577164, 166.56246586635706, 718.6147921685923], [435.403218786657, 470.74259129379413, 615.3542648093958, 483.61792559031693, 607.9455289424717, 454.9949861614464, 869.45041758392, 750.3595195751914, 754.7958625343501, 508.38715645396553, 368.2779213892305, 662.23752125613, 350.46366230046397, 619.8010888063362, 497.9560438683688, 420.64163974607766, 487.16698403905633, 273.3352931767504, 354.02637708217384, 457.9408818614016, 496.2986534025747, 364.84710143814976, 458.29907844925157, 
634.073520178434, 558.7161089429649, 603.6634230782621, 514.1019407724017, 539.6741842214251, 585.0639516732675, 488.3003071211236], [334.0264519516021, 459.5702037859653, 543.8547654459309, 471.6623772418301, 500.98627686914386, 740.3857774449933, 487.4853744264201, 664.5373560191691, 573.764159193263, 471.32565842016527, 448.8845519093864, 729.3173859836543, 453.34766656988694, 428.4975196541853, 575.1404740691066, 190.18782164376034, 243.90403003048107, 430.03959300145215, 429.08666492876233, 508.89662188951297, 669.6400651031191, 516.2894766192492, 441.39320293407405, 653.1948574772491, 529.6831617222962, 176.0833629734244, 568.7136007686755, 461.66494617366294, 443.39303344518356, 840.642834252332], [347.676690455591, 475.0701395711058, 383.94468812449156, 456.7512619303556, 547.1719187673109, 224.69458657065758, 458.98685335259506, 599.8561007491281, 231.02565460233575, 610.5318803183029, 763.3423474509603, 548.8104762105211, 445.95788564834953, 844.6566709331175, 591.2236009653337, 586.0438760821825, 399.6820689195621, 395.17360423878256, 535.9853351258233, 332.27242110850426, 801.7584039310705, 190.6337233666032, 805.700536966829, 799.6824375238089, 346.29917202656327, 611.7423892505719, 705.8824305058062, 535.9691379719488, 488.1708623023391, 604.3772264289142], [687.7108994865216, 483.44749361779685, 661.8182197739575, 591.5452701990528, 151.60961549943875, 524.1475889465452, 745.1142999852398, 665.6103992924466, 701.3015233859578, 648.9854638583182, 403.08097902196505, 384.97216329583586, 442.52161997463816, 590.5026536093199, 219.04366558018955, 899.2103705796073, 562.4908789323547, 666.088957218587, 496.97593850278065, 777.9572405840922, 531.7316118485633, 500.7782009017233, 646.4095967934252, 633.5713368259554, 608.1857007168994, 585.4020395597571, 490.06193749044934, 463.884131549627, 632.7981360348942, 634.8055942938928], [482.5550451528366, 691.7011356960619, 496.2851035642388, 529.4040886765091, 444.3593296445004, 198.06208336708823, 365.6472909266031, 391.3885069938369, 859.494451604626, 275.19483951927816, 568.4478784631463, 203.74971298680123, 676.2053582803082, 527.9859302404323, 714.4565600799949, 288.9012675397431, 629.6056652113498, 326.2525932990075, 519.5740740263301, 696.8119752318905, 347.1796230415255, 388.6576994098651, 357.54758351840974, 873.5528483422207, 507.0189947052724, 508.1981784529926, 536.9527958233257, 871.2838601964829, 361.93416709279154, 496.5981745168124]] data2 = [[103,104,104,105,105,106,106,106,107,107,106,106,105,105,104,104,104,104,105,107,107,106,105,105,107,108,109,110,110,110,110,110,110,109,109,109,109,109,109,108,107,107,107,107,106,106,105,104,104,104,104,104,104,104,103,103,103,103,102,102,101,101,100,100,100,100,100,99,98,97,97,96,96,96,96,96,96,96,95,95,95,94,94,94,94,94,94], [104,104,105,105,106,106,107,107,107,107,107,107,107,106,106,106,106,106,106,108,108,108,106,106,108,109,110,110,112,112,113,112,111,110,110,110,110,109,109,109,108,107,107,107,107,106,106,105,104,104,104,104,104,104,104,103,103,103,103,102,102,101,101,100,100,100,100,99,99,98,97,97,96,96,96,96,96,96,96,95,95,95,94,94,94,94,94], [104,105,105,106,106,107,107,108,108,108,108,108,108,108,108,108,108,108,108,108,110,110,110,110,110,110,110,111,113,115,116,115,113,112,110,110,110,110,110,110,109,108,108,108,108,107,106,105,105,105,105,105,105,104,104,104,104,103,103,103,102,102,102,101,100,100,100,99,99,98,97,97,96,96,96,96,96,96,96,96,95,95,94,94,94,94,94], 
[105,105,106,106,107,107,108,108,109,109,109,109,109,110,110,110,110,110,110,110,111,112,115,115,115,115,115,116,116,117,119,118,117,116,114,113,112,110,110,110,110,110,110,109,109,108,107,106,106,106,106,106,105,105,105,104,104,104,103,103,103,102,102,102,101,100,100,99,99,98,97,97,96,96,96,96,96,96,96,96,95,95,94,94,94,94,94], [105,106,106,107,107,108,108,109,109,110,110,110,110,111,110,110,110,110,111,114,115,116,121,121,121,121,121,122,123,124,124,123,121,119,118,117,115,114,112,111,110,110,110,110,110,110,109,109,108,109,107,107,106,106,105,105,104,104,104,104,103,103,102,102,102,101,100,100,99,99,98,97,96,96,96,96,96,96,96,96,95,95,94,94,94,94,94], [106,106,107,107,107,108,109,109,110,110,111,111,112,113,112,111,111,112,115,118,118,119,126,128,128,127,128,128,129,130,129,128,127,125,122,120,118,117,115,114,112,110,110,110,110,110,111,110,110,110,109,109,108,107,106,105,105,105,104,104,104,103,103,102,102,102,101,100,99,99,98,97,96,96,96,96,96,96,96,96,95,95,94,94,94,94,94], [106,107,107,108,108,108,109,110,110,111,112,113,114,115,114,115,116,116,119,123,125,130,133,134,134,134,134,135,135,136,135,134,132,130,128,124,121,119,118,116,114,112,111,111,111,112,112,111,110,110,110,109,108,108,107,108,107,106,105,104,104,104,103,103,103,102,101,100,99,99,98,97,96,96,96,96,96,96,96,96,95,95,95,94,94,94,94], [107,107,108,108,109,109,110,110,112,113,114,115,116,117,117,120,120,121,123,129,134,136,138,139,139,139,140,142,142,141,141,140,137,134,131,127,124,122,120,118,117,115,113,114,113,114,114,113,112,111,110,110,109,108,107,106,105,105,105,104,104,104,103,103,103,101,100,100,99,99,98,97,96,96,96,96,96,96,96,96,96,95,95,94,94,94,94], [107,108,108,109,109,110,111,112,114,115,116,117,118,119,121,125,125,127,131,136,140,141,142,144,144,145,148,149,148,147,146,144,140,138,136,130,127,125,123,121,119,118,117,117,116,116,116,115,114,113,113,111,110,109,108,107,106,105,105,103,103,102,102,102,103,101,100,100,100,99,98,98,97,96,96,96,96,96,96,96,96,95,95,95,94,94,94], [107,108,109,109,110,110,110,113,115,117,118,119,120,123,126,129,131,134,139,142,144,145,147,148,150,152,154,154,153,154,151,149,146,143,140,136,130,128,126,124,122,121,120,119,118,117,117,117,116,116,115,113,112,110,109,108,107,106,106,105,104,103,102,101,101,100,100,100,100,99,99,98,97,96,96,96,96,96,96,96,96,95,95,95,94,94,94], [107,108,109,109,110,110,110,112,115,117,119,122,125,127,130,133,137,141,143,145,148,149,152,155,157,159,160,160,161,162,159,156,153,149,146,142,139,134,130,128,126,125,122,120,120,120,119,119,119,118,117,115,113,111,110,110,109,108,107,106,106,105,104,104,103,102,100,100,100,99,99,98,97,96,96,96,96,96,96,96,96,95,95,95,95,94,94], [108,108,109,109,110,110,110,112,115,118,121,125,128,131,134,138,141,145,147,149,152,157,160,161,163,166,169,170,170,171,168,162,158,155,152,148,144,140,136,132,129,127,124,122,121,120,120,120,120,120,119,117,115,113,110,110,110,110,109,108,108,107,107,106,105,104,102,100,100,100,99,98,97,96,96,96,96,96,96,96,96,96,95,95,95,94,94], [108,109,109,110,110,111,112,114,117,120,124,128,131,135,138,142,145,149,152,155,158,163,166,167,170,173,175,175,175,173,171,169,164,160,156,153,149,144,140,136,131,129,126,124,123,123,122,121,120,120,120,119,117,115,111,110,110,110,110,110,109,109,110,109,108,106,103,101,100,100,100,98,97,96,96,96,96,96,96,96,96,96,95,95,95,95,94], 
[108,109,110,110,110,113,114,116,119,122,126,131,134,138,141,145,149,152,156,160,164,169,171,174,177,175,178,179,177,175,174,172,168,163,160,157,151,147,143,138,133,130,128,125,125,124,123,122,121,121,120,120,118,116,115,111,110,110,110,110,113,114,113,112,110,107,105,102,100,100,100,98,97,96,96,96,96,96,96,96,96,96,96,95,95,95,94], [108,109,110,110,112,115,116,118,122,125,129,133,137,140,144,149,152,157,161,165,169,173,176,179,179,180,180,180,178,178,176,175,171,165,163,160,153,148,143,139,135,132,129,128,127,125,124,124,123,123,122,122,120,118,117,118,115,117,118,118,119,117,116,115,112,109,107,105,100,100,100,100,97,96,96,96,96,96,96,96,96,96,96,95,95,95,95], [108,109,110,111,114,116,118,122,127,130,133,136,140,144,148,153,157,161,165,169,173,177,180,180,180,180,181,180,180,180,179,178,173,168,165,161,156,149,143,139,136,133,130,129,128,126,126,125,125,125,125,124,122,121,120,120,120,120,121,122,123,122,120,117,114,111,108,106,105,100,100,100,100,96,96,96,96,96,96,96,96,96,96,96,95,95,95], [107,108,110,113,115,118,121,126,131,134,137,140,143,148,152,157,162,165,169,173,177,181,181,181,180,181,181,181,180,180,180,178,176,170,167,163,158,152,145,140,137,134,132,130,129,127,127,126,127,128,128,126,125,125,125,123,126,128,129,130,130,125,124,119,116,114,112,110,107,106,105,100,100,100,96,96,96,96,96,96,96,96,96,96,96,95,95], [107,109,111,116,119,122,125,130,135,137,140,144,148,152,156,161,165,168,172,177,181,184,181,181,181,180,180,180,180,180,180,178,178,173,168,163,158,152,146,141,138,136,134,132,130,129,128,128,130,130,130,129,128,129,129,130,132,133,133,134,134,132,128,122,119,116,114,112,108,106,105,105,100,100,100,97,97,97,97,97,97,97,96,96,96,96,95], [108,110,112,117,122,126,129,135,139,141,144,149,153,156,160,165,168,171,177,181,184,185,182,180,180,179,178,178,180,179,179,178,176,173,168,163,157,152,148,143,139,137,135,133,131,130,130,131,132,132,132,131,132,132,133,134,136,137,137,137,136,134,131,124,121,118,116,114,111,109,107,106,105,100,100,100,97,97,97,97,97,97,97,96,96,96,96], [108,110,114,120,126,129,134,139,142,144,146,152,158,161,164,168,171,175,181,184,186,186,183,179,178,178,177,175,178,177,177,176,175,173,168,162,156,153,149,145,142,140,138,136,133,132,132,132,134,134,134,134,135,136,137,138,140,140,140,140,139,137,133,127,123,120,118,115,112,108,108,106,106,105,100,100,100,98,98,98,98,98,98,97,96,96,96], [108,110,116,122,128,133,137,141,143,146,149,154,161,165,168,172,175,180,184,188,189,187,182,178,176,176,175,173,174,173,175,174,173,171,168,161,157,154,150,148,145,143,141,138,135,135,134,135,135,136,136,137,138,139,140,140,140,140,140,140,140,139,135,130,126,123,120,117,114,111,109,108,107,106,105,100,100,100,99,99,98,98,98,98,97,97,96], [110,112,118,124,130,135,139,142,145,148,151,157,163,169,172,176,179,183,187,190,190,186,180,177,175,173,170,169,169,170,171,172,170,170,167,163,160,157,154,152,149,147,144,140,137,137,136,137,138,138,139,140,141,140,140,140,140,140,140,140,140,138,134,131,128,124,121,118,115,112,110,109,108,107,106,105,100,100,100,99,99,99,98,98,98,97,97], [110,114,120,126,131,136,140,143,146,149,154,159,166,171,177,180,182,186,190,190,190,185,179,174,171,168,166,163,164,163,166,169,170,170,168,164,162,161,158,155,153,150,147,143,139,139,139,139,140,141,141,142,142,141,140,140,140,140,140,140,140,137,134,131,128,125,122,119,116,114,112,110,109,109,108,107,105,100,100,100,99,99,99,98,98,97,97], 
[110,115,121,127,132,136,140,144,148,151,157,162,169,174,178,181,186,188,190,191,190,184,177,172,168,165,162,159,158,158,159,161,166,167,169,166,164,163,161,159,156,153,149,146,142,142,141,142,143,143,143,143,144,142,141,140,140,140,140,140,140,138,134,131,128,125,123,120,117,116,114,112,110,109,108,107,106,105,102,101,100,99,99,99,98,98,97], [110,116,121,127,132,136,140,144,148,154,160,166,171,176,180,184,189,190,191,191,191,183,176,170,166,163,159,156,154,155,155,158,161,165,170,167,166,165,163,161,158,155,152,150,146,145,145,145,146,146,144,145,145,144,142,141,140,140,140,140,138,136,134,131,128,125,123,121,119,117,115,113,112,111,111,110,108,106,105,102,100,100,99,99,99,98,98], [110,114,119,126,131,135,140,144,149,158,164,168,172,176,183,184,189,190,191,191,190,183,174,169,165,161,158,154,150,151,152,155,159,164,168,168,168,167,165,163,160,158,155,153,150,148,148,148,148,148,147,146,146,145,143,142,141,140,139,138,136,134,132,131,128,126,124,122,120,118,116,114,113,113,112,111,108,107,106,105,104,102,100,99,99,99,99], [110,113,119,125,131,136,141,145,150,158,164,168,172,177,183,187,189,191,192,191,190,183,174,168,164,160,157,153,150,149,150,154,158,162,166,170,170,168,166,164,162,160,158,155,152,151,151,151,151,151,149,148,147,146,145,143,142,140,139,137,135,134,132,131,129,127,125,123,121,119,117,116,114,114,113,112,110,108,107,105,103,100,100,100,100,99,99], [110,112,118,124,130,136,142,146,151,157,163,168,174,178,183,187,189,190,191,192,189,182,174,168,164,160,157,153,149,148,149,153,157,161,167,170,170,170,168,166,165,163,159,156,154,153,155,155,155,155,152,150,149,147,145,143,141,140,139,138,136,134,133,131,130,128,126,124,122,120,119,117,116,115,114,113,111,110,107,106,105,105,102,101,100,100,100], [110,111,116,122,129,137,142,146,151,158,164,168,172,179,183,186,189,190,192,193,188,182,174,168,164,161,157,154,151,149,151,154,158,161,167,170,170,170,170,169,168,166,160,157,156,156,157,158,159,159,156,153,150,148,146,144,141,140,140,138,136,135,134,133,131,129,127,125,123,122,120,118,117,116,115,114,112,111,110,108,107,106,105,104,102,100,100], [108,110,115,121,131,137,142,147,152,159,163,167,170,177,182,184,187,189,192,194,189,183,174,169,165,161,158,156,154,153,154,157,160,164,167,171,172,174,174,173,171,168,161,159,158,158,159,161,161,160,158,155,151,149,147,144,142,141,140,138,137,136,135,134,132,130,128,126,125,123,121,119,118,117,116,115,113,112,112,111,110,109,108,107,105,101,100], [108,110,114,120,128,134,140,146,152,158,162,166,169,175,180,183,186,189,193,195,190,184,176,171,167,163,160,158,157,156,157,159,163,166,170,174,176,178,178,176,172,167,164,161,161,160,161,163,163,163,160,157,153,150,148,146,144,142,141,140,139,138,136,135,134,133,129,127,126,124,122,121,119,118,117,116,114,113,112,111,110,110,109,109,107,104,100], [107,110,115,119,123,129,135,141,146,156,161,165,168,173,179,182,186,189,193,194,191,184,179,175,170,166,162,161,160,160,161,162,165,169,172,176,178,179,179,176,172,168,165,163,163,163,163,165,166,164,161,158,155,152,150,147,146,144,143,142,141,139,139,138,137,135,131,128,127,125,124,122,121,119,118,116,115,113,112,111,111,110,110,109,109,105,100], [107,110,114,117,121,126,130,135,142,151,159,163,167,171,177,182,185,189,192,193,191,187,183,179,174,169,167,166,164,164,165,166,169,171,174,178,179,180,180,178,173,169,166,165,165,166,165,168,169,166,163,159,157,154,152,149,148,147,146,145,143,142,141,140,139,138,133,130,128,127,125,124,122,120,118,117,115,112,111,111,111,111,110,109,108,106,100], 
[107,109,113,118,122,126,129,134,139,150,156,160,165,170,175,181,184,188,191,192,192,189,185,181,177,173,171,169,168,167,169,170,172,174,176,178,179,180,180,179,175,170,168,166,166,168,168,170,170,168,164,160,158,155,152,151,150,149,149,148,147,145,144,143,142,141,136,133,130,129,127,125,123,120,119,118,115,112,111,111,111,110,109,109,109,105,100], [105,107,111,117,121,124,127,131,137,148,154,159,164,168,174,181,184,187,190,191,191,190,187,184,180,178,175,174,172,171,173,173,173,176,178,179,180,180,180,179,175,170,168,166,168,169,170,170,170,170,166,161,158,156,154,153,151,150,150,150,150,148,147,146,145,143,139,135,133,131,129,126,124,121,120,118,114,111,111,111,110,110,109,107,106,104,100], [104,106,110,114,118,121,125,129,135,142,150,157,162,167,173,180,183,186,188,190,190,190,189,184,183,181,180,179,179,176,177,176,176,177,178,179,180,180,179,177,173,169,167,166,167,169,170,170,170,170,167,161,159,157,155,153,151,150,150,150,150,150,150,149,147,145,141,138,135,133,130,127,125,123,121,118,113,111,110,110,109,109,107,106,105,103,100], [104,106,108,111,115,119,123,128,134,141,148,154,161,166,172,179,182,184,186,189,190,190,190,187,185,183,180,180,180,179,179,177,176,177,178,178,178,177,176,174,171,168,166,164,166,168,170,170,170,170,168,162,159,157,155,153,151,150,150,150,150,150,150,150,150,148,144,140,137,134,132,129,127,125,122,117,111,110,107,107,106,105,104,103,102,101,100], [103,105,107,110,114,118,122,127,132,140,146,153,159,165,171,176,180,183,185,186,189,190,188,187,184,182,180,180,180,179,178,176,176,176,176,174,174,173,172,170,168,167,165,163,164,165,169,170,170,170,166,162,159,157,155,153,151,150,150,150,150,150,150,150,150,150,146,142,139,136,133,131,128,125,122,117,110,108,106,105,104,103,103,101,101,101,101], [102,103,106,108,112,116,121,125,130,138,145,151,157,163,170,174,178,181,181,184,186,186,187,186,184,181,180,180,180,179,178,174,173,173,171,170,170,169,168,167,166,164,163,162,161,164,167,169,170,168,164,160,158,157,155,153,151,150,150,150,150,150,150,150,150,150,147,144,141,138,135,133,128,125,122,116,109,107,104,104,103,102,101,101,101,101,101], [101,102,105,107,110,115,120,124,129,136,143,149,155,162,168,170,174,176,178,179,181,182,184,184,183,181,180,180,179,177,174,172,170,168,166,165,164,164,164,164,162,160,159,159,158,160,162,164,166,166,163,159,157,156,155,153,151,150,150,150,150,150,150,150,150,150,149,146,143,140,137,133,129,124,119,112,108,105,103,103,102,101,101,101,101,100,100], [101,102,104,106,109,113,118,122,127,133,141,149,155,161,165,168,170,172,175,176,177,179,181,181,181,180,180,179,177,174,171,167,165,163,161,160,160,160,160,160,157,155,155,154,154,155,157,159,161,161,161,159,156,154,154,153,151,150,150,150,150,150,150,150,150,150,149,147,144,141,137,133,129,123,116,110,107,104,102,102,101,101,101,100,100,100,100], [102,103,104,106,108,112,116,120,125,129,137,146,154,161,163,165,166,169,172,173,174,175,177,178,178,178,178,177,174,171,168,164,160,158,157,157,156,156,156,155,152,151,150,150,151,151,152,154,156,157,157,156,155,153,152,152,151,150,150,150,150,150,150,150,150,150,150,147,144,141,138,133,127,120,113,109,106,103,101,101,101,100,100,100,100,100,100], [103,104,105,106,108,110,114,118,123,127,133,143,150,156,160,160,161,162,167,170,171,172,173,175,175,174,174,173,171,168,164,160,156,155,154,153,153,152,152,150,149,148,148,148,148,148,149,149,150,152,152,152,152,151,150,150,150,150,150,150,150,150,150,150,150,150,149,147,144,141,138,132,125,118,111,108,105,103,102,101,101,101,100,100,100,100,100], 
[104,105,106,107,108,110,113,117,120,125,129,138,145,151,156,156,157,158,160,164,166,168,170,171,172,171,171,169,166,163,160,156,153,151,150,150,149,149,149,148,146,146,146,146,146,146,146,147,148,148,149,149,149,148,148,148,148,149,149,150,150,150,150,150,150,150,148,146,143,141,136,129,123,117,110,108,105,104,103,102,102,101,101,100,100,100,100], [103,104,105,106,107,109,111,115,118,122,127,133,140,143,150,152,153,155,157,159,162,164,167,168,168,168,167,166,163,160,157,153,150,148,148,147,147,147,145,145,144,143,143,143,144,144,144,144,145,145,145,145,146,146,146,146,146,147,147,148,149,150,150,150,150,149,147,145,143,141,134,127,123,117,111,108,105,105,104,104,103,103,102,101,100,100,100], [102,103,104,105,106,107,109,113,116,120,125,129,133,137,143,147,149,151,152,154,158,161,164,165,164,164,163,163,160,157,154,151,149,147,145,145,144,143,141,140,141,141,141,141,141,142,142,142,142,142,142,142,143,143,143,144,144,145,146,146,146,147,148,148,148,148,145,143,142,140,134,128,123,117,112,108,106,105,105,104,104,103,102,101,100,100,99], [102,103,104,105,105,106,108,110,113,118,123,127,129,132,137,141,142,142,145,150,154,157,161,161,160,160,160,159,157,154,151,148,146,145,143,142,142,139,137,136,137,137,138,138,139,139,139,139,139,139,139,139,140,140,141,142,142,143,144,144,144,145,145,145,145,145,144,142,140,139,136,129,124,119,113,109,106,106,105,104,103,102,101,101,100,99,99], [102,103,104,104,105,106,107,108,111,116,121,124,126,128,131,134,135,137,139,143,147,152,156,157,157,157,156,155,153,151,148,146,143,142,141,140,138,135,133,132,132,133,133,133,134,135,135,135,135,136,136,137,137,138,138,139,140,141,141,142,142,143,142,142,141,141,140,139,137,134,133,129,125,121,114,110,107,106,106,104,103,102,101,100,99,99,99], [102,103,104,104,105,105,106,108,110,113,118,121,124,126,128,130,132,134,136,139,143,147,150,154,154,154,153,151,149,148,146,143,141,139,137,136,132,130,128,128,128,129,129,130,130,131,132,132,132,133,134,134,135,135,136,137,138,139,139,140,140,140,139,139,138,137,137,135,132,130,129,127,124,120,116,112,109,106,105,103,102,101,101,100,99,99,99], [101,102,103,104,104,105,106,107,108,110,114,119,121,124,126,128,129,132,134,137,140,143,147,149,151,151,151,149,147,145,143,141,138,136,134,131,128,126,124,125,125,126,126,127,128,128,129,129,130,130,131,131,132,132,133,134,135,135,136,136,137,137,136,136,135,134,133,131,129,128,127,126,123,119,115,111,109,107,105,104,103,102,101,100,100,100,99], [101,102,103,103,104,104,105,106,108,110,112,116,119,121,124,125,127,130,132,135,137,140,143,147,149,149,149,147,145,143,141,139,136,133,131,128,125,122,121,122,122,122,123,125,125,126,127,127,127,128,128,128,129,129,130,131,131,132,132,133,133,133,132,132,131,131,130,129,128,126,125,124,121,117,111,109,108,106,105,104,103,102,101,101,100,100,100], [100,101,102,103,103,104,105,106,107,108,110,114,117,119,121,123,126,128,130,133,136,139,141,144,146,147,146,145,143,141,138,136,133,130,127,124,121,120,120,120,120,120,121,122,123,124,124,125,125,126,126,125,126,126,126,125,126,127,128,128,129,129,128,128,128,128,128,128,126,125,123,122,119,114,109,108,107,106,105,104,103,103,102,102,101,100,100], [100,101,102,103,104,105,106,107,108,109,110,112,115,117,120,122,125,127,130,132,135,137,139,142,144,144,144,142,140,138,136,132,129,126,123,120,120,119,119,118,119,119,120,120,120,121,122,122,123,123,123,123,122,123,122,122,121,122,122,122,123,123,123,124,125,125,126,126,125,124,122,120,116,113,109,107,106,105,104,104,103,102,102,101,101,100,100], 
[100,101,102,103,104,105,106,107,108,109,110,112,114,117,119,122,124,127,129,131,134,136,138,140,142,142,142,140,138,136,133,129,125,122,120,119,118,118,117,116,117,117,118,119,119,120,120,120,121,121,121,122,121,120,120,120,119,119,120,120,120,120,120,120,123,123,124,124,124,123,121,119,114,112,108,106,106,104,104,103,102,102,101,101,100,100,99], [101,102,103,104,105,106,107,108,109,110,111,113,114,116,119,121,124,126,128,130,133,135,137,138,140,140,139,137,135,133,131,127,122,120,118,118,117,117,116,115,116,116,117,118,118,118,119,119,120,120,121,121,120,119,119,118,117,117,118,119,118,118,118,119,120,122,123,123,123,122,120,117,113,110,108,106,105,104,103,103,102,101,101,100,100,99,99], [101,102,103,104,105,106,107,108,109,110,111,111,113,115,118,121,123,125,127,129,131,133,135,137,138,138,137,134,132,130,127,122,120,118,116,116,116,116,115,113,114,115,116,117,117,118,118,119,119,119,120,120,119,118,117,117,116,116,117,117,117,118,119,119,119,120,121,121,121,121,119,116,113,110,107,105,105,103,103,103,102,101,100,100,99,99,99], [101,102,103,104,105,106,107,108,109,110,111,112,114,116,117,120,122,124,126,129,130,132,133,135,136,136,134,132,129,126,122,120,118,116,114,114,114,114,114,113,113,114,115,116,116,117,117,117,118,118,119,119,118,117,116,116,115,115,116,116,116,117,117,118,118,119,120,120,120,120,119,116,113,109,106,104,104,103,102,102,101,101,100,99,99,99,98], [101,102,103,104,105,106,107,108,109,110,111,113,115,117,117,118,121,123,126,128,130,130,131,132,133,134,131,129,125,122,120,118,116,114,113,112,112,113,112,112,111,112,113,113,114,115,116,116,117,117,118,118,116,116,115,115,115,114,114,115,116,116,117,117,118,118,119,119,120,120,117,115,112,108,106,104,103,102,102,102,101,100,99,99,99,98,98], [101,102,103,104,105,105,106,107,108,109,110,111,113,115,117,118,120,122,125,126,127,128,129,130,131,131,128,125,121,120,118,116,114,113,113,111,111,111,111,110,109,110,111,112,113,113,114,115,115,116,117,117,116,115,114,114,113,113,114,114,115,115,116,116,117,118,118,119,119,118,116,114,112,108,105,103,103,102,101,101,100,100,99,99,98,98,97], [100,101,102,103,104,105,106,107,108,109,110,110,111,113,115,118,120,121,122,124,125,125,126,127,128,127,124,121,120,118,116,114,113,112,112,110,109,109,108,108,108,109,110,111,112,112,113,114,114,115,116,116,115,114,113,112,112,113,113,114,114,115,115,116,116,117,117,118,118,117,115,113,111,107,105,103,102,101,101,100,100,100,99,99,98,98,97], [100,101,102,103,104,105,105,106,107,108,109,110,110,111,114,116,118,120,120,121,122,122,123,124,123,123,120,118,117,115,114,115,113,111,110,109,108,108,107,107,107,108,109,110,111,111,112,113,113,114,115,115,114,113,112,111,111,112,112,112,113,114,114,115,115,116,116,117,117,116,114,112,109,106,104,102,101,100,100,99,99,99,99,98,98,97,97]] data3 = [[16,29, 12, 14, 16, 5, 9, 43, 25, 49, 57, 61, 37, 66, 79, 55, 51, 55, 17, 29, 9, 4, 9, 12, 9], [22,6, 2, 12, 23, 9, 2, 4, 11, 28, 49, 51, 47, 38, 65, 69, 59, 65, 59, 22, 11, 12, 9, 9, 13], [2, 5, 8, 44, 9, 22, 2, 5, 12, 34, 43, 54, 44, 49, 48, 54, 59, 69, 51, 21, 16, 9, 5, 4, 7], [3, 9, 9, 34, 9, 9, 2, 4, 13, 26, 58, 61, 59, 53, 54, 64, 55, 52, 53, 18, 3, 9, 12, 2, 8], [4, 2, 9, 8, 2, 23, 2, 4, 14, 31, 48, 46, 59, 66, 54, 56, 67, 54, 23, 14, 6, 8, 7, 9, 8], [5, 2, 23, 2, 9, 9, 9, 4, 8, 8, 6, 14, 12, 9, 14, 9, 21, 22, 34, 12, 9, 23, 9, 11, 13], [6, 7, 23, 23, 9, 4, 7, 4, 23, 11, 32, 2, 2, 5, 34, 9, 4, 12, 15, 19, 45, 9, 19, 9, 4]] HeatMap(data = data) HeatMap(title= "Heatmap Second Example", xLabel= "X Label", yLabel= "Y Label", data = data, legendPosition = 
LegendPosition.TOP) HeatMap(title = "Green Yellow White", data = data2, showLegend = False, color = GradientColor.GREEN_YELLOW_WHITE) colors = [Color.black, Color.yellow, Color.red] HeatMap(title= "Custom Gradient Example", data= data3, color= GradientColor(colors)) HeatMap(initWidth= 900, initHeight= 300, title= "Custom size, no tooltips", data= data3, useToolTip= False, showLegend= False, color= GradientColor.WHITE_BLUE) ```
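The matrices above are typed in by hand, but `HeatMap` accepts any rectangular list of lists, so the grid can just as well be computed. A minimal sketch, assuming NumPy is available in the same BeakerX kernel (only `HeatMap` and `GradientColor.WHITE_BLUE` come from the examples above; the generated data is purely illustrative):

```
from beakerx import HeatMap, GradientColor
import numpy as np

# Build a 30x30 grid from a smooth function plus a little noise,
# instead of hard-coding every value.
xs = np.linspace(0, 2 * np.pi, 30)
grid = 100 * np.outer(np.sin(xs), np.cos(xs)) + np.random.normal(0, 5, (30, 30))

# HeatMap expects a plain list of lists, one inner list per row.
HeatMap(title="Generated data",
        data=grid.tolist(),
        color=GradientColor.WHITE_BLUE)
```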
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. !wget --no-check-certificate \ https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip \ -O /tmp/horse-or-human.zip !wget --no-check-certificate \ https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip \ -O /tmp/validation-horse-or-human.zip import os import zipfile local_zip = '/tmp/horse-or-human.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp/horse-or-human') local_zip = '/tmp/validation-horse-or-human.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp/validation-horse-or-human') zip_ref.close() # Directory with our training horse pictures train_horse_dir = os.path.join('/tmp/horse-or-human/horses') # Directory with our training human pictures train_human_dir = os.path.join('/tmp/horse-or-human/humans') # Directory with our training horse pictures validation_horse_dir = os.path.join('/tmp/validation-horse-or-human/horses') # Directory with our training human pictures validation_human_dir = os.path.join('/tmp/validation-horse-or-human/humans') ``` ## Building a Small Model from Scratch But before we continue, let's start defining the model: Step 1 will be to import tensorflow. ``` import tensorflow as tf ``` We then add convolutional layers as in the previous example, and flatten the final result to feed into the densely connected layers. Finally we add the densely connected layers. Note that because we are facing a two-class classification problem, i.e. a *binary classification problem*, we will end our network with a [*sigmoid* activation](https://wikipedia.org/wiki/Sigmoid_function), so that the output of our network will be a single scalar between 0 and 1, encoding the probability that the current image is class 1 (as opposed to class 0). ``` model = tf.keras.models.Sequential([ # Note the input shape is the desired size of the image 300x300 with 3 bytes color # This is the first convolution tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(300, 300, 3)), tf.keras.layers.MaxPooling2D(2, 2), # The second convolution tf.keras.layers.Conv2D(32, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), # The third convolution tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), # The fourth convolution tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), # The fifth convolution tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), # Flatten the results to feed into a DNN tf.keras.layers.Flatten(), # 512 neuron hidden layer tf.keras.layers.Dense(512, activation='relu'), # Only 1 output neuron. 
It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans') tf.keras.layers.Dense(1, activation='sigmoid') ]) from tensorflow.keras.optimizers import RMSprop model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=1e-4), metrics=['accuracy']) from tensorflow.keras.preprocessing.image import ImageDataGenerator # All images will be rescaled by 1./255 train_datagen = ImageDataGenerator( rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest') validation_datagen = ImageDataGenerator(rescale=1/255) # Flow training images in batches of 128 using train_datagen generator train_generator = train_datagen.flow_from_directory( '/tmp/horse-or-human/', # This is the source directory for training images target_size=(300, 300), # All images will be resized to 150x150 batch_size=128, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') # Flow training images in batches of 128 using train_datagen generator validation_generator = validation_datagen.flow_from_directory( '/tmp/validation-horse-or-human/', # This is the source directory for training images target_size=(300, 300), # All images will be resized to 150x150 batch_size=32, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') history = model.fit( train_generator, steps_per_epoch=8, epochs=100, verbose=1, validation_data = validation_generator, validation_steps=8) import matplotlib.pyplot as plt acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.figure() plt.plot(epochs, loss, 'r', label='Training Loss') plt.plot(epochs, val_loss, 'b', label='Validation Loss') plt.title('Training and validation loss') plt.legend() plt.show() ```
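Since the network ends in a single sigmoid unit, using the trained model on a new image reduces to thresholding that one output at 0.5. A minimal inference sketch under that assumption — the image path is hypothetical, and `class_indices` on the training generator shows which class was encoded as 1:

```
import numpy as np
from tensorflow.keras.preprocessing import image

# Hypothetical path to an image we want to classify.
img_path = '/tmp/validation-horse-or-human/humans/some-image.png'

# Preprocess the same way as during training: 300x300 RGB, scaled to [0, 1].
img = image.load_img(img_path, target_size=(300, 300))
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)  # add a batch dimension

prob = model.predict(x)[0][0]          # sigmoid output in [0, 1]
print(train_generator.class_indices)   # e.g. {'horses': 0, 'humans': 1}
print('human' if prob > 0.5 else 'horse', f'(P(class 1) = {prob:.3f})')
```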
# Adversarial Examples Let's start out by importing all the required libraries ``` import os import sys sys.path.append(os.path.join(os.getcwd(), "venv")) import numpy as np import torch import torchvision.transforms as transforms from matplotlib import pyplot as plt from torch import nn from torch.autograd import Variable from torch.utils.data import DataLoader from torchvision.datasets import MNIST ``` ## MNIST Pytorch expects `Dataset` objects as input. Luckily, for MNIST (and few other datasets such as CIFAR and SVHN), torchvision has a ready made function to convert the dataset to a pytorch `Dataset` object. Keep in mind that these functions return `PIL` images so you will have to apply a transformation on them. ``` path = os.path.join(os.getcwd(), "MNIST") transform = transforms.Compose([transforms.ToTensor()]) train_mnist = MNIST(path, train=True, transform=transform) test_mnist = MNIST(path, train=False, transform=transform) ``` ### Visualize Dataset Set `batch_size` to 1 to visualize the dataset. ``` batch_size = 1 train_set = DataLoader(train_mnist, batch_size=batch_size, shuffle=True) test_set = DataLoader(test_mnist, batch_size=batch_size, shuffle=True) num_images = 2 for i, (image, label) in enumerate(train_set): if i == num_images: break #Pytorch returns batch_size x num_channels x 28 x 28 plt.imshow(image[0][0]) plt.show() print("label: " + str(label)) ``` ### Train a Model Set `batch_size` to start training a model on the dataset. ``` batch_size = 64 train_set = DataLoader(train_mnist, batch_size=batch_size, shuffle=True) ``` Define a `SimpleCNN` model to train on MNIST ``` def identity(): return lambda x: x class CustomConv2D(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, activation, stride): super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, kernel_size-2) self.activation = activation def forward(self, x): h = self.conv(x) return self.activation(h) class SimpleCNN(nn.Module): def __init__(self, in_channels=1, out_base=2, kernel_size=3, activation=identity(), stride=2, num_classes=10): super().__init__() self.conv1 = CustomConv2D(in_channels, out_base, kernel_size, activation, stride) self.pool1 = nn.MaxPool2d((2, 2)) self.conv2 = CustomConv2D(out_base, out_base, kernel_size, activation, stride) self.pool2 = nn.MaxPool2d((2, 2)) self.linear = nn.Linear(4 * out_base, num_classes, bias=True) self.log_softmax = nn.LogSoftmax(dim=-1) def forward(self, x): h = self.conv1(x) h = self.pool1(h) h = self.conv2(h) h = self.pool2(h) h = h.view([x.size(0), -1]) return self.log_softmax(self.linear(h)) ``` Create 4 model variations: identity_model: SimpleCNN model with identity activation functions relu_model: SimpleCNN model with relu activation functions sig_model: SimpleCNN model with sigmoid activation functions tanh_model: SimpleCNN model with tanh activation functions ``` identity_model = SimpleCNN() relu_model = SimpleCNN(activation=nn.ReLU()) sig_model = SimpleCNN(activation=nn.Sigmoid()) tanh_model = SimpleCNN(activation=nn.Tanh()) ``` Create a function to train the model ``` def train_model(model, train_set, num_epochs): optimizer = torch.optim.Adam(lr=0.001, params=model.parameters()) for epoch in range(num_epochs): epoch_accuracy, epoch_loss = 0, 0 train_set_size = 0 for images, labels in train_set: batch_size = images.size(0) images_var, labels_var = Variable(images), Variable(labels) log_probs = model(images_var) _, preds = torch.max(log_probs, dim=-1) loss = nn.NLLLoss()(log_probs, labels_var) epoch_loss += 
loss.data.numpy()[0] * batch_size accuracy = preds.eq(labels_var).float().mean().data.numpy()[0] * 100.0 epoch_accuracy += accuracy * batch_size train_set_size += batch_size optimizer.zero_grad() loss.backward() optimizer.step() epoch_accuracy = epoch_accuracy / train_set_size epoch_loss = epoch_loss / train_set_size print("epoch {}: loss= {:.3}, accuracy= {:.4}".format(epoch + 1, epoch_loss, epoch_accuracy)) return model trained_model = train_model(relu_model, train_set, 10) ``` ## Generating Adversarial Examples Now that we have a trained model, we can generate adversarial examples. ### Gradient Ascent Use Gradient Ascent to generate a targeted adversarial example. ``` def np_val(torch_var): return torch_var.data.numpy()[0] class AttackNet(nn.Module): def __init__(self, model, image_size): super().__init__() self.model = model self.params = nn.Parameter(torch.zeros(image_size), requires_grad=True) def forward(self, image): # clamp parameters here? or in backward? x = image + self.params x = torch.clamp(x, 0, 1) log_probs = self.model(x) return log_probs class GradientAscent(object): def __init__(self, model, confidence=0): super().__init__() self.model = model self.num_steps = 10000 self.confidence = confidence def attack(self, image, label, target=None): image_var = Variable(image) attack_net = AttackNet(self.model, image.shape) optimizer = torch.optim.Adam(lr=0.01, params=[attack_net.params]) target = Variable(torch.from_numpy(np.array([target], dtype=np.int64)) ) if target is not None else None log_probs = attack_net(image_var) confidence, predictions = torch.max(torch.exp(log_probs), dim=-1) if label.numpy()[0] != np_val(predictions): print("model prediction does not match label") return None, (None, None), (None, None) else: for step in range(self.num_steps): stop_training = self.perturb(image_var, attack_net, target, optimizer) if stop_training: print("Adversarial attack succeeded after {} steps!".format( step + 1)) break if stop_training is False: print("Adversarial attack failed") log_probs = attack_net(image_var) adv_confidence, adv_predictions = torch.max(torch.exp(log_probs), dim=-1) return attack_net.params, (confidence, predictions), (adv_confidence, adv_predictions) def perturb(self, image, attack_net, target, optimizer): log_probs = attack_net(image) confidence, predictions = torch.max(torch.exp(log_probs), dim=-1) if (np_val(predictions) == np_val(target) and np_val(confidence) >= self.confidence): return True loss = nn.NLLLoss()(log_probs, target) optimizer.zero_grad() loss.backward() optimizer.step() return False ``` Define a `GradientAscent` object ``` gradient_ascent = GradientAscent(trained_model) ``` Define a function to help plot the results ``` %matplotlib inline def plot_results(image, perturbation, orig_pred, orig_con, adv_pred, adv_con): plot_image = image.numpy()[0][0] plot_perturbation = perturbation.data.numpy()[0][0] fig_size = plt.rcParams["figure.figsize"] fig_size[0] = 10 fig_size[1] = 5 plt.rcParams["figure.figsize"] = fig_size ax = plt.subplot(131) ax.set_title("Original: " + str(np_val(orig_pred)) + " @ " + str(np.round(np_val(orig_con) * 100, decimals=1)) + "%") plt.imshow(plot_image) plt.subplot(132) plt.imshow(plot_perturbation) ax = plt.subplot(133) plt.imshow(plot_image + plot_perturbation) ax.set_title("Adversarial: " + str(np_val(adv_pred)) + " @ " + str(np.round(np_val(adv_con) * 100, decimals=1)) + "%") plt.show() ``` Let's generate some adversarial examples! 
``` num_images = 2 for i, (test_image, test_label) in enumerate(test_set): if i == num_images: break target_classes = list(range(10)) target_classes.remove(test_label.numpy()[0]) target = np.random.choice(target_classes) perturbation, (orig_con, orig_pred), ( adv_con, adv_pred) = gradient_ascent.attack(test_image, test_label, target) if perturbation is not None: plot_results(test_image, perturbation, orig_pred, orig_con, adv_pred, adv_con) ``` ### Fast Gradient Now let's use the Fast Gradient Sign Method to generate untargeted adversarial examples. ``` class FastGradient(object): def __init__(self, model, confidence=0, alpha=0.1): super().__init__() self.model = model self.confidence = confidence self.alpha = alpha def attack(self, image, label): image_var = Variable(image, requires_grad=True) target = Variable(torch.from_numpy(np.array([label], dtype=np.int64)) ) if label is not None else None log_probs = self.model(image_var) confidence, predictions = torch.max(torch.exp(log_probs), dim=-1) if label.numpy()[0] != np_val(predictions): print("model prediction does not match label") return None, (None, None), (None, None) else: loss = nn.NLLLoss()(log_probs, target) loss.backward() x_grad = torch.sign(image_var.grad.data) adv_image = torch.clamp(image_var.data + self.alpha * x_grad, 0, 1) delta = adv_image - image_var.data adv_log_probs = self.model(Variable(adv_image)) adv_confidence, adv_predictions = torch.max(torch.exp(adv_log_probs), dim=-1) if (np_val(adv_predictions) != np_val(predictions) and np_val(adv_confidence) >= self.confidence): print("Adversarial attack succeeded!") else: print("Adversarial attack failed") return Variable(delta), (confidence, predictions), (adv_confidence, adv_predictions) ``` Define a `FastGradient` object ``` fast_gradient = FastGradient(trained_model) ``` Let's generate some adversarial examples! ``` num_images = 20 for i, (test_image, test_label) in enumerate(test_set): if i == num_images: break perturbation, (orig_con, orig_pred), ( adv_con, adv_pred) = fast_gradient.attack(test_image, test_label) if perturbation is not None: plot_results(test_image, perturbation, orig_pred, orig_con, adv_pred, adv_con) ```
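For reference, the perturbation computed by `FastGradient.attack` above is the standard FGSM step

$x_{adv} = \text{clip}_{[0,1]}\left(x + \alpha \cdot \text{sign}\left(\nabla_x \mathcal{L}(x, y)\right)\right)$

where $\alpha$ is the `alpha` attribute (0.1 by default) and $\mathcal{L}$ is the negative log-likelihood of the true label. The gradient-ascent attack instead optimizes an additive perturbation with Adam until the model assigns the chosen target class with at least the requested confidence.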
# Introduction to Artificial Intelligence

We will work through two exercises to understand the concept of artificial intelligence.

## Bouncing Object

In this exercise we will create an object that, when it hits one of the walls, changes direction and continues on its way.

```
!pip3 install ColabTurtle
```

We import the libraries:

```
import ColabTurtle.Turtle as robot
import random
```

Now the main code. For the moment, the robot bounces and comes back along the same direction. What you have to do is give it a random starting position and modify the `if` inside the `while` loop so that the direction flips along a single axis only.

```
robot.initializeTurtle(initial_speed=1)

pad = 15
max_w = robot.window_width() - pad
max_h = robot.window_height() - pad

robot.shape("circle")
robot.color("green")
robot.penup()
robot.goto(0 + pad, 200)

robot.dx = 10  # Velocity in x
robot.dy = 10  # Velocity in y
reflections = 0

# The number of reflections can be changed so that it runs forever, using while True.
# However, due to the limited time, it is limited to only 3 reflections.
while reflections < 3:
    robot.speed(random.randrange(1, 10))
    new_y = robot.gety() + robot.dy
    new_x = robot.getx() + robot.dx
    if (new_y < pad) or \
       (new_y > max_h) or \
       (new_x < pad) or \
       (new_x > max_w):
        robot.dy *= -1
        robot.dx *= -1
        reflections += 1
    robot.goto(new_x, new_y)
```

## ChatBot

For the next exercise, we will use a rule-based ChatBot. (Example taken from: https://www.analyticsvidhya.com/blog/2021/07/build-a-simple-chatbot-using-python-and-nltk/)

```
!pip install nltk
import nltk
from nltk.chat.util import Chat, reflections
```

**Chat**: the class that contains all the logic to process the text the chatbot receives and extract useful information from it.

**reflections**: a dictionary that contains basic inputs and their corresponding outputs.

```
print(reflections)
```

Let's start by building the rules. The following lines define a simple set of rules.

```
pairs = [
    [
        r"my name is (.*)",
        ["Hello %1, How are you today ?",]
    ],
    [
        r"hi|hey|hello",
        ["Hello", "Hey there",]
    ],
    [
        r"what is your name ?",
        ["I am a bot created by Analytics Vidhya. you can call me crazy!",]
    ],
    [
        r"how are you ?",
        ["I'm doing good\nHow about You ?",]
    ],
    [
        r"sorry (.*)",
        ["Its alright","Its OK, never mind",]
    ],
    [
        r"I am fine",
        ["Great to hear that, How can I help you?",]
    ],
    [
        r"i'm (.*) doing good",
        ["Nice to hear that","How can I help you?:)",]
    ],
    [
        r"(.*) age?",
        ["I'm a computer program dude\nSeriously you are asking me this?",]
    ],
    [
        r"what (.*) want ?",
        ["Make me an offer I can't refuse",]
    ],
    [
        r"(.*) created ?",
        ["Raghav created me using Python's NLTK library ","top secret ;)",]
    ],
    [
        r"(.*) (location|city) ?",
        ['Indore, Madhya Pradesh',]
    ],
    [
        r"how is weather in (.*)?",
        ["Weather in %1 is awesome like always","Too hot man here in %1","Too cold man here in %1","Never even heard about %1"]
    ],
    [
        r"i work in (.*)?",
        ["%1 is an Amazing company, I have heard about it. But they are in huge loss these days.",]
    ],
    [
        r"(.*)raining in (.*)",
        ["No rain since last week here in %2","Damn its raining too much here in %2"]
    ],
    [
        r"how (.*) health(.*)",
        ["I'm a computer program, so I'm always healthy ",]
    ],
    [
        r"(.*) (sports|game) ?",
        ["I'm a very big fan of Football",]
    ],
    [
        r"who (.*) sportsperson ?",
        ["Messy","Ronaldo","Roony"]
    ],
    [
        r"who (.*) (moviestar|actor)?",
        ["Brad Pitt"]
    ],
    [
        r"i am looking for online guides and courses to learn data science, can you suggest?",
        ["Crazy_Tech has many great articles with each step explanation along with code, you can explore"]
    ],
    [
        r"quit",
        ["BBye take care. See you soon :) ","It was nice talking to you. See you soon :)"]
    ],
]
```

After defining the rules, we define the function that runs the chat session.

```
def chat(this_creator='Nelson Yalta'):
    print(f"Hi!!! I am a chatbot created by {this_creator}, and I am at your service. Remember that I speak English.")
    chat = Chat(pairs, reflections)
    chat.converse()

chat()
```
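Besides the interactive `converse()` loop, `Chat` can also answer a single message programmatically through its `respond` method, which makes it easy to test the rules without typing into the console. A small usage sketch (the example messages are arbitrary):

```
bot = Chat(pairs, reflections)

# Each call runs the message through the regex rules in `pairs` and returns
# the first matching response, with %1/%2 substituted from the captured groups.
for message in ["my name is Ada", "what is your name ?", "how is weather in Quito?"]:
    print(message, "->", bot.respond(message))
```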
# Pi Estimation Using Monte Carlo In this exercise, we will use MapReduce and a Monte-Carlo-Simulation to estimate $\Pi$. If we are looking at this image from this [blog](https://towardsdatascience.com/how-to-make-pi-part-1-d0b41a03111f), we see a unit circle in a unit square: ![Circle_Box](https://miro.medium.com/max/700/1*y-GFdC5OM0ZtYfbfkjjB2w.png) The area: - for the circle is $A_{circle} = \Pi*r^2 = \Pi * 1*1 = \Pi$ - for the square is $A_{square} = d^2 = (2*r)^2 = 4$ The ratio of the two areas are therefore $\frac{A_{circle}}{A_{square}} = \frac{\Pi}{4}$ The Monte-Carlo-Simulation draws multiple points on the square, uniformly at random. For every point, we count if it lies within the circle or not. And so we get the approximation: $\frac{\Pi}{4} \approx \frac{\text{points_in_circle}}{\text{total_points}}$ or $\Pi \approx 4* \frac{\text{points_in_circle}}{\text{total_points}}$ If we have a point $x_1,y_1$ and we want to figure out if it lies in a circle with radius $1$ we can use the following formula: $\text{is_in_circle}(x_1,y_1) = \begin{cases} 1,& \text{if } (x_1)^2 + (y_1)^2 \leq 1\\ 0, & \text{otherwise} \end{cases}$ ## Implementation Write a MapReduce algorithm for estimating $\Pi$ ``` %%writefile pi.py #!/usr/bin/python3 from mrjob.job import MRJob from random import uniform class MyJob(MRJob): def mapper(self, _, line): for x in range(100): x = uniform(-1,1) y = uniform(-1,1) in_circle = x*x + y*y <=1 yield None, in_circle def reducer(self, key, values): values = list(values) yield "Pi", 4 * sum(values) / len(values) yield "number of values", len(values) # for v in values: # yield key, v if __name__ == '__main__': MyJob.run() ``` ## Another Approach Computing the mean in the mapper ``` %%writefile pi.py #!/usr/bin/python3 from mrjob.job import MRJob from random import uniform class MyJob(MRJob): def mapper(self, _, line): num_samples = 100 in_circles_list = [] for x in range(num_samples): x = uniform(-1,1) y = uniform(-1,1) in_circle = x*x + y*y <=1 in_circles_list.append(in_circle) yield None, [num_samples, sum(in_circles_list)/num_samples] def reducer(self, key, numSamples_sum_pairs): total_samples = 0 weighted_numerator_sum = 0 for (num_samples, current_sum) in numSamples_sum_pairs: total_samples += num_samples weighted_numerator_sum += num_samples*current_sum yield "Pi", 4 * weighted_numerator_sum / total_samples yield "weighted_numerator_sum", weighted_numerator_sum yield "total_samples", total_samples if __name__ == '__main__': MyJob.run() ``` ### Running the Job Unfortunately, the library does not work without an input file. I guess this comes from the fact that the hadoop streaming library also does not support this feature, see [stack overflow](https://stackoverflow.com/questions/22821005/hadoop-streaming-job-with-no-input-file). We fake the number of mappers with different input files. Not the most elegant solution :/ ``` !python pi.py /data/dataset/text/small.txt !python pi.py /data/dataset/text/holmes.txt ```
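For a quick sanity check of the estimate without mrjob or Hadoop, the same logic can be run as plain Python; a minimal sketch (the sample count is arbitrary):

```
from random import uniform

def estimate_pi(num_samples=1_000_000):
    # Draw points uniformly in [-1, 1] x [-1, 1] and count those inside the unit circle.
    in_circle = sum(uniform(-1, 1) ** 2 + uniform(-1, 1) ** 2 <= 1
                    for _ in range(num_samples))
    return 4 * in_circle / num_samples

print(estimate_pi())
```

The Monte-Carlo error shrinks roughly like $1/\sqrt{N}$, so a million samples typically give about three correct digits of $\Pi$.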
``` import tensorflow as tf # You'll generate plots of attention in order to see which parts of an image # our model focuses on during captioning import matplotlib.pyplot as plt # Scikit-learn includes many helpful utilities from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import re import numpy as np import os import time import json from glob import glob from PIL import Image import pickle # mount drive from google.colab import drive drive.mount('/gdrive') #set up pickle and checkpoints folder !ls /gdrive checkpoint_path = "/gdrive/My Drive/checkpoints/train" if not os.path.exists(checkpoint_path): os.mkdir("/gdrive/My Drive/checkpoints") os.mkdir("/gdrive/My Drive/checkpoints/train") if not os.path.exists("/gdrive/My Drive/pickles"): os.mkdir("/gdrive/My Drive/pickles") ``` ## Download and prepare the MS-COCO dataset You will use the [MS-COCO dataset](http://cocodataset.org/#home) to train our model. The dataset contains over 82,000 images, each of which has at least 5 different caption annotations. The code below downloads and extracts the dataset automatically. **Caution: large download ahead**. You'll use the training set, which is a 13GB file. ``` # Download caption annotation files annotation_folder = '/annotations/' if not os.path.exists(os.path.abspath('.') + annotation_folder): annotation_zip = tf.keras.utils.get_file('captions.zip', cache_subdir=os.path.abspath('.'), origin = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip', extract = True) annotation_file = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json' os.remove(annotation_zip) # Download image files image_folder = '/train2014/' if not os.path.exists(os.path.abspath('.') + image_folder): image_zip = tf.keras.utils.get_file('train2014.zip', cache_subdir=os.path.abspath('.'), origin = 'http://images.cocodataset.org/zips/train2014.zip', extract = True) PATH = os.path.dirname(image_zip) + image_folder os.remove(image_zip) else: PATH = os.path.abspath('.') + image_folder #Limiting size of dataset to 50000 # Read the json file with open(annotation_file, 'r') as f: annotations = json.load(f) # Store captions and image names in vectors all_captions = [] all_img_name_vector = [] for annot in annotations['annotations']: caption = '<start> ' + annot['caption'] + ' <end>' image_id = annot['image_id'] full_coco_image_path = PATH + 'COCO_train2014_' + '%012d.jpg' % (image_id) all_img_name_vector.append(full_coco_image_path) all_captions.append(caption) # Shuffle captions and image_names together # Set a random state train_captions, img_name_vector = shuffle(all_captions, all_img_name_vector, random_state=1) # Select the first 30000 captions from the shuffled set num_examples = 50000 train_captions = train_captions[:num_examples] img_name_vector = img_name_vector[:num_examples] len(train_captions), len(all_captions) ``` ## Preprocess the images using InceptionV3 Next, you will use InceptionV3 (which is pretrained on Imagenet) to classify each image. You will extract features from the last convolutional layer. 
First, you will convert the images into InceptionV3's expected format by: * Resizing the image to 299px by 299px * [Preprocess the images](https://cloud.google.com/tpu/docs/inception-v3-advanced#preprocessing_stage) using the [preprocess_input](https://www.tensorflow.org/api_docs/python/tf/keras/applications/inception_v3/preprocess_input) method to normalize the image so that it contains pixels in the range of -1 to 1, which matches the format of the images used to train InceptionV3. ``` def load_image(image_path): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, (299, 299)) img = tf.keras.applications.inception_v3.preprocess_input(img) return img, image_path ``` ## Initialize InceptionV3 and load the pretrained Imagenet weights Now you'll create a tf.keras model where the output layer is the last convolutional layer in the InceptionV3 architecture. The shape of the output of this layer is ```8x8x2048```. You use the last convolutional layer because you are using attention in this example. You don't perform this initialization during training because it could become a bottleneck. * You forward each image through the network and store the resulting vector in a dictionary (image_name --> feature_vector). * After all the images are passed through the network, you pickle the dictionary and save it to disk. ``` image_model = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet') new_input = image_model.input hidden_layer = image_model.layers[-1].output image_features_extract_model = tf.keras.Model(new_input, hidden_layer) ``` ## Caching the features extracted from InceptionV3 You will pre-process each image with InceptionV3 and cache the output to disk. Caching the output in RAM would be faster but also memory intensive, requiring 8 \* 8 \* 2048 floats per image. At the time of writing, this exceeds the memory limitations of Colab (currently 12GB of memory). Performance could be improved with a more sophisticated caching strategy (for example, by sharding the images to reduce random access disk I/O), but that would require more code. The caching will take about 10 minutes to run in Colab with a GPU. If you'd like to see a progress bar, you can: 1. install [tqdm](https://github.com/tqdm/tqdm): `!pip install tqdm` 2. Import tqdm: `from tqdm import tqdm` 3. Change the following line: `for img, path in image_dataset:` to: `for img, path in tqdm(image_dataset):` ``` # Get unique images encode_train = sorted(set(img_name_vector)) # Feel free to change batch_size according to your system configuration image_dataset = tf.data.Dataset.from_tensor_slices(encode_train) image_dataset = image_dataset.map( load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(16) for img, path in image_dataset: batch_features = image_features_extract_model(img) batch_features = tf.reshape(batch_features, (batch_features.shape[0], -1, batch_features.shape[3])) for bf, p in zip(batch_features, path): path_of_feature = p.numpy().decode("utf-8") np.save(path_of_feature, bf.numpy()) ``` ## Preprocess and tokenize the captions * First, you'll tokenize the captions (for example, by splitting on spaces). This gives us a vocabulary of all of the unique words in the data (for example, "surfing", "football", and so on). * Next, you'll limit the vocabulary size to the top 5,000 words (to save memory). You'll replace all other words with the token "UNK" (unknown). * You then create word-to-index and index-to-word mappings. 
* Finally, you pad all sequences to be the same length as the longest one. ``` # Find the maximum length of any caption in our dataset def calc_max_length(tensor): return max(len(t) for t in tensor) # Choose the top 5000 words from the vocabulary top_k = 5000 tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=top_k, oov_token="<unk>", filters='!"#$%&()*+.,-/:;=?@[\]^_`{|}~ ') tokenizer.fit_on_texts(train_captions) train_seqs = tokenizer.texts_to_sequences(train_captions) tokenizer.word_index['<pad>'] = 0 tokenizer.index_word[0] = '<pad>' pickle.dump( tokenizer, open( "tokeniser.pkl", "wb" ) ) !cp tokeniser.pkl "/gdrive/My Drive/pickles/tokeniser.pkl" # Create the tokenized vectors train_seqs = tokenizer.texts_to_sequences(train_captions) # Pad each vector to the max_length of the captions # If you do not provide a max_length value, pad_sequences calculates it automatically cap_vector = tf.keras.preprocessing.sequence.pad_sequences(train_seqs, padding='post') # Calculates the max_length, which is used to store the attention weights max_length = calc_max_length(train_seqs) print(max_length) #pickle.dump( max_length, open( "/gdrive/My Drive/max_length.p", "wb" ) ) pickle.dump( max_length, open( "max_length.pkl", "wb" ) ) !cp max_length.pkl "/gdrive/My Drive/pickles/max_length.pkl" #assert(False) ``` ## Split the data into training and testing ``` # Create training and validation sets using an 80-20 split img_name_train, img_name_val, cap_train, cap_val = train_test_split(img_name_vector, cap_vector, test_size=0.2, random_state=0) len(img_name_train), len(cap_train), len(img_name_val), len(cap_val) ``` ## Create a tf.data dataset for training ``` # Feel free to change these parameters according to your system's configuration BATCH_SIZE = 64 BUFFER_SIZE = 1000 embedding_dim = 256 units = 512 vocab_size = top_k + 1 num_steps = len(img_name_train) // BATCH_SIZE # Shape of the vector extracted from InceptionV3 is (64, 2048) # These two variables represent that vector shape features_shape = 2048 attention_features_shape = 64 # Load the numpy files def map_func(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')+'.npy') return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train)) # Use map to load the numpy files in parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function( map_func, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) ``` ## Model Fun fact: the decoder below is identical to the one in the example for [Neural Machine Translation with Attention](../sequences/nmt_with_attention.ipynb). The model architecture is inspired by the [Show, Attend and Tell](https://arxiv.org/pdf/1502.03044.pdf) paper. * In this example, you extract the features from the lower convolutional layer of InceptionV3 giving us a vector of shape (8, 8, 2048). * You squash that to a shape of (64, 2048). * This vector is then passed through the CNN Encoder (which consists of a single Fully connected layer). * The RNN (here GRU) attends over the image to predict the next word. 
``` class BahdanauAttention(tf.keras.Model): def __init__(self, units): super(BahdanauAttention, self).__init__() self.W1 = tf.keras.layers.Dense(units) self.W2 = tf.keras.layers.Dense(units) self.V = tf.keras.layers.Dense(1) def call(self, features, hidden): # features(CNN_encoder output) shape == (batch_size, 64, embedding_dim) # hidden shape == (batch_size, hidden_size) # hidden_with_time_axis shape == (batch_size, 1, hidden_size) hidden_with_time_axis = tf.expand_dims(hidden, 1) # score shape == (batch_size, 64, hidden_size) score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis)) # attention_weights shape == (batch_size, 64, 1) # you get 1 at the last axis because you are applying score to self.V attention_weights = tf.nn.softmax(self.V(score), axis=1) # context_vector shape after sum == (batch_size, hidden_size) context_vector = attention_weights * features context_vector = tf.reduce_sum(context_vector, axis=1) return context_vector, attention_weights class CNN_Encoder(tf.keras.Model): # Since you have already extracted the features and dumped it using pickle # This encoder passes those features through a Fully connected layer def __init__(self, embedding_dim): super(CNN_Encoder, self).__init__() # shape after fc == (batch_size, 64, embedding_dim) self.fc = tf.keras.layers.Dense(embedding_dim) def call(self, x): x = self.fc(x) x = tf.nn.relu(x) return x class RNN_Decoder(tf.keras.Model): def __init__(self, embedding_dim, units, vocab_size): super(RNN_Decoder, self).__init__() self.units = units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = tf.keras.layers.GRU(self.units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform') #self.bi = tf.keras.layers.LSTM(self.units, # return_sequences=True, # return_state=True, # recurrent_initializer='glorot_uniform') #self.fc0 = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(self.units, activation='sigmoid')) self.fc1 = tf.keras.layers.Dense(self.units) self.fc2 = tf.keras.layers.Dense(vocab_size) self.attention = BahdanauAttention(self.units) def call(self, x, features, hidden): # defining attention as a separate model context_vector, attention_weights = self.attention(features, hidden) # x shape after passing through embedding == (batch_size, 1, embedding_dim) x = self.embedding(x) # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size) x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1) # passing the concatenated vector to the GRU output, state = self.gru(x) #x = self.fc0(output) # shape == (batch_size, max_length, hidden_size) x = self.fc1(output) # x shape == (batch_size * max_length, hidden_size) x = tf.reshape(x, (-1, x.shape[2])) # output shape == (batch_size * max_length, vocab) x = self.fc2(x) return x, state, attention_weights def reset_state(self, batch_size): return tf.zeros((batch_size, self.units)) encoder = CNN_Encoder(embedding_dim) decoder = RNN_Decoder(embedding_dim, units, vocab_size) optimizer = tf.keras.optimizers.Adam() loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_mean(loss_) ``` ## Checkpoint ``` checkpoint_path = "/gdrive/My Drive/checkpoints/train" if not os.path.exists(checkpoint_path): os.mkdir(checkpoint_path) ckpt = tf.train.Checkpoint(encoder=encoder, decoder=decoder, 
optimizer = optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) start_epoch = 0 if ckpt_manager.latest_checkpoint: start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1]) # restoring the latest checkpoint in checkpoint_path ckpt.restore(ckpt_manager.latest_checkpoint) ``` ## Training * You extract the features stored in the respective `.npy` files and then pass those features through the encoder. * The encoder output, hidden state(initialized to 0) and the decoder input (which is the start token) is passed to the decoder. * The decoder returns the predictions and the decoder hidden state. * The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss. * Use teacher forcing to decide the next input to the decoder. * Teacher forcing is the technique where the target word is passed as the next input to the decoder. * The final step is to calculate the gradients and apply it to the optimizer and backpropagate. ``` # adding this in a separate cell because if you run the training cell # many times, the loss_plot array will be reset loss_plot = [] @tf.function def train_step(img_tensor, target): loss = 0 # initializing the hidden state for each batch # because the captions are not related from image to image hidden = decoder.reset_state(batch_size=target.shape[0]) dec_input = tf.expand_dims([tokenizer.word_index['<start>']] * target.shape[0], 1) with tf.GradientTape() as tape: features = encoder(img_tensor) for i in range(1, target.shape[1]): # passing the features through the decoder predictions, hidden, _ = decoder(dec_input, features, hidden) loss += loss_function(target[:, i], predictions) # using teacher forcing dec_input = tf.expand_dims(target[:, i], 1) total_loss = (loss / int(target.shape[1])) trainable_variables = encoder.trainable_variables + decoder.trainable_variables gradients = tape.gradient(loss, trainable_variables) optimizer.apply_gradients(zip(gradients, trainable_variables)) return loss, total_loss EPOCHS = 40 for epoch in range(start_epoch, EPOCHS): start = time.time() total_loss = 0 for (batch, (img_tensor, target)) in enumerate(dataset): batch_loss, t_loss = train_step(img_tensor, target) total_loss += t_loss if batch % 100 == 0: print ('Epoch {} Batch {} Loss {:.4f}'.format( epoch + 1, batch, batch_loss.numpy() / int(target.shape[1]))) # storing the epoch end loss value to plot later loss_plot.append(total_loss / num_steps) if epoch % 5 == 0: ckpt_manager.save() print ('Epoch {} Loss {:.6f}'.format(epoch + 1, total_loss/num_steps)) print ('Time taken for 1 epoch {} sec\n'.format(time.time() - start)) plt.plot(loss_plot) plt.xlabel('Epochs') plt.ylabel('Loss') plt.title('Loss Plot') plt.show() pickle.dump( loss_plot, open( "/gdrive/My Drive/loss_plot_save.p", "wb" ) ) ```
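The training loop above always relies on teacher forcing, which is only possible while the ground-truth caption is available. As a rough illustration (not code from the original notebook) of how the same encoder and decoder could generate a caption at inference time, the sketch below feeds the decoder its own previous prediction until it emits an end token or reaches `max_length`. It assumes the objects defined above (`load_image`, `image_features_extract_model`, `encoder`, `decoder`, `tokenizer`, `max_length`) and that the captions were wrapped with `<start>`/`<end>` markers earlier in the notebook.

```
# Minimal greedy-decoding sketch: instead of teacher forcing, the decoder's
# own prediction is fed back in at every step.
def greedy_caption(image_path):
    hidden = decoder.reset_state(batch_size=1)

    # Extract and flatten the InceptionV3 features for a single image.
    img, _ = load_image(image_path)
    features = image_features_extract_model(tf.expand_dims(img, 0))
    features = tf.reshape(features, (1, -1, features.shape[3]))
    features = encoder(features)

    dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
    result = []

    for _ in range(max_length):
        predictions, hidden, _ = decoder(dec_input, features, hidden)
        predicted_id = int(tf.argmax(predictions[0]))
        word = tokenizer.index_word[predicted_id]
        if word == '<end>':
            break
        result.append(word)
        # Feed the prediction back in instead of the ground-truth token.
        dec_input = tf.expand_dims([predicted_id], 0)

    return ' '.join(result)
```

Beam search or sampling could replace the `argmax`, but greedy decoding keeps the sketch short.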
# Predict state

Here is the current implementation of the `predict_state` function. It takes in a state (a Python list), separates it into position and velocity, and calculates a new, predicted state. It uses a constant velocity motion model.

**In this exercise, we'll be improving this function, and using matrix multiplication to efficiently calculate the predicted state!**

```
# The current predict state function
# Predicts the next state based on a motion model
def predict_state(state, dt):
    # Assumes a valid state had been passed in
    x = state[0]
    velocity = state[1]

    # Assumes a constant velocity model
    new_x = x + velocity*dt

    # Create and return the new, predicted state
    predicted_state = [new_x, velocity]
    return predicted_state
```

## Matrix operations

You've been given a matrix class that can create new matrices and perform one operation: multiplication. In our directory this is called `matrix.py`. Similar to the Car class, we can use this to initialize matrix objects.

```
# import the matrix file
import matrix

# Initialize a state vector
initial_position = 0 # meters
velocity = 50 # m/s

# Notice the syntax for creating a state column vector ([ [x], [v] ])
# Each inner bracket is one row, so this creates a 2x1 column vector
initial_state = matrix.Matrix([ [initial_position],
                                [velocity] ])
```

### Transformation matrix

Next, define the state transformation matrix and print it out!

```
# Define the state transformation matrix
dt = 1
tx_matrix = matrix.Matrix([ [1, dt],
                            [0, 1] ])

print(tx_matrix)
```

### TODO: Modify the predict state function to use matrix multiplication

Now that you know how to create matrices, modify the `predict_state` function to work with them!

Note: you can multiply a matrix A by a matrix B by writing `A*B` and it will return a new matrix.

```
# The matrix-based predict state function
def predict_state_mtx(state, dt):
    ## TODO: Assume that the state passed in is a Matrix object
    ## Using a constant velocity model and a transformation matrix
    ## Create and return the new, predicted state!
    tx_matrix = matrix.Matrix([ [1, dt],
                                [0, 1] ])

    predicted_state = tx_matrix * state

    return predicted_state
```

### Test cell

Here is an initial state vector and dt to test your function with!

```
# initial state variables
initial_position = 10 # meters
velocity = 30 # m/s

# Initial state vector
initial_state = matrix.Matrix([ [initial_position],
                                [velocity] ])

print('The initial state is: ' + str(initial_state))

# after 2 seconds make a prediction using the new function
state_est1 = predict_state_mtx(initial_state, 2)

print('State after 2 seconds is: ' + str(state_est1))

# Make more predictions!

# after 3 more
state_est2 = predict_state_mtx(state_est1, 3)

print('State after 3 more seconds is: ' + str(state_est2))

# after 3 more
state_est3 = predict_state_mtx(state_est2, 3)

print('Final state after 3 more seconds is: ' + str(state_est3))
```
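The `matrix.py` module imported above is not included in this excerpt. For reference, a minimal stand-in with the interface the notebook relies on (a constructor that takes a list of rows, and a `*` operator for matrix multiplication) could look like the sketch below; the actual course file may differ.

```
# Hypothetical minimal replacement for matrix.py: stores a grid as a list of
# rows and implements only the multiplication used in this notebook.
class Matrix:

    def __init__(self, grid):
        self.grid = grid              # e.g. [[x], [v]] is a 2x1 column vector
        self.rows = len(grid)
        self.cols = len(grid[0])

    def __mul__(self, other):
        # Standard matrix multiplication; inner dimensions must agree.
        assert self.cols == other.rows, "Incompatible dimensions"
        product = [[sum(self.grid[i][k] * other.grid[k][j]
                        for k in range(self.cols))
                    for j in range(other.cols)]
                   for i in range(self.rows)]
        return Matrix(product)

    def __repr__(self):
        return str(self.grid)
```

Whichever implementation is used, the first prediction in the test cell should correspond to the state `[[70], [30]]`: position 10 + 30*2 = 70 meters, with the velocity left unchanged by the constant velocity model.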
##### Copyright 2021 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Simple TFX Pipeline Tutorial using Penguin dataset ***A Short tutorial to run a simple TFX pipeline.*** Note: We recommend running this tutorial in a Colab notebook, with no setup required! Just click "Run in Google Colab". <div class="devsite-table-wrapper"><table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png"/>View on TensorFlow.org</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/tfx/blob/master/docs/tutorials/tfx/penguin_simple.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a></td> <td><a target="_blank" href="https://github.com/tensorflow/tfx/tree/master/docs/tutorials/tfx/penguin_simple.ipynb"> <img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td> <td><a href="https://storage.googleapis.com/tensorflow_docs/tfx/docs/tutorials/tfx/penguin_simple.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a></td> </table></div> In this notebook-based tutorial, we will create and run a TFX pipeline for a simple classification model. The pipeline will consist of three essential TFX components: ExampleGen, Trainer and Pusher. The pipeline includes the most minimal ML workflow like importing data, training a model and exporting the trained model. Please see [Understanding TFX Pipelines](https://www.tensorflow.org/tfx/guide/understanding_tfx_pipelines) to learn more about various concepts in TFX. ## Set Up We first need to install the TFX Python package and download the dataset which we will use for our model. ### Upgrade Pip To avoid upgrading Pip in a system when running locally, check to make sure that we are running in Colab. Local systems can of course be upgraded separately. ``` try: import colab !pip install --upgrade pip except: pass ``` ### Install TFX ``` !pip install -U tfx ``` ### Did you restart the runtime? If you are using Google Colab, the first time that you run the cell above, you must restart the runtime by clicking above "RESTART RUNTIME" button or using "Runtime > Restart runtime ..." menu. This is because of the way that Colab loads packages. Check the TensorFlow and TFX versions. ``` import tensorflow as tf print('TensorFlow version: {}'.format(tf.__version__)) from tfx import v1 as tfx print('TFX version: {}'.format(tfx.__version__)) ``` ### Set up variables There are some variables used to define a pipeline. You can customize these variables as you want. By default all output from the pipeline will be generated under the current directory. ``` import os PIPELINE_NAME = "penguin-simple" # Output directory to store artifacts generated from the pipeline. 
PIPELINE_ROOT = os.path.join('pipelines', PIPELINE_NAME)

# Path to a SQLite DB file to use as an MLMD storage.
METADATA_PATH = os.path.join('metadata', PIPELINE_NAME, 'metadata.db')

# Output directory where created models from the pipeline will be exported.
SERVING_MODEL_DIR = os.path.join('serving_model', PIPELINE_NAME)

from absl import logging
logging.set_verbosity(logging.INFO)  # Set default logging level.
```

### Prepare example data

We will download the example dataset for use in our TFX pipeline. The dataset we are using is the [Palmer Penguins dataset](https://allisonhorst.github.io/palmerpenguins/articles/intro.html), which is also used in other [TFX examples](https://github.com/tensorflow/tfx/tree/master/tfx/examples/penguin).

There are four numeric features in this dataset:

- culmen_length_mm
- culmen_depth_mm
- flipper_length_mm
- body_mass_g

All features were already normalized to have range [0,1]. We will build a classification model which predicts the `species` of penguins.

Because TFX ExampleGen reads inputs from a directory, we need to create a directory and copy the dataset to it.

```
import urllib.request
import tempfile

DATA_ROOT = tempfile.mkdtemp(prefix='tfx-data')  # Create a temporary directory.
_data_url = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/penguin/data/penguins_processed.csv'
_data_filepath = os.path.join(DATA_ROOT, "data.csv")
urllib.request.urlretrieve(_data_url, _data_filepath)
```

Take a quick look at the CSV file.

```
!head {_data_filepath}
```

You should be able to see five values. `species` is one of 0, 1 or 2, and all other features should have values between 0 and 1.

## Create a pipeline

TFX pipelines are defined using Python APIs. We will define a pipeline which consists of the following three components.

- CsvExampleGen: Reads in data files and converts them to the TFX internal format for further processing. There are multiple [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)s for various formats. In this tutorial, we will use CsvExampleGen which takes CSV file input.
- Trainer: Trains an ML model. The [Trainer component](https://www.tensorflow.org/tfx/guide/trainer) requires model definition code from users. You can use TensorFlow APIs to specify how to train a model and save it in a _saved_model_ format.
- Pusher: Copies the trained model outside of the TFX pipeline. The [Pusher component](https://www.tensorflow.org/tfx/guide/pusher) can be thought of as the deployment step for the trained ML model.

Before actually defining the pipeline, we need to write the model code for the Trainer component first.

### Write model training code

We will create a simple DNN model for classification using the TensorFlow Keras API. This model training code will be saved to a separate file.

In this tutorial we will use the [Generic Trainer](https://www.tensorflow.org/tfx/guide/trainer#generic_trainer) of TFX, which supports Keras-based models. You need to write a Python file containing a `run_fn` function, which is the entry point for the `Trainer` component.
``` _trainer_module_file = 'penguin_trainer.py' %%writefile {_trainer_module_file} from typing import List from absl import logging import tensorflow as tf from tensorflow import keras from tensorflow_transform.tf_metadata import schema_utils from tfx import v1 as tfx from tfx_bsl.public import tfxio from tensorflow_metadata.proto.v0 import schema_pb2 _FEATURE_KEYS = [ 'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g' ] _LABEL_KEY = 'species' _TRAIN_BATCH_SIZE = 20 _EVAL_BATCH_SIZE = 10 # Since we're not generating or creating a schema, we will instead create # a feature spec. Since there are a fairly small number of features this is # manageable for this dataset. _FEATURE_SPEC = { **{ feature: tf.io.FixedLenFeature(shape=[1], dtype=tf.float32) for feature in _FEATURE_KEYS }, _LABEL_KEY: tf.io.FixedLenFeature(shape=[1], dtype=tf.int64) } def _input_fn(file_pattern: List[str], data_accessor: tfx.components.DataAccessor, schema: schema_pb2.Schema, batch_size: int = 200) -> tf.data.Dataset: """Generates features and label for training. Args: file_pattern: List of paths or patterns of input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. schema: schema of the input data. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ return data_accessor.tf_dataset_factory( file_pattern, tfxio.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_LABEL_KEY), schema=schema).repeat() def _build_keras_model() -> tf.keras.Model: """Creates a DNN Keras model for classifying penguin data. Returns: A Keras Model. """ # The model below is built with Functional API, please refer to # https://www.tensorflow.org/guide/keras/overview for all API options. inputs = [keras.layers.Input(shape=(1,), name=f) for f in _FEATURE_KEYS] d = keras.layers.concatenate(inputs) for _ in range(2): d = keras.layers.Dense(8, activation='relu')(d) outputs = keras.layers.Dense(3)(d) model = keras.Model(inputs=inputs, outputs=outputs) model.compile( optimizer=keras.optimizers.Adam(1e-2), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy()]) model.summary(print_fn=logging.info) return model # TFX Trainer will call this function. def run_fn(fn_args: tfx.components.FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. """ # This schema is usually either an output of SchemaGen or a manually-curated # version provided by pipeline author. A schema can also derived from TFT # graph if a Transform component is used. In the case when either is missing, # `schema_from_feature_spec` could be used to generate schema from very simple # feature_spec, but the schema returned would be very primitive. schema = schema_utils.schema_from_feature_spec(_FEATURE_SPEC) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, schema, batch_size=_TRAIN_BATCH_SIZE) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, schema, batch_size=_EVAL_BATCH_SIZE) model = _build_keras_model() model.fit( train_dataset, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps) # The result of the training should be saved in `fn_args.serving_model_dir` # directory. 
model.save(fn_args.serving_model_dir, save_format='tf')
```

Now you have completed all preparation steps to build a TFX pipeline.

### Write a pipeline definition

We define a function to create a TFX pipeline. A `Pipeline` object represents a TFX pipeline which can be run using one of the pipeline orchestration systems that TFX supports.

```
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
                     module_file: str, serving_model_dir: str,
                     metadata_path: str) -> tfx.dsl.Pipeline:
  """Creates a three component penguin pipeline with TFX."""
  # Brings data into the pipeline.
  example_gen = tfx.components.CsvExampleGen(input_base=data_root)

  # Uses a user-provided Python function that trains a model.
  trainer = tfx.components.Trainer(
      module_file=module_file,
      examples=example_gen.outputs['examples'],
      train_args=tfx.proto.TrainArgs(num_steps=100),
      eval_args=tfx.proto.EvalArgs(num_steps=5))

  # Pushes the model to a filesystem destination.
  pusher = tfx.components.Pusher(
      model=trainer.outputs['model'],
      push_destination=tfx.proto.PushDestination(
          filesystem=tfx.proto.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  # The following three components will be included in the pipeline.
  components = [
      example_gen,
      trainer,
      pusher,
  ]

  return tfx.dsl.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      metadata_connection_config=tfx.orchestration.metadata
      .sqlite_metadata_connection_config(metadata_path),
      components=components)
```

## Run the pipeline

TFX supports multiple orchestrators to run pipelines. In this tutorial we will use `LocalDagRunner`, which is included in the TFX Python package and runs pipelines in a local environment. We often call TFX pipelines "DAGs", which stands for directed acyclic graphs.

`LocalDagRunner` provides fast iterations for development and debugging. TFX also supports other orchestrators including Kubeflow Pipelines and Apache Airflow which are suitable for production use cases.

See [TFX on Cloud AI Platform Pipelines](https://www.tensorflow.org/tfx/tutorials/tfx/cloud-ai-platform-pipelines) or [TFX Airflow Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/airflow_workshop) to learn more about other orchestration systems.

Now we create a `LocalDagRunner` and pass a `Pipeline` object created from the function we already defined. The pipeline runs directly and you can see logs for the progress of the pipeline including ML model training.

```
tfx.orchestration.LocalDagRunner().run(
    _create_pipeline(
        pipeline_name=PIPELINE_NAME,
        pipeline_root=PIPELINE_ROOT,
        data_root=DATA_ROOT,
        module_file=_trainer_module_file,
        serving_model_dir=SERVING_MODEL_DIR,
        metadata_path=METADATA_PATH))
```

You should see "INFO:absl:Component Pusher is finished." at the end of the logs if the pipeline finished successfully, because the `Pusher` component is the last component of the pipeline.

The pusher component pushes the trained model to the `SERVING_MODEL_DIR`, which is the `serving_model/penguin-simple` directory if you did not change the variables in the previous steps. You can see the result from the file browser in the left-side panel in Colab, or using the following command:

```
# List files in created model directory.
!find {SERVING_MODEL_DIR}
```

## Next steps

You can find more resources on https://www.tensorflow.org/tfx/tutorials.

Please see [Understanding TFX Pipelines](https://www.tensorflow.org/tfx/guide/understanding_tfx_pipelines) to learn more about various concepts in TFX.
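If you want to check the exported model beyond listing the files, one option (not part of the tutorial itself) is to load the pushed SavedModel back into TensorFlow. The sketch below assumes the usual Pusher layout in which every push lands in its own numbered subdirectory under `SERVING_MODEL_DIR`; that layout is an assumption of this sketch.

```
# Load the most recently pushed SavedModel and list its serving signatures.
import glob

push_dirs = sorted(glob.glob(os.path.join(SERVING_MODEL_DIR, '*')))
loaded_model = tf.saved_model.load(push_dirs[-1])
print('Loaded model from:', push_dirs[-1])
print('Available signatures:', list(loaded_model.signatures.keys()))
```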
# Using a Different Corpus

zh_segment makes it easy to use a different corpus for word segmentation. If you simply want to "teach" the algorithm a single phrase it doesn't know, then read [this StackOverflow answer](http://stackoverflow.com/questions/20695825/english-word-segmentation-in-nlp).

Now, let's get a new corpus. For this example, we'll use the text from Jane Austen's *Pride and Prejudice*.

```
import requests
response = requests.get('https://www.gutenberg.org/ebooks/1342.txt.utf-8')
text = response.text
print(len(text))
```

Great. We've got a new corpus for `zh_segment`. Now let's look at what parts of the API we need to change. There's one function and two dictionaries: `zh_segment.clean`, `zh_segment.bigram_counts` and `zh_segment.unigram_counts`. We'll work on these in reverse.

```
import zh_segment

print(type(zh_segment.unigram_counts), type(zh_segment.bigram_counts))
print(list(zh_segment.unigram_counts.items())[:3])
print(list(zh_segment.bigram_counts.items())[:3])
```

OK, so `zh_segment.unigram_counts` is just a dictionary mapping unigrams to their counts. Let's write a function to tokenize our text.

```
import re

def tokenize(text):
    pattern = re.compile('[a-zA-Z]+')
    return (match.group(0) for match in pattern.finditer(text))

print(list(tokenize("Wait, what did you say?")))
```

Now we'll build our dictionaries.

```
from collections import Counter

zh_segment.unigram_counts = Counter(tokenize(text))

def pairs(iterable):
    iterator = iter(iterable)
    values = [next(iterator)]
    for value in iterator:
        values.append(value)
        yield ' '.join(values)
        del values[0]

zh_segment.bigram_counts = Counter(pairs(tokenize(text)))
```

That's it. Now, by default, `zh_segment.segment` lowercases all input and removes punctuation. In our corpus we have capitals, so we'll also have to change the `clean` function. Our heaviest hammer is to simply replace it with the identity function, which applies no sanitization to the input of `segment`.

```
def identity(value):
    return value

zh_segment.clean = identity

zh_segment.segment('wantofawife')
```

If you find this behaves poorly, then you may need to change the `zh_segment.TOTAL` variable to reflect the total of all unigrams. In our case that's simply:

```
zh_segment.TOTAL = float(sum(zh_segment.unigram_counts.values()))
```

zh_segment doesn't require any fancy machine learning training algorithms. Simply update the unigram and bigram count dictionaries and you're ready to go.
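As the StackOverflow answer linked at the top of this notebook suggests, you can also teach the segmenter a single phrase without replacing the whole corpus, simply by bumping the counts for that phrase's unigrams and bigrams. The sketch below illustrates the idea; the increment of 100 is arbitrary and worth tuning.

```
# Nudge the counts for one phrase and keep TOTAL in sync afterwards.
phrase = 'pride and prejudice'
words = phrase.split()

for word in words:
    zh_segment.unigram_counts[word] += 100
for bigram in pairs(words):
    zh_segment.bigram_counts[bigram] += 100

zh_segment.TOTAL = float(sum(zh_segment.unigram_counts.values()))

print(zh_segment.segment('prideandprejudice'))
```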
# Checking Container Dwell Times This works with the CSV export of ConFlowGen. Import libraries ``` import os import pathlib import ipywidgets as widgets import pandas as pd from IPython.display import Markdown import matplotlib.pyplot as plt from matplotlib import gridspec ``` Select input data ``` folder_of_this_jupyter_notebook = pathlib.Path.cwd() export_folder = os.path.join( folder_of_this_jupyter_notebook, os.pardir, os.pardir, os.pardir, "conflowgen", "data", "exports" ) folders = [ folder for folder in os.listdir(export_folder) if os.path.isdir( os.path.join( export_folder, folder ) ) ] dropdown_field = widgets.Dropdown( options=list(reversed(folders)), # always show the newest first description='', layout={'width': 'max-content'} ) dropdown_label = widgets.Label(value="Select the exported output: ") display(widgets.HBox([dropdown_label, dropdown_field])) path_to_selected_exported_content = os.path.join( export_folder, dropdown_field.value ) print("Working with directory " + path_to_selected_exported_content) ``` ## Load containers ``` path_to_containers = os.path.join( path_to_selected_exported_content, "containers.csv" ) print(f"Opening {path_to_containers}") df_containers = pd.read_csv(path_to_containers, index_col="id", dtype={ "delivered_by_truck": "Int64", "picked_up_by_truck": "Int64", "delivered_by_large_scheduled_vehicle": "Int64", "picked_up_by_large_scheduled_vehicle": "Int64" }) df_containers ``` Check number of large scheduled vehicles (deep sea vessels, feeders, barges, and trains). ``` df_containers.groupby(by="delivered_by_large_scheduled_vehicle").count() ``` ## Load scheduled vehicles Load the vehicles to enrich the information regarding the arrival and departure of the containers. ``` path_to_deep_sea_vessels = os.path.join( path_to_selected_exported_content, "deep_sea_vessels.csv" ) path_to_feeders = os.path.join( path_to_selected_exported_content, "feeders.csv" ) path_to_barges = os.path.join( path_to_selected_exported_content, "barges.csv" ) path_to_trains = os.path.join( path_to_selected_exported_content, "trains.csv" ) scheduled_vehicle_file_paths = { "deep_sea_vessels": path_to_deep_sea_vessels, "feeders": path_to_feeders, "barges": path_to_barges, "trains": path_to_trains } for name, path in scheduled_vehicle_file_paths.items(): print("Check file exists for vehicle " + name + ".") assert os.path.isfile(path) print("All files exist.") for name, path in list(scheduled_vehicle_file_paths.items()): print("Check file size for vehicle " + name) size_in_bytes = os.path.getsize(path) if size_in_bytes <= 4: print(" This file is empty, ignoring it in the analysis from now on") del scheduled_vehicle_file_paths[name] scheduled_vehicle_dfs = { name: pd.read_csv(path, index_col=0, parse_dates=["scheduled_arrival"]) for name, path in scheduled_vehicle_file_paths.items() } for name, df in scheduled_vehicle_dfs.items(): display(Markdown("#### " + name)) scheduled_vehicle_dfs[name]["vehicle_type"] = name display(scheduled_vehicle_dfs[name].sort_values(by="scheduled_arrival")) df_large_scheduled_vehicle = pd.concat( scheduled_vehicle_dfs.values() ) df_large_scheduled_vehicle.sort_index(inplace=True) df_large_scheduled_vehicle.info() df_large_scheduled_vehicle ``` Plot arrival pattern. 
``` plt.figure(figsize=(15, 3)) x, y, z = [], [], [] y_axis = [] y_scaling_factor = 2 for i, (name, df) in enumerate(scheduled_vehicle_dfs.items()): y_axis.append((i/y_scaling_factor, name)) if len(df) == 0: continue arrivals_and_capacity = df[["scheduled_arrival", "moved_capacity"]] for _, row in arrivals_and_capacity.iterrows(): event = row["scheduled_arrival"] moved_capacity = row["moved_capacity"] x.append(event) y.append(i / y_scaling_factor) z.append(moved_capacity / 20) plt.xticks(rotation=45) plt.yticks(*list(zip(*y_axis))) plt.scatter(x, y, s=z, color='gray') plt.ylim([-0.5, 1.5]) plt.show() ``` Transform data to check how many containers are delivered and picked up by which vehicle. ``` vehicle_to_teu_to_deliver = {} vehicle_to_teu_to_pickup = {} for i, container in df_containers.iterrows(): teu = container["length"] / 20 assert 1 <= teu <= 2.5 if container["delivered_by"] != "truck": vehicle = container["delivered_by_large_scheduled_vehicle"] if vehicle not in vehicle_to_teu_to_deliver.keys(): vehicle_to_teu_to_deliver[vehicle] = 0 vehicle_to_teu_to_deliver[vehicle] += teu if container["picked_up_by"] != "truck": vehicle = container["picked_up_by_large_scheduled_vehicle"] if vehicle not in vehicle_to_teu_to_pickup.keys(): vehicle_to_teu_to_pickup[vehicle] = 0 vehicle_to_teu_to_pickup[vehicle] += teu vehicle_to_teu_to_deliver, vehicle_to_teu_to_pickup ``` Add transformed data to vehicles. ``` s_delivery = pd.Series(vehicle_to_teu_to_deliver) s_pickup = pd.Series(vehicle_to_teu_to_pickup) df_large_scheduled_vehicle["capacity_delivery"] = s_delivery df_large_scheduled_vehicle["capacity_pickup"] = s_pickup df_large_scheduled_vehicle for large_scheduled_vehicle_id in df_large_scheduled_vehicle.index: delivered_teu = vehicle_to_teu_to_deliver.get(large_scheduled_vehicle_id, 0) picked_up_teu = vehicle_to_teu_to_pickup.get(large_scheduled_vehicle_id, 0) capacity_in_teu = df_large_scheduled_vehicle.loc[large_scheduled_vehicle_id, "capacity_in_teu"] assert delivered_teu <= capacity_in_teu, f"{delivered_teu} is more than {capacity_in_teu} for vehicle "\ f"with id {large_scheduled_vehicle_id}" assert picked_up_teu <= capacity_in_teu, f"{picked_up_teu} is more than {capacity_in_teu} for vehicle "\ f"with id {large_scheduled_vehicle_id}" ``` ## Load trucks ``` path_to_trucks = os.path.join( path_to_selected_exported_content, "trucks.csv" ) assert os.path.isfile(path_to_trucks) df_truck = pd.read_csv( path_to_trucks, index_col=0, parse_dates=[ # Pickup "planned_container_pickup_time_prior_berthing", "realized_container_pickup_time", # Delivery "planned_container_delivery_time_at_window_start", "realized_container_delivery_time" ]) df_truck assert len(df_truck[df_truck["picks_up_container"] & pd.isna(df_truck["realized_container_pickup_time"])]) == 0, \ "If a truck picks up a container, it should always have a realized container pickup time" assert len(df_truck[df_truck["delivers_container"] & pd.isna(df_truck["realized_container_delivery_time"])]) == 0, \ "If a truck deliver a container, it should always have a realized container delivery time" assert len(df_truck[~(df_truck["delivers_container"] | df_truck["picks_up_container"])]) == 0, \ "There is no truck that neither delivers or picks up a container" ``` This is the probability of the truck to show up at any given hour of the week (by index). 
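The cell that actually derives this distribution is not part of this excerpt, so here is one way it could be computed from `df_truck` (a sketch, not the original notebook's code): collect every realized truck arrival, map it to an hour-of-the-week index (0 = Monday 00:00 through 167 = Sunday 23:00), and normalize the counts.

```
# Combine realized delivery and pickup times into one series of truck arrivals.
truck_arrivals = pd.concat([
    df_truck.loc[df_truck["delivers_container"], "realized_container_delivery_time"],
    df_truck.loc[df_truck["picks_up_container"], "realized_container_pickup_time"],
]).dropna()

# Hour of the week: 0 = Monday 00:00, ..., 167 = Sunday 23:00.
hour_of_the_week = truck_arrivals.dt.dayofweek * 24 + truck_arrivals.dt.hour
truck_arrival_probability = hour_of_the_week.value_counts(normalize=True).sort_index()

truck_arrival_probability.plot(color="gray")
plt.xlabel("Hour of the week (0 = Monday 00:00)")
plt.ylabel("Probability of a truck arrival")
plt.show()
```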
```
delivered_and_picked_up_by_large_vessels_df = df_containers.loc[
    ~pd.isna(df_containers["picked_up_by_large_scheduled_vehicle"])
].join(
    df_large_scheduled_vehicle,
    on="picked_up_by_large_scheduled_vehicle",
    rsuffix="_picked_up"
).loc[
    ~pd.isna(df_containers["delivered_by_large_scheduled_vehicle"])
].join(
    df_large_scheduled_vehicle,
    on="delivered_by_large_scheduled_vehicle",
    rsuffix="_delivered_by"
)

delivered_and_picked_up_by_large_vessels_df

dwell_time = (
    delivered_and_picked_up_by_large_vessels_df["scheduled_arrival"]
    - delivered_and_picked_up_by_large_vessels_df["scheduled_arrival_delivered_by"]
)

dwell_time.describe()

dwell_time.astype("timedelta64[h]").plot.hist(bins=30, color="gray")
plt.xlabel("Hours between delivery and onward transportation (except trucks)")
plt.ylabel("Number of containers")
plt.show()
```
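As a small follow-up (not in the original notebook), it can be worth confirming that no dwell time is negative, which would mean a container's onward vessel is scheduled before the vessel that delivers it, and a summary in days is often easier to communicate than one in hours.

```
# Sanity check and an alternative summary of the same dwell times.
negative_dwell_times = (dwell_time < pd.Timedelta(0)).sum()
print(f"Containers with a negative dwell time: {negative_dwell_times}")

print(dwell_time.astype("timedelta64[D]").describe())
```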
# 2019 Formula One World Championship <div style="text-align: justify"> A Formula One season consists of a series of races, known as Grands Prix (French for ''grand prizes' or 'great prizes''), which take place worldwide on purpose-built circuits and on public roads. The results of each race are evaluated using a points system to determine two annual World Championships: one for drivers, the other for constructors. Drivers must hold valid Super Licences, the highest class of racing licence issued by the FIA. The races must run on tracks graded "1" (formerly "A"), the highest grade-rating issued by the FIA.Most events occur in rural locations on purpose-built tracks, but several events take place on city streets. There are a number of F1 races coming up: Singapore GP: Date: Sun, Sep 22, 8:10 AM Russian GP: Date: Sun, Sep 29, 7:10 AM Japanese GP: Date: Sun, Oct 13, 1:10 AM Mexican GP Date: Sun, Oct 13, 1:10 AM The Singaporean Grand Prix this weekend and the Russian Grand Prix the weekend after, as you can see here. The 2019 driver standings are given here. Given these standings: </div> # Lets Answer few fun questions? ``` #A Probability Distribution; an {outcome: probability} mapping. # Make probabilities sum to 1.0; assert no negative probabilities class ProbDist(dict): """A Probability Distribution; an {outcome: probability} mapping.""" def __init__(self, mapping=(), **kwargs): self.update(mapping, **kwargs) total = sum(self.values()) for outcome in self: self[outcome] = self[outcome] / total assert self[outcome] >= 0 def p(event, space): """The probability of an event, given a sample space of outcomes. event: a collection of outcomes, or a predicate that is true of outcomes in the event. space: a set of outcomes or a probability distribution of {outcome: frequency} pairs.""" # if event is a predicate it, "unroll" it as a collection if is_predicate(event): event = such_that(event, space) # if space is not an equiprobably collection (a simple set), # but a probability distribution instead (a dictionary set), # then add (union) the probabilities for all favorable outcomes if isinstance(space, ProbDist): return sum(space[o] for o in space if o in event) # simplest case: what we played with in our previous lesson else: return Fraction(len(event & space), len(space)) is_predicate = callable # Here we either return a simple collection in the case of equiprobable outcomes, or a dictionary collection in the # case of non-equiprobably outcomes def such_that(predicate, space): """The outcomes in the sample pace for which the predicate is true. If space is a set, return a subset {outcome,...} with outcomes where predicate(element) is true; if space is a ProbDist, return a ProbDist {outcome: frequency,...} with outcomes where predicate(element) is true.""" if isinstance(space, ProbDist): return ProbDist({o:space[o] for o in space if predicate(o)}) else: return {o for o in space if predicate(o)} ``` # Question Set 1 what is the Probability Distribution for each F1 driver to win the Singaporean Grand Prix? What is the Probability Distribution for each F1 driver to win both the Singaporean and Russian Grand Prix? What is the probability for Mercedes to win both races? What is the probability for Mercedes to win at least one race? Note that Mercedes, and each other racing team, has two drivers per race. # Solution 1. what is the Probability Distribution for each F1 driver to win the Singaporean Grand Prix? 
``` SGP = ProbDist(LH=284,VB=221,CL=182,MV=185,SV=169,PG=65,CS=58,AA=34,DR=34,DK=33,NH=31,LN=25,KR=31,SP=27,LS=19,KM=18,RG=8,AG=3,RK=1, GR=0) print ("The probability of each driver winnning Singaporean Grand Prix ") SGP #Driver standing divided by / total of all driver standings, SGP returns total probability as 1 ``` 2. What is the Probability Distribution for each F1 driver to win both the Singaporean and Russian Grand Prix? ``` SGP = ProbDist( LH=284,VB=221,CL=182,MV=185,SV=169,PG=65,CS=58,AA=34,DR=34,DK=33,NH=31,LN=25,KR=31,SP=27,LS=19,KM=18, RG=8,AG=3,RK=1,GR=0) # data taken on saturday before race starts for Singapore RGP = ProbDist( LH=296,VB=231,CL=200,MV=200,SV=194,PG=69,CS=58,AA=42,DR=34,DK=33,NH=33,LN=31,KR=31,SP=27,LS=19,KM=18, RG=8,AG=4,RK=1,GR=0) # data taken on saturday before race starts for Russia #perfoms joint probabilities on SGP and RGP probability distributions def joint(A, B, sep=''): """The joint distribution of two independent probability distributions. Result is all entries of the form {a+sep+b: P(a)*P(b)}""" return ProbDist({a + sep + b: A[a] * B[b] for a in A for b in B}) bothSGPRGP= joint(SGP, RGP, ' ') print ("The probability of each driver winnning Singaporean Grand Prix and Russian Grand Prix") bothSGPRGP ``` 3. What is the probability for Mercedes to win both races? ``` def mercedes_T(outcome): return outcome == "VB" or outcome == "LH" mercedesWinningSGPRace = p(mercedes_T, SGP) #calculate probability of mercedes winning Singapore Frand Pix def mercedes_T(outcome): return outcome == "VB" or outcome == "LH" mercedesWinningRGPRace = p(mercedes_T, RGP) #calculate probability of mercedes winning Russia Grand Pix print ("The probability of mercedes winnning both the races ") mercedesWinningBothRaces = mercedesWinningRGPRace * mercedesWinningSGPRace mercedesWinningBothRaces #probability of two events occurring together as independent events (P1 * P2)= P ``` 4. What is the probability for Mercedes to win at least one race? ``` def p(event, space): """The probability of an event, given a sample space of outcomes. event: a collection of outcomes, or a predicate that is true of outcomes in the event. space: a set of outcomes or a probability distribution of {outcome: frequency} pairs.""" # if event is a predicate it, "unroll" it as a collection if is_predicate(event): event = such_that(event, space) # if space is not an equiprobably collection (a simple set), # but a probability distribution instead (a dictionary set), # then add (union) the probabilities for all favorable outcomes if isinstance(space, ProbDist): return sum(space[o] for o in space if o in event) # simplest case: what we played with in our previous lesson else: return Fraction(len(event & space), len(space)) is_predicate = callable # Here we either return a simple collection in the case of equiprobable outcomes, or a dictionary collection in the # case of non-equiprobably outcomes def such_that(predicate, space): """The outcomes in the sample pace for which the predicate is true. 
If space is a set, return a subset {outcome,...} with outcomes where predicate(element) is true; if space is a ProbDist, return a ProbDist {outcome: frequency,...} with outcomes where predicate(element) is true.""" if isinstance(space, ProbDist): return ProbDist({o:space[o] for o in space if predicate(o)}) else: return {o for o in space if predicate(o)} mercedesWinningAtleastOneRace = mercedesWinningBothRaces + (mercedesWinningRGPRace * (1 - mercedesWinningSGPRace))+mercedesWinningSGPRace * (1 - mercedesWinningRGPRace) print ("The probability of mercedes winnning at least one of the races ") mercedesWinningAtleastOneRace #probability of an event occurring at least once, it will be the complement of the probability of the event never occurring. ``` # Question Set 2 If Mercedes wins the first race, what is the probability that Mercedes wins the next one? If Mercedes wins at least one of these two races, what is the probability Mercedes wins both races? How about Ferrari, Red Bull, and Renault? # Solution If Mercedes wins the first race, what is the probability that Mercedes wins the next one? If Mercedes wins at least one of these two races, what is the probability Mercedes wins both races? How about Ferrari, Red Bull, and Renault? ``` SGP = ProbDist( LH=284,VB=221,CL=182,MV=185,SV=169,PG=65,CS=58,AA=34,DR=34,DK=33,NH=31,LN=25,KR=31,SP=27,LS=19,KM=18, RG=8,AG=3,RK=1,GR=0) RGP = ProbDist( LH=296,VB=231,CL=200,MV=200,SV=194,PG=69,CS=58,AA=42,DR=34,DK=33,NH=33,LN=31,KR=31,SP=27,LS=19,KM=18, RG=8,AG=4,RK=1,GR=0) Weather = ProbDist(RA=1, SU=1, SN=1, CL=1, FO=1) def Mercedes_Win_First(outcome): return outcome.startswith('LH') or outcome.startswith('VB') #choose prob of first set def Mercedes_Win_Second(outcome): return outcome.endswith('LH') or outcome.endswith('VB') p(Mercedes_Win_Second, such_that(Mercedes_Win_First,bothSGPRGP)) #given first race is won, the second will be won def Mercedes_WinBoth(outcome): return 'LH LH' in outcome or 'LH VB' in outcome or 'VB LH' in outcome or 'VB VB' in outcome def Mercedes_Win(outcome): return 'LH' in outcome or 'VB' in outcome p(Mercedes_WinBoth, such_that(Mercedes_Win,bothSGPRGP)) # (LH,LH VB,VB LH,VB VB,LH) 4 groups to pickup provided first race is won for the both event ``` If Ferrari wins the first race, what is the probability that Ferrari wins the next one? ``` def Ferrari_WinBoth(outcome): return 'CL CL' in outcome or 'CL SV' in outcome or 'SV SV' in outcome or 'SV CL' in outcome def Ferrari_Win(outcome): return 'CL' in outcome or 'SV' in outcome p(Ferrari_WinBoth, such_that(Ferrari_Win,bothSGPRGP)) ``` If RedBull wins the first race, what is the probability that RedBull wins the next one ``` def RedBull_WinBoth(outcome): return 'MV MV' in outcome or 'MV AA' in outcome or 'AA AA' in outcome or 'AA MV' in outcome def RedBull_Win(outcome): return 'MV' in outcome or 'AA' in outcome p(RedBull_WinBoth, such_that(RedBull_Win,bothSGPRGP)) ``` If Renault wins the first race, what is the probability that Renault wins the next one? ``` def Renault_WinBoth(outcome): return 'DR DR' in outcome or 'DR NH' in outcome or 'NH NH' in outcome or 'NH DR' in outcome def Renault_Win(outcome): return 'DR' in outcome or 'NH' in outcome p(Renault_WinBoth, such_that(Renault_Win,bothSGPRGP)) ``` # Question Set 3 Mercedes wins one of these two races on a rainy day. What is the probability Mercedes wins both races, assuming races can be held on either rainy, sunny, cloudy, snowy or foggy days? 
Assume that rain, sun, clouds, snow, and fog are the only possible weather conditions on race tracks.

# Solution

Mercedes wins one of these two races on a rainy day. What is the probability Mercedes wins both races, assuming races can be held on either rainy, sunny, cloudy, snowy or foggy days? Assume that rain, sun, clouds, snow, and fog are the only possible weather conditions on race tracks.

```
# Create a probability distribution for the given weather conditions, where p(weather) = 0.20 for each
GivenFiveWeatherConditions = ProbDist(
    RainyDay=1,
    SunnyDay=1,
    CloudyDay=1,
    SnowyDay=1,
    FoggyDay=1
)
GivenFiveWeatherConditions

# Performs the joint of the SGP & weather and the RGP & weather probability distributions respectively
def joint(A, B, A1, B1, sep=''):
    """The joint distribution of four independent probability distributions.
    Result is all entries of the form {a+sep+a1+sep+b+sep+b1: P(a)*P(b)*P(a1)*P(b1)}"""
    return ProbDist({a + sep + a1 + sep + b + sep + b1: A[a] * B[b] * A1[a1] * B1[b1]
                     for a in A for b in B for a1 in A1 for b1 in B1})

bothSGPRGPWeather = joint(SGP, RGP, GivenFiveWeatherConditions, GivenFiveWeatherConditions, ' ')
bothSGPRGPWeather

def Mercedes_Wins_Race_On_Any_Rainy(outcome):
    return ('LH R' in outcome or 'VB R' in outcome)

such_that(Mercedes_Wins_Race_On_Any_Rainy, bothSGPRGPWeather)

def Mercedes_Wins_Race_On_Both_Rain(outcome):
    return ('LH' in outcome and 'VB' in outcome) or (outcome.count('LH') == 2) or (outcome.count('VB') == 2)

p(Mercedes_Wins_Race_On_Both_Rain, such_that(Mercedes_Wins_Race_On_Any_Rainy, bothSGPRGPWeather))
```

End!
<a href="https://colab.research.google.com/github/JSJeong-me/KOSA-Big-Data_Vision/blob/main/Model/99_kaggle_credit_card_analysis_and_prediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Importing Packages ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import warnings import os from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB from imblearn.over_sampling import SMOTE from sklearn.metrics import confusion_matrix,ConfusionMatrixDisplay,classification_report,plot_roc_curve,accuracy_score pd.set_option('display.max_columns',25) warnings.filterwarnings('ignore') # Importing Dataset data = pd.read_csv(r'./credit_cards_dataset.csv') data.head(10) data.info() #info shows that there is no null values and all the features are numeric data.describe(include='all') # Descriptive analysis data.rename(columns={'PAY_0':'PAY_1','default.payment.next.month':'def_pay'},inplace=True) #rename few columns ``` # Exploratory Data Analysis ``` plt.figure(figsize=(10,6)) data.groupby('def_pay')['AGE'].hist(legend=True) plt.show() #here we can see that, between age 20 to 45 most of the people will fall into.. sns.distplot(data['AGE']) plt.title('Age Distribution') sns.boxplot('def_pay','LIMIT_BAL',data=data) data[data['LIMIT_BAL']>700000].sort_values(ascending=False,by='LIMIT_BAL') data[data['LIMIT_BAL']>700000].value_counts().sum() plt.figure(figsize=(16,5)) plt.subplot(121) sns.boxplot(x='SEX', y= 'AGE',data = data) sns.stripplot(x='SEX', y= 'AGE',data = data,linewidth = 0.9) plt.title ('Sex vs AGE') plt.subplot(122) ax = sns.countplot(x='EDUCATION',data = data, order= data['EDUCATION'].value_counts().index) plt.title ('EDUCATION') labels = data['EDUCATION'].value_counts() for i, v in enumerate(labels): ax.text(i,v+100,v, horizontalalignment='center') plt.show() plt.figure(figsize=(20,5)) plt.subplot(121) sns.boxplot(x='def_pay', y= 'AGE',data = data) sns.stripplot(x='def_pay', y= 'AGE',data = data,linewidth = 0.9) plt.title ('Age vs def_pay') ax2=plt.subplot(1,2,2) pay_edu = data.groupby('EDUCATION')['def_pay'].value_counts(normalize=True).unstack() pay_edu = pay_edu.sort_values(ascending=False,by=1) pay_edu.plot(kind='bar',stacked= True,color=["#3f3e6fd1", "#85c6a9"], ax = ax2) plt.legend(loc=(1.04,0)) plt.title('Education vs def_pay') plt.show() # function for Multivariate analysis # This method is used to show point estimates and confidence intervals using scatter plot graphs def plotfig(df1,col11,col22,deft1): plt.figure(figsize=(16,6)) plt.subplot(121) sns.pointplot(df1[col11], df1[deft1],hue = df1[col22]) plt.subplot(122) sns.countplot(df1[col11], hue = df1[col22]) plt.show() def varplot(df2, col1, col2, deft, bin=3, unique=10): df=df2.copy() if len(df[col1].unique())>unique: df[col1+'cut']= pd.qcut(df[col1],bin) if len(df[col2].unique())>unique: df[col2+'cut']= pd.qcut(df[col2],bin) return plotfig(df,col1+'cut',col2+'cut',deft) else: df[col2+'cut']= df[col2] return plotfig(df,col1+'cut',col2+'cut',deft) else: return plotfig(df,col1,col2,deft) varplot(data,'AGE','SEX','def_pay',3) varplot(data,'LIMIT_BAL','AGE','def_pay',3) # Univariate Analysis df 
= data.drop('ID',1) nuniq = df.nunique() df = data[[col for col in df if nuniq[col]>1 and nuniq[col]<50]] row, cols = df.shape colnames = list(df) graph_perrow = 5 graph_row = (cols+graph_perrow-1)/ graph_perrow max_graph = 20 plt.figure(figsize=(graph_perrow*12,graph_row*8)) for i in range(min(cols,max_graph)): plt.subplot(graph_row,graph_perrow,i+1) coldf = df.iloc[:,i] if (not np.issubdtype(type(coldf),np.number)): sns.countplot(colnames[i],data= df, order= df[colnames[i]].value_counts().index) else: coldf.hist() plt.title(colnames[i]) plt.show() cont_var = df.select_dtypes(exclude='object').columns nrow = (len(cont_var)+5-1)/5 plt.figure(figsize=(12*5,6*2)) for i,j in enumerate(cont_var): plt.subplot(nrow,5,i+1) sns.distplot(data[j]) plt.show() # from the above,we can see that we have maximum clients from 20-30 age group followed by 31-40. # Hence with increasing age group the number of clients that will default the payment next month is decreasing. # Hence we can see that Age is important feature to predict the default payment for next month. plt.subplots(figsize=(26,20)) corr = data.corr() sns.heatmap(corr,annot=True) plt.show() from statsmodels.stats.outliers_influence import variance_inflation_factor df= data.drop(['def_pay','ID'],1) vif = pd.DataFrame() vif['Features']= df.columns vif['vif']= [variance_inflation_factor(df.values,i) for i in range(df.shape[1])] vif # From this heatmap and VIF we can see that there are some multicolinearity(values >10) in the data which we can handle # simply doing feature engineering of some columns bill_tot = pd.DataFrame(data['BILL_AMT1']+data['BILL_AMT2']+data['BILL_AMT3']+data['BILL_AMT4']+data['BILL_AMT5']+data['BILL_AMT6'],columns=['bill_tot']) pay_tot =pd.DataFrame(data['PAY_1']+data['PAY_2']+data['PAY_3']+data['PAY_4']+data['PAY_5']+data['PAY_6'],columns=['pay_tot']) pay_amt_tot = pd.DataFrame(data['PAY_AMT1']+data['PAY_AMT2']+data['PAY_AMT3']+data['PAY_AMT4']+data['PAY_AMT5']+data['PAY_AMT6'],columns=['pay_amt_tot']) frames=[bill_tot,pay_tot,pay_amt_tot,data['def_pay']] tot = pd.concat(frames,axis=1) plt.figure(figsize=(20,4)) plt.subplot(131) sns.boxplot(x='def_pay',y='pay_tot',data = tot) sns.stripplot(x='def_pay',y='pay_tot',data = tot,linewidth=1) plt.subplot(132) sns.boxplot(x='def_pay', y='bill_tot',data=tot) sns.stripplot(x='def_pay', y='bill_tot',data=tot,linewidth=1) plt.subplot(133) sns.boxplot(x='def_pay', y='pay_amt_tot',data=tot) sns.stripplot(x='def_pay', y='pay_amt_tot',data=tot,linewidth=1) plt.show() sns.pairplot(tot[['bill_tot','pay_amt_tot','pay_tot','def_pay']],hue='def_pay') plt.show() sns.violinplot(x=tot['def_pay'], y= tot['bill_tot']) tot.drop('def_pay',1,inplace=True) data1 = pd.concat([data,tot],1) data1.groupby('def_pay')['EDUCATION'].hist(legend=True) plt.show() data1.groupby('def_pay')['AGE'].hist() plt.figure(figsize=(12,6)) # we know that the Bill_AMT is the most correlated column so using that we create a data df= pd.concat([bill_tot,df],1) df1 = df.drop(['BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6'],1) vif = pd.DataFrame() vif['Features']= df1.columns vif['vif']= [variance_inflation_factor(df1.values,i) for i in range(df1.shape[1])] vif # above we can see that now our data doesnt have multicollinearty(no values >10) data2 = df1.copy() # using the above plot we can create age bins age = [20,27,32,37,42,48,58,64,80] lab = [8,7,6,5,4,3,2,1] data2['AGE'] = pd.cut(data2['AGE'],bins= age,labels=lab) data2 = pd.concat([data2,data['def_pay']],1) data2 
data2.groupby('def_pay')['AGE'].hist() plt.figure(figsize=(12,6)) sns.countplot(data2['AGE']) data2.groupby('def_pay')['LIMIT_BAL'].hist(legend=True) plt.show() data2.columns ``` # Model Creation #### We know that we have a dataset where we have imbalance in the target variable #### you get a pretty high accuracy just by predicting the majority class, but you fail to capture the minority class #### which is most often the point of creating the model in the first place. #### Hence we try to create more model to get the best results ``` x= data2.drop(['def_pay'],1) y = data2['def_pay'] x_train,x_test, y_train, y_test = train_test_split(x,y,test_size=0.30, random_state=1) sc = StandardScaler() x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) # Accuracy is not the best metric to use when evaluating imbalanced datasets as it can be misleading. # hence we are using Classification Report and Confusion Matrix # function for accuracy and confusion matrix def res(y_test_valid,y_train_valid): cm_log = confusion_matrix(y_test,y_test_valid) ConfusionMatrixDisplay(cm_log).plot() print(classification_report(y_test,y_test_valid)) print('train_accuracy:',accuracy_score(y_train,y_train_valid)) print('test_accuracy:',accuracy_score(y_test,y_test_valid)) ``` # Logistic model ``` log_model= LogisticRegression() log_model.fit(x_train,y_train) y_pred_log = log_model.predict(x_test) y_pred_train = log_model.predict(x_train) res(y_pred_log,y_pred_train) plot_roc_curve(log_model,x_test,y_test) plt.show() # log model using Threshold threshold = 0.36 y_log_prob = log_model.predict_proba(x_test) y_train_log_prob = log_model.predict_proba(x_train) y_log_prob=y_log_prob[:,1] y_train_log_prob= y_train_log_prob[:,1] y_pred_log_prob = np.where(y_log_prob>threshold,1,0) y_pred_log_prob_train = np.where(y_train_log_prob>threshold,1,0) res(y_pred_log_prob,y_pred_log_prob_train) ``` # using Decision Tree model ``` dec_model = DecisionTreeClassifier() dec_model.fit(x_train,y_train) y_pred_dec = dec_model.predict(x_test) y_pred_dec_train = dec_model.predict(x_train) res(y_pred_dec,y_pred_dec_train) ``` ### Hyper parameter tuning for DecisionTree ``` parameters = {'max_depth':[1,2,3,4,5,6],'min_samples_split':[3,4,5,6,7],'min_samples_leaf':[1,2,3,4,5,6]} tree = GridSearchCV(dec_model, parameters,cv=10) tree.fit(x_train,y_train) tree.best_params_ # We know that Decision tree will have high variance due to which the model overfit hence we can reduce this by "Pruning" # By using the best parameter from GridSearchCV best parameters dec_model1 = DecisionTreeClassifier(max_depth=4,min_samples_split=10,min_samples_leaf=1) dec_model1.fit(x_train,y_train) y_pred_dec1 = dec_model1.predict(x_test) y_pred_dec_train1 = dec_model1.predict(x_train) res(y_pred_dec1,y_pred_dec_train1) ``` # Random Forest Model ``` rf_model = RandomForestClassifier(n_estimators=200, criterion='entropy', max_features='log2', max_depth=15, random_state=42) rf_model.fit(x_train,y_train) y_pred_rf = rf_model.predict(x_test) y_pred_rf_train = rf_model.predict(x_train) #res(y_pred_rf,y_pred_rf_train) from sklearn.metrics import confusion_matrix import itertools def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) 
plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") cnf_matrix = confusion_matrix(y_test, y_pred_rf) plt.figure() plot_confusion_matrix(cnf_matrix, classes=['Non_Default','Default'], normalize=False, title='Non Normalized confusion matrix') from sklearn.metrics import recall_score print("Recall score:"+ str(recall_score(y_test, y_pred_rf))) ``` ### Again hyper parameter tuning for Random Forest ``` parameters = {'n_estimators':[60,70,80],'max_depth':[1,2,3,4,5,6],'min_samples_split':[3,4,5,6,7], 'min_samples_leaf':[1,2,3,4,5,6]} clf = GridSearchCV(rf_model, parameters,cv=10) clf.fit(x_train,y_train) clf.best_params_ # {'max_depth': 5, # 'min_samples_leaf': 4, # 'min_samples_split': 3, # 'n_estimators': 70} # Decision trees frequently perform well on imbalanced data. so using RandomForest uses bagging of n_trees will be a better idea. rf_model = RandomForestClassifier(n_estimators=80, max_depth=6, min_samples_leaf=2, min_samples_split=5) rf_model.fit(x_train,y_train) y_pred_rf = rf_model.predict(x_test) y_pred_rf_train = rf_model.predict(x_train) #res(y_pred_rf,y_pred_rf_train) cnf_matrix = confusion_matrix(y_test, y_pred_rf) plt.figure() plot_confusion_matrix(cnf_matrix, classes=['Non_Default','Default'], normalize=False, title='Non Normalized confusion matrix') print("Recall score:"+ str(recall_score(y_test, y_pred_rf))) ``` # KNN model ``` # finding the K value error = [] for i in range(1,21,2): knn = KNeighborsClassifier(n_neighbors=i) knn.fit(x_train,y_train) preds = knn.predict(x_test) error.append(np.mean(preds!=y_test)) plt.plot(range(1,21,2), error, linestyle = 'dashed', marker ='o', mfc= 'red') # By using the elbow graph we can see that the k=5 will perform better in the first place so impute k = 5 knn_model = KNeighborsClassifier(n_neighbors=5) knn_model.fit(x_train,y_train) y_pred_knn = knn_model.predict(x_test) y_pred_knn_train = knn_model.predict(x_train) res(y_pred_knn,y_pred_knn_train) ``` # SVM Model ``` # use penalized learning algorithms that increase the cost of classification mistakes on the minority class. svm_model = SVC(class_weight='balanced', probability=True) svm_model.fit(x_train,y_train) y_pred_svm = svm_model.predict(x_test) y_pred_svm_train = svm_model.predict(x_train) res(y_pred_svm,y_pred_svm_train) # we can see in SVM that our recall of target variable is 0.56 which is the best we ever predicted. ``` # Naive Bayes ``` nb_model = GaussianNB() nb_model.fit(x_train,y_train) y_pred_nb = nb_model.predict(x_test) y_pred_nb_train = nb_model.predict(x_train) res(y_pred_nb,y_pred_nb_train) # But here Naive bayes out performs every other model though over accuracy is acceptable, checkout the recall ``` # Boosting model XGB Classifier ``` from xgboost import XGBClassifier xgb_model = XGBClassifier() xgb_model.fit(x_train, y_train) xgb_y_predict = xgb_model.predict(x_test) xgb_y_predict_train = xgb_model.predict(x_train) res(xgb_y_predict,xgb_y_predict_train) # Even Boosting technique gives low recall for our target variable # So from the above model we can conclude that the data imbalance is playing a major part # Hence we try to fix that by doing ReSample techniques ``` # Random under-sampling ### Let’s apply some of these resampling techniques, using the Python library imbalanced-learn. 
``` from collections import Counter from imblearn.under_sampling import RandomUnderSampler from imblearn.over_sampling import RandomOverSampler from imblearn.under_sampling import TomekLinks x= data2.drop(['def_pay'],1) y = data2['def_pay'] rus = RandomUnderSampler(random_state=1) x_rus, y_rus = rus.fit_resample(x,y) print('original dataset shape:', Counter(y)) print('Resample dataset shape', Counter(y_rus)) x_train,x_test, y_train, y_test = train_test_split(x_rus,y_rus,test_size=0.20, random_state=1) x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) # again we try to predict using Random Forest rf_model_rus = RandomForestClassifier(n_estimators=70, max_depth=5, min_samples_leaf=4, min_samples_split=3,random_state=1) rf_model_rus.fit(x_train,y_train) y_pred_rf_rus = rf_model_rus.predict(x_test) y_pred_rf_rus_train = rf_model_rus.predict(x_train) res(y_pred_rf_rus,y_pred_rf_rus_train) ``` # Random over-sampling ``` x= data2.drop(['def_pay'],1) y = data2['def_pay'] ros = RandomOverSampler(random_state=42) x_ros, y_ros = ros.fit_resample(x, y) print('Original dataset shape', Counter(y)) print('Resample dataset shape', Counter(y_ros)) x_train,x_test, y_train, y_test = train_test_split(x_ros,y_ros,test_size=0.20, random_state=1) x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) rf_model_ros = RandomForestClassifier(n_estimators=70, max_depth=5, min_samples_leaf=4, min_samples_split=3,random_state=1) rf_model_ros.fit(x_train,y_train) y_pred_rf_ros = rf_model_ros.predict(x_test) y_pred_rf_ros_train = rf_model_ros.predict(x_train) res(y_pred_rf_ros,y_pred_rf_ros_train) ``` # Under-sampling: Tomek links ``` x= data2.drop(['def_pay'],1) y = data2['def_pay'] tl = TomekLinks(sampling_strategy='majority') x_tl, y_tl = tl.fit_resample(x,y) print('Original dataset shape', Counter(y)) print('Resample dataset shape', Counter(y_tl)) x_train,x_test, y_train, y_test = train_test_split(x_tl,y_tl,test_size=0.20, random_state=1) x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) rf_model_tl = RandomForestClassifier(n_estimators=70, max_depth=5, min_samples_leaf=4, min_samples_split=3,random_state=1) rf_model_tl.fit(x_train,y_train) y_pred_rf_tl = rf_model_tl.predict(x_test) y_pred_rf_tl_train = rf_model_tl.predict(x_train) res(y_pred_rf_tl,y_pred_rf_tl_train) ``` # Synthetic Minority Oversampling Technique (SMOTE) ``` from imblearn.over_sampling import SMOTE smote = SMOTE() x_smote, y_smote = smote.fit_resample(x, y) print('Original dataset shape', Counter(y)) print('Resample dataset shape', Counter(y_smote)) x_train,x_test, y_train, y_test = train_test_split(x_smote,y_smote,test_size=0.20, random_state=1) x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) x_train = pd.DataFrame(x_train).fillna(0) x_test = pd.DataFrame(x_test).fillna(0) rf_model_smote = RandomForestClassifier(n_estimators=70, max_depth=5, min_samples_leaf=4, min_samples_split=3,random_state=1) rf_model_smote.fit(x_train,y_train) y_pred_rf_smote = rf_model_smote.predict(x_test) y_pred_rf_smote_train = rf_model_smote.predict(x_train) res(y_pred_rf_smote,y_pred_rf_smote_train) ``` ### Finally, using SMOTE we can see that accuracy, recall, and precision are all roughly balanced ### Though all of the above models perform well in terms of accuracy, on an imbalanced dataset like this #### we actually prefer other performance metrics ### We get better results when we run SVM and Naive Bayes on our original data ### and those models show neither high variance nor too much bias ### But when we over- or under-sample the data, other metrics such as sensitivity and specificity improve ### Hence we can conclude that using resampling techniques gives better results
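#### As a possible follow-up (not part of the original analysis): resampling only inside each cross-validation fold keeps synthetic or dropped samples out of the test data. Below is a minimal sketch using imblearn's Pipeline, assuming `data2` and the imports above are available.
```
# Hypothetical follow-up sketch: resample inside each CV fold so the test fold stays untouched.
# Assumes `data2` from above is available; hyperparameters mirror the GridSearchCV result used earlier.
from imblearn.pipeline import Pipeline
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

x = data2.drop(['def_pay'], axis=1)
y = data2['def_pay']

pipe = Pipeline(steps=[
    ('smote', SMOTE(random_state=1)),   # applied only to the training fold
    ('rf', RandomForestClassifier(n_estimators=70, max_depth=5,
                                  min_samples_leaf=4, min_samples_split=3,
                                  random_state=1)),
])

# recall of the minority (default) class, scored on the untouched test folds
recall_cv = cross_val_score(pipe, x, y, cv=5, scoring='recall')
print('CV recall:', recall_cv.mean().round(3), '+/-', recall_cv.std().round(3))
```
#### Because an imblearn Pipeline applies the sampler only during fit, every test fold is evaluated on untouched data.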
# Sequence to Sequence Learning :label:`sec_seq2seq` As we have seen in :numref:`sec_machine_translation`, in machine translation both the input and output are a variable-length sequence. To address this type of problem, we have designed a general encoder-decoder architecture in :numref:`sec_encoder-decoder`. In this section, we will use two RNNs to design the encoder and the decoder of this architecture and apply it to *sequence to sequence* learning for machine translation :cite:`Sutskever.Vinyals.Le.2014,Cho.Van-Merrienboer.Gulcehre.ea.2014`. Following the design principle of the encoder-decoder architecture, the RNN encoder can take a variable-length sequence as the input and transforms it into a fixed-shape hidden state. In other words, information of the input (source) sequence is *encoded* in the hidden state of the RNN encoder. To generate the output sequence token by token, a separate RNN decoder can predict the next token based on what tokens have been seen (such as in language modeling) or generated, together with the encoded information of the input sequence. :numref:`fig_seq2seq` illustrates how to use two RNNs for sequence to sequence learning in machine translation. ![Sequence to sequence learning with an RNN encoder and an RNN decoder.](../img/seq2seq.svg) :label:`fig_seq2seq` In :numref:`fig_seq2seq`, the special "&lt;eos&gt;" token marks the end of the sequence. The model can stop making predictions once this token is generated. At the initial time step of the RNN decoder, there are two special design decisions. First, the special beginning-of-sequence "&lt;bos&gt;" token is an input. Second, the final hidden state of the RNN encoder is used to initiate the hidden state of the decoder. In designs such as :cite:`Sutskever.Vinyals.Le.2014`, this is exactly how the encoded input sequence information is fed into the decoder for generating the output (target) sequence. In some other designs such as :cite:`Cho.Van-Merrienboer.Gulcehre.ea.2014`, the final hidden state of the encoder is also fed into the decoder as part of the inputs at every time step as shown in :numref:`fig_seq2seq`. Similar to the training of language models in :numref:`sec_language_model`, we can allow the labels to be the original output sequence, shifted by one token: "&lt;bos&gt;", "Ils", "regardent", "." $\rightarrow$ "Ils", "regardent", ".", "&lt;eos&gt;". In the following, we will explain the design of :numref:`fig_seq2seq` in greater detail. We will train this model for machine translation on the English-French dataset as introduced in :numref:`sec_machine_translation`. ``` import collections import math from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn, rnn from d2l import mxnet as d2l npx.set_np() ``` ## Encoder Technically speaking, the encoder transforms an input sequence of variable length into a fixed-shape *context variable* $\mathbf{c}$, and encodes the input sequence information in this context variable. As depicted in :numref:`fig_seq2seq`, we can use an RNN to design the encoder. Let us consider a sequence example (batch size: 1). Suppose that the input sequence is $x_1, \ldots, x_T$, such that $x_t$ is the $t^{\mathrm{th}}$ token in the input text sequence. At time step $t$, the RNN transforms the input feature vector $\mathbf{x}_t$ for $x_t$ and the hidden state $\mathbf{h} _{t-1}$ from the previous time step into the current hidden state $\mathbf{h}_t$. 
We can use a function $f$ to express the transformation of the RNN's recurrent layer: $$\mathbf{h}_t = f(\mathbf{x}_t, \mathbf{h}_{t-1}). $$ In general, the encoder transforms the hidden states at all the time steps into the context variable through a customized function $q$: $$\mathbf{c} = q(\mathbf{h}_1, \ldots, \mathbf{h}_T).$$ For example, when choosing $q(\mathbf{h}_1, \ldots, \mathbf{h}_T) = \mathbf{h}_T$ such as in :numref:`fig_seq2seq`, the context variable is just the hidden state $\mathbf{h}_T$ of the input sequence at the final time step. So far we have used a unidirectional RNN to design the encoder, where a hidden state only depends on the input subsequence at and before the time step of the hidden state. We can also construct encoders using bidirectional RNNs. In this case, a hidden state depends on the subsequence before and after the time step (including the input at the current time step), which encodes the information of the entire sequence. Now let us [**implement the RNN encoder**]. Note that we use an *embedding layer* to obtain the feature vector for each token in the input sequence. The weight of an embedding layer is a matrix whose number of rows equals to the size of the input vocabulary (`vocab_size`) and number of columns equals to the feature vector's dimension (`embed_size`). For any input token index $i$, the embedding layer fetches the $i^{\mathrm{th}}$ row (starting from 0) of the weight matrix to return its feature vector. Besides, here we choose a multilayer GRU to implement the encoder. ``` #@save class Seq2SeqEncoder(d2l.Encoder): """The RNN encoder for sequence to sequence learning.""" def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs): super(Seq2SeqEncoder, self).__init__(**kwargs) # Embedding layer self.embedding = nn.Embedding(vocab_size, embed_size) self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=dropout) def forward(self, X, *args): # The output `X` shape: (`batch_size`, `num_steps`, `embed_size`) X = self.embedding(X) # In RNN models, the first axis corresponds to time steps X = X.swapaxes(0, 1) state = self.rnn.begin_state(batch_size=X.shape[1], ctx=X.ctx) output, state = self.rnn(X, state) # `output` shape: (`num_steps`, `batch_size`, `num_hiddens`) # `state[0]` shape: (`num_layers`, `batch_size`, `num_hiddens`) return output, state ``` The returned variables of recurrent layers have been explained in :numref:`sec_rnn-concise`. Let us still use a concrete example to [**illustrate the above encoder implementation.**] Below we instantiate a two-layer GRU encoder whose number of hidden units is 16. Given a minibatch of sequence inputs `X` (batch size: 4, number of time steps: 7), the hidden states of the last layer at all the time steps (`output` return by the encoder's recurrent layers) are a tensor of shape (number of time steps, batch size, number of hidden units). ``` encoder = Seq2SeqEncoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2) encoder.initialize() X = np.zeros((4, 7)) output, state = encoder(X) output.shape ``` Since a GRU is employed here, the shape of the multilayer hidden states at the final time step is (number of hidden layers, batch size, number of hidden units). If an LSTM is used, memory cell information will also be contained in `state`. ``` len(state), state[0].shape ``` ## [**Decoder**] :label:`sec_seq2seq_decoder` As we just mentioned, the context variable $\mathbf{c}$ of the encoder's output encodes the entire input sequence $x_1, \ldots, x_T$. 
Given the output sequence $y_1, y_2, \ldots, y_{T'}$ from the training dataset, for each time step $t'$ (the symbol differs from the time step $t$ of input sequences or encoders), the probability of the decoder output $y_{t'}$ is conditional on the previous output subsequence $y_1, \ldots, y_{t'-1}$ and the context variable $\mathbf{c}$, i.e., $P(y_{t'} \mid y_1, \ldots, y_{t'-1}, \mathbf{c})$. To model this conditional probability on sequences, we can use another RNN as the decoder. At any time step $t^\prime$ on the output sequence, the RNN takes the output $y_{t^\prime-1}$ from the previous time step and the context variable $\mathbf{c}$ as its input, then transforms them and the previous hidden state $\mathbf{s}_{t^\prime-1}$ into the hidden state $\mathbf{s}_{t^\prime}$ at the current time step. As a result, we can use a function $g$ to express the transformation of the decoder's hidden layer: $$\mathbf{s}_{t^\prime} = g(y_{t^\prime-1}, \mathbf{c}, \mathbf{s}_{t^\prime-1}).$$ :eqlabel:`eq_seq2seq_s_t` After obtaining the hidden state of the decoder, we can use an output layer and the softmax operation to compute the conditional probability distribution $P(y_{t^\prime} \mid y_1, \ldots, y_{t^\prime-1}, \mathbf{c})$ for the output at time step $t^\prime$. Following :numref:`fig_seq2seq`, when implementing the decoder as follows, we directly use the hidden state at the final time step of the encoder to initialize the hidden state of the decoder. This requires that the RNN encoder and the RNN decoder have the same number of layers and hidden units. To further incorporate the encoded input sequence information, the context variable is concatenated with the decoder input at all the time steps. To predict the probability distribution of the output token, a fully-connected layer is used to transform the hidden state at the final layer of the RNN decoder. ``` class Seq2SeqDecoder(d2l.Decoder): """The RNN decoder for sequence to sequence learning.""" def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs): super(Seq2SeqDecoder, self).__init__(**kwargs) self.embedding = nn.Embedding(vocab_size, embed_size) self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=dropout) self.dense = nn.Dense(vocab_size, flatten=False) def init_state(self, enc_outputs, *args): return enc_outputs[1] def forward(self, X, state): # The output `X` shape: (`num_steps`, `batch_size`, `embed_size`) X = self.embedding(X).swapaxes(0, 1) # `context` shape: (`batch_size`, `num_hiddens`) context = state[0][-1] # Broadcast `context` so it has the same `num_steps` as `X` context = np.broadcast_to(context, ( X.shape[0], context.shape[0], context.shape[1])) X_and_context = np.concatenate((X, context), 2) output, state = self.rnn(X_and_context, state) output = self.dense(output).swapaxes(0, 1) # `output` shape: (`batch_size`, `num_steps`, `vocab_size`) # `state[0]` shape: (`num_layers`, `batch_size`, `num_hiddens`) return output, state ``` To [**illustrate the implemented decoder**], below we instantiate it with the same hyperparameters from the aforementioned encoder. As we can see, the output shape of the decoder becomes (batch size, number of time steps, vocabulary size), where the last dimension of the tensor stores the predicted token distribution. 
``` decoder = Seq2SeqDecoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2) decoder.initialize() state = decoder.init_state(encoder(X)) output, state = decoder(X, state) output.shape, len(state), state[0].shape ``` To summarize, the layers in the above RNN encoder-decoder model are illustrated in :numref:`fig_seq2seq_details`. ![Layers in an RNN encoder-decoder model.](../img/seq2seq-details.svg) :label:`fig_seq2seq_details` ## Loss Function At each time step, the decoder predicts a probability distribution for the output tokens. Similar to language modeling, we can apply softmax to obtain the distribution and calculate the cross-entropy loss for optimization. Recall :numref:`sec_machine_translation` that the special padding tokens are appended to the end of sequences so sequences of varying lengths can be efficiently loaded in minibatches of the same shape. However, prediction of padding tokens should be excluded from loss calculations. To this end, we can use the following `sequence_mask` function to [**mask irrelevant entries with zero values**] so later multiplication of any irrelevant prediction with zero equals to zero. For example, if the valid length of two sequences excluding padding tokens are one and two, respectively, the remaining entries after the first one and the first two entries are cleared to zeros. ``` X = np.array([[1, 2, 3], [4, 5, 6]]) npx.sequence_mask(X, np.array([1, 2]), True, axis=1) ``` (**We can also mask all the entries across the last few axes.**) If you like, you may even specify to replace such entries with a non-zero value. ``` X = np.ones((2, 3, 4)) npx.sequence_mask(X, np.array([1, 2]), True, value=-1, axis=1) ``` Now we can [**extend the softmax cross-entropy loss to allow the masking of irrelevant predictions.**] Initially, masks for all the predicted tokens are set to one. Once the valid length is given, the mask corresponding to any padding token will be cleared to zero. In the end, the loss for all the tokens will be multipled by the mask to filter out irrelevant predictions of padding tokens in the loss. ``` #@save class MaskedSoftmaxCELoss(gluon.loss.SoftmaxCELoss): """The softmax cross-entropy loss with masks.""" # `pred` shape: (`batch_size`, `num_steps`, `vocab_size`) # `label` shape: (`batch_size`, `num_steps`) # `valid_len` shape: (`batch_size`,) def forward(self, pred, label, valid_len): # `weights` shape: (`batch_size`, `num_steps`, 1) weights = np.expand_dims(np.ones_like(label), axis=-1) weights = npx.sequence_mask(weights, valid_len, True, axis=1) return super(MaskedSoftmaxCELoss, self).forward(pred, label, weights) ``` For [**a sanity check**], we can create three identical sequences. Then we can specify that the valid lengths of these sequences are 4, 2, and 0, respectively. As a result, the loss of the first sequence should be twice as large as that of the second sequence, while the third sequence should have a zero loss. ``` loss = MaskedSoftmaxCELoss() loss(np.ones((3, 4, 10)), np.ones((3, 4)), np.array([4, 2, 0])) ``` ## [**Training**] :label:`sec_seq2seq_training` In the following training loop, we concatenate the special beginning-of-sequence token and the original output sequence excluding the final token as the input to the decoder, as shown in :numref:`fig_seq2seq`. This is called *teacher forcing* because the original output sequence (token labels) is fed into the decoder. Alternatively, we could also feed the *predicted* token from the previous time step as the current input to the decoder. 
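As a toy illustration (plain NumPy rather than the MXNet code used in this section, with assumed token ids), the decoder input under teacher forcing is simply the label sequence shifted right by one position with "&lt;bos&gt;" prepended:
```
# Toy illustration (plain NumPy): decoder input = <bos> + labels shifted right by one.
import numpy as np

bos, eos = 1, 2                      # assumed token ids for <bos> and <eos>
Y = np.array([[5, 6, 7, eos]])       # target tokens, e.g. "Ils regardent ." + <eos>, batch size 1
dec_input = np.concatenate([np.full((Y.shape[0], 1), bos), Y[:, :-1]], axis=1)
print(dec_input)                     # [[1 5 6 7]] -- fed to the decoder during training
print(Y)                             # [[5 6 7 2]] -- used as the labels
```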
``` #@save def train_seq2seq(net, data_iter, lr, num_epochs, tgt_vocab, device): """Train a model for sequence to sequence.""" net.initialize(init.Xavier(), force_reinit=True, ctx=device) trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr}) loss = MaskedSoftmaxCELoss() animator = d2l.Animator(xlabel='epoch', ylabel='loss', xlim=[10, num_epochs]) for epoch in range(num_epochs): timer = d2l.Timer() metric = d2l.Accumulator(2) # Sum of training loss, no. of tokens for batch in data_iter: X, X_valid_len, Y, Y_valid_len = [ x.as_in_ctx(device) for x in batch] bos = np.array( [tgt_vocab['<bos>']] * Y.shape[0], ctx=device).reshape(-1, 1) dec_input = np.concatenate([bos, Y[:, :-1]], 1) # Teacher forcing with autograd.record(): Y_hat, _ = net(X, dec_input, X_valid_len) l = loss(Y_hat, Y, Y_valid_len) l.backward() d2l.grad_clipping(net, 1) num_tokens = Y_valid_len.sum() trainer.step(num_tokens) metric.add(l.sum(), num_tokens) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, (metric[0] / metric[1],)) print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f} ' f'tokens/sec on {str(device)}') ``` Now we can [**create and train an RNN encoder-decoder model**] for sequence to sequence learning on the machine translation dataset. ``` embed_size, num_hiddens, num_layers, dropout = 32, 32, 2, 0.1 batch_size, num_steps = 64, 10 lr, num_epochs, device = 0.005, 300, d2l.try_gpu() train_iter, src_vocab, tgt_vocab = d2l.load_data_nmt(batch_size, num_steps) encoder = Seq2SeqEncoder( len(src_vocab), embed_size, num_hiddens, num_layers, dropout) decoder = Seq2SeqDecoder( len(tgt_vocab), embed_size, num_hiddens, num_layers, dropout) net = d2l.EncoderDecoder(encoder, decoder) train_seq2seq(net, train_iter, lr, num_epochs, tgt_vocab, device) ``` ## [**Prediction**] To predict the output sequence token by token, at each decoder time step the predicted token from the previous time step is fed into the decoder as an input. Similar to training, at the initial time step the beginning-of-sequence ("&lt;bos&gt;") token is fed into the decoder. This prediction process is illustrated in :numref:`fig_seq2seq_predict`. When the end-of-sequence ("&lt;eos&gt;") token is predicted, the prediction of the output sequence is complete. ![Predicting the output sequence token by token using an RNN encoder-decoder.](../img/seq2seq-predict.svg) :label:`fig_seq2seq_predict` We will introduce different strategies for sequence generation in :numref:`sec_beam-search`. 
``` #@save def predict_seq2seq(net, src_sentence, src_vocab, tgt_vocab, num_steps, device, save_attention_weights=False): """Predict for sequence to sequence.""" src_tokens = src_vocab[src_sentence.lower().split(' ')] + [ src_vocab['<eos>']] enc_valid_len = np.array([len(src_tokens)], ctx=device) src_tokens = d2l.truncate_pad(src_tokens, num_steps, src_vocab['<pad>']) # Add the batch axis enc_X = np.expand_dims(np.array(src_tokens, ctx=device), axis=0) enc_outputs = net.encoder(enc_X, enc_valid_len) dec_state = net.decoder.init_state(enc_outputs, enc_valid_len) # Add the batch axis dec_X = np.expand_dims(np.array([tgt_vocab['<bos>']], ctx=device), axis=0) output_seq, attention_weight_seq = [], [] for _ in range(num_steps): Y, dec_state = net.decoder(dec_X, dec_state) # We use the token with the highest prediction likelihood as the input # of the decoder at the next time step dec_X = Y.argmax(axis=2) pred = dec_X.squeeze(axis=0).astype('int32').item() # Save attention weights (to be covered later) if save_attention_weights: attention_weight_seq.append(net.decoder.attention_weights) # Once the end-of-sequence token is predicted, the generation of the # output sequence is complete if pred == tgt_vocab['<eos>']: break output_seq.append(pred) return ' '.join(tgt_vocab.to_tokens(output_seq)), attention_weight_seq ``` ## Evaluation of Predicted Sequences We can evaluate a predicted sequence by comparing it with the label sequence (the ground-truth). BLEU (Bilingual Evaluation Understudy), though originally proposed for evaluating machine translation results :cite:`Papineni.Roukos.Ward.ea.2002`, has been extensively used in measuring the quality of output sequences for different applications. In principle, for any $n$-grams in the predicted sequence, BLEU evaluates whether this $n$-grams appears in the label sequence. Denote by $p_n$ the precision of $n$-grams, which is the ratio of the number of matched $n$-grams in the predicted and label sequences to the number of $n$-grams in the predicted sequence. To explain, given a label sequence $A$, $B$, $C$, $D$, $E$, $F$, and a predicted sequence $A$, $B$, $B$, $C$, $D$, we have $p_1 = 4/5$, $p_2 = 3/4$, $p_3 = 1/3$, and $p_4 = 0$. Besides, let $\mathrm{len}_{\text{label}}$ and $\mathrm{len}_{\text{pred}}$ be the numbers of tokens in the label sequence and the predicted sequence, respectively. Then, BLEU is defined as $$ \exp\left(\min\left(0, 1 - \frac{\mathrm{len}_{\text{label}}}{\mathrm{len}_{\text{pred}}}\right)\right) \prod_{n=1}^k p_n^{1/2^n},$$ :eqlabel:`eq_bleu` where $k$ is the longest $n$-grams for matching. Based on the definition of BLEU in :eqref:`eq_bleu`, whenever the predicted sequence is the same as the label sequence, BLEU is 1. Moreover, since matching longer $n$-grams is more difficult, BLEU assigns a greater weight to a longer $n$-gram precision. Specifically, when $p_n$ is fixed, $p_n^{1/2^n}$ increases as $n$ grows (the original paper uses $p_n^{1/n}$). Furthermore, since predicting shorter sequences tends to obtain a higher $p_n$ value, the coefficient before the multiplication term in :eqref:`eq_bleu` penalizes shorter predicted sequences. For example, when $k=2$, given the label sequence $A$, $B$, $C$, $D$, $E$, $F$ and the predicted sequence $A$, $B$, although $p_1 = p_2 = 1$, the penalty factor $\exp(1-6/2) \approx 0.14$ lowers the BLEU. We [**implement the BLEU measure**] as follows. 
``` def bleu(pred_seq, label_seq, k): #@save """Compute the BLEU.""" pred_tokens, label_tokens = pred_seq.split(' '), label_seq.split(' ') len_pred, len_label = len(pred_tokens), len(label_tokens) score = math.exp(min(0, 1 - len_label / len_pred)) for n in range(1, k + 1): num_matches, label_subs = 0, collections.defaultdict(int) for i in range(len_label - n + 1): label_subs[' '.join(label_tokens[i: i + n])] += 1 for i in range(len_pred - n + 1): if label_subs[' '.join(pred_tokens[i: i + n])] > 0: num_matches += 1 label_subs[' '.join(pred_tokens[i: i + n])] -= 1 score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n)) return score ``` In the end, we use the trained RNN encoder-decoder to [**translate a few English sentences into French**] and compute the BLEU of the results. ``` engs = ['go .', "i lost .", 'he\'s calm .', 'i\'m home .'] fras = ['va !', 'j\'ai perdu .', 'il est calme .', 'je suis chez moi .'] for eng, fra in zip(engs, fras): translation, attention_weight_seq = predict_seq2seq( net, eng, src_vocab, tgt_vocab, num_steps, device) print(f'{eng} => {translation}, bleu {bleu(translation, fra, k=2):.3f}') ``` ## Summary * Following the design of the encoder-decoder architecture, we can use two RNNs to design a model for sequence to sequence learning. * When implementing the encoder and the decoder, we can use multilayer RNNs. * We can use masks to filter out irrelevant computations, such as when calculating the loss. * In encoder-decoder training, the teacher forcing approach feeds original output sequences (in contrast to predictions) into the decoder. * BLEU is a popular measure for evaluating output sequences by matching $n$-grams between the predicted sequence and the label sequence. ## Exercises 1. Can you adjust the hyperparameters to improve the translation results? 1. Rerun the experiment without using masks in the loss calculation. What results do you observe? Why? 1. If the encoder and the decoder differ in the number of layers or the number of hidden units, how can we initialize the hidden state of the decoder? 1. In training, replace teacher forcing with feeding the prediction at the previous time step into the decoder. How does this influence the performance? 1. Rerun the experiment by replacing GRU with LSTM. 1. Are there any other ways to design the output layer of the decoder? [Discussions](https://discuss.d2l.ai/t/345)
# Seaborn In Action Seaborn is a data visualization library that is based on **Matplotlib**. It is tightly integrated with the Pandas library and provides a high-level interface for making attractive and informative statistical graphics in Python. This notebook introduces the basic and essential functions in the seaborn library. Let's go ahead and import the relevant libraries for this tutorial. ``` import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns sns.set() ?sns.relplot ``` ## Loading the Data and Inspection ``` cs = pd.read_csv('data/c_scores.csv') cs cs.sample(5) cs.info() ``` ## Scatter Plots We shall plot the **age** and **credit_amount** columns using the **jointplot** function. ``` sns.jointplot(x='age', y='credit_amount', data=cs) ``` Let's plot the **age** and **credit_amount** again, but this time let's break that down by **job**. For this, we shall use **relplot()**. This function provides access to several axes-level functions that show the relationship between two variables, optionally with semantic mappings. It also comes with the **kind** parameter, which can be used to specify whether you want a **lineplot** or a **scatterplot**. The default is **scatterplot**. The visualization below shows the relation between the credit amount given to people and their ages. In addition, I am comparing it over the kind of job. Seaborn uses coloring to show which of the points represent what kind of job. The **height** and **aspect** parameters are used to adjust the height and width of the FacetGrid. The **hue** parameter groups the variable whose levels are drawn in different colors. The **data** parameter specifies the dataset of interest. ``` sns.relplot(x="age", y="credit_amount", height= 8, aspect=1, hue="job", data=cs) ``` We can also split the above visualization across two or more categorical variables. For example, in the visualization below, we compare it across **class** using the **col** parameter in the **relplot()** function. ``` sns.relplot(x="age", y="credit_amount", height= 8, aspect=1, hue="job", col='class',data=cs) ``` ## Boxplots A boxplot is used to show the distribution of numerical variables and facilitates comparisons across multiple categorical variables. We would like to visualize the distribution of **age** of the customers with respect to **class**. ``` sns.boxplot(x='class', y='age', data=cs) ``` Let's visualize the distribution of **credit_amount** with respect to **purpose**, using **class** as the **hue**. ``` fig, ax = plt.subplots(figsize=(18,7)) sns.boxplot(x='purpose', y='credit_amount', hue='class', ax = ax, data=cs) ``` ## Histogram A histogram represents the distribution of data by forming bins along the range of the data and drawing bars to represent the number of observations that fall within each bin. Let's plot the histogram of **credit_amount**. ``` sns.distplot(cs['credit_amount']) ``` Let's plot the histogram of **age**. ``` sns.distplot(cs['age']) ``` Let's get the histogram of the **credit_amount** of the customers, this time across the **class** dimension as a faceted histogram. ``` facet = sns.FacetGrid(cs, height=6, col='class') facet = facet.map(sns.distplot, 'credit_amount', color='r') ``` It will, however, be a fantastic idea to compare the distribution of **credit_amount** across **class** overlaid on the same plot.
``` facet = sns.FacetGrid(cs, height=6, hue='class') facet = facet.map(sns.distplot, 'credit_amount') ``` ## Line Plots To make meaningful line plots, we are going to generate a dataframe to be used to help us understand line plots. We will randomly generate some dates from (1970) to (1970+36) over 12 months period. We will then go ahead and select the first 36 rows for the **duration** and **age** columns to form our new dataframe. ``` new_series = pd.DataFrame({'time': pd.date_range('1970-12-31', periods=36, freq='12M'), 'duration': cs['duration'].iloc[0:36], 'age': cs['age'].iloc[0:36]}) new_series.head() ``` Next, we are going to move the **duration** and the **age** columns to rows so that we can plot both on the graph. We are going to do that using the the pandas **melt()** method. The **melt()** method allows us to unpivot a dataframe from a wide to a long format, optionally leaving the identifiers set. It takes in the dataframe you want to unpivot, the **id_vars**, identifier variable (could be single column or list of columns), the **var_name**, variable name (for the variable that are going to be unpivoted), and the **value_name**, the name of the value column. ``` series = pd.melt(new_series, id_vars=['time'], var_name='Variables', value_name='values') series.sample(10) lp = sns.lineplot(x='time', y='values', hue='Variables', data=series) #Position the legend out the graph lp.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.0); lp.set(title='Line plot of Duration and Age', xlabel='Year', ylabel='Values') ``` ## Regression Plot In the regression plotting, we are going to use the **lmplot()**. This function combines the the **regplot()** and FacetGrid. It is intended as a convenient interface to fit regression models across subsets of datasets. We will use the famous iris flower dataset for the regression plot. It is available in the seaborn module. ``` iris = sns.load_dataset('iris') iris.sample(8) ``` Let's plot the **sepal_length** vs the **sepal_withth** only ``` g = sns.lmplot(x='petal_length', y='petal_width', order=1, data=iris) g.set_axis_labels("Petal Length(mm)", "Petal Width(mm)" ) ``` Using the species, lets break the regression line with respect to the species and fit a first order regression to each species' respective data point. ``` g = sns.lmplot(x='petal_length', y='petal_width', hue='species',height=8, order=1, data=iris) g.set_axis_labels("Petal Length(mm)", "Petal Width(mm)" ) ``` Now, let's use the **species** as the **col**, column parameter ``` g = sns.lmplot(x='petal_length', y='petal_width', col='species',height=10, order=1, data=iris) g.set_axis_labels("Petal Length(mm)", "Petal Width(mm)" ) ``` ### References 1. https://seaborn.pydata.org/index.html 2. https://www.featureranking.com/tutorials/python-tutorials/seaborn/
<font size = "5"> **[Image Tools](2_Image_Tools.ipynb)** </font> <hr style="height:2px;border-top:4px solid #FF8200" /> # Selective Fourier Transform part of <font size = "4"> **pyTEMlib**, a **pycroscopy** library </font> Notebook by Gerd Duscher Materials Science & Engineering<br> Joint Institute of Advanced Materials<br> The University of Tennessee, Knoxville An introduction to Fourier filtering of images. ## Install pyTEMlib If you have not done so in the [Introduction Notebook](_.ipynb), please test and install [pyTEMlib](https://github.com/gduscher/pyTEMlib) and other important packages with the code cell below. ``` import sys from pkg_resources import get_distribution, DistributionNotFound def test_package(package_name): """Test if package exists and returns version or -1""" try: version = (get_distribution(package_name).version) except (DistributionNotFound, ImportError) as err: version = '-1' return version # Colab setup ------------------ if 'google.colab' in sys.modules: !pip install git+https://github.com/pycroscopy/pyTEMlib/ -q # pyTEMlib setup ------------------ else: if test_package('sidpy') < '0.0.7': print('installing sidpy') !{sys.executable} -m pip install --upgrade sidpy -q if test_package('pyNSID') < '0.0.3': print('installing pyNSID') !{sys.executable} -m pip install --upgrade pyNSID -q if test_package('pyTEMlib') < '0.2022.10.1': print('installing pyTEMlib') !{sys.executable} -m pip install --upgrade pyTEMlib -q # ------------------------------ print('done') ``` ## Loading of necessary libraries Please note that we only need to load the pyTEMlib library, which is based on sidpy Datasets. ``` %pylab notebook from matplotlib.widgets import RectangleSelector sys.path.insert(0,'../../') import pyTEMlib import pyTEMlib.file_tools as ft import pyTEMlib.image_tools as it print('pyTEMlib version: ', pyTEMlib.__version__) note_book_version = '2021.10.25' note_book_name='pyTEMib/notebooks/Imaging/Adaptive_Fourier_Filter' ``` ## Open File These datasets are stored in the pyNSID data format (extension: hf5) automatically. All results can be stored in that file. First, we select the file. ``` file_widget = ft.FileWidget() ``` Now we open and plot the data. Select an area with the mouse; a rectangle will appear!
``` try: dataset.h5_dataset.file.close() except: pass dataset= ft.open_file(file_widget.file_name) print(file_widget.file_name) if dataset.data_type.name != 'IMAGE': print('We really would need an image here') dataset.plot() selector = RectangleSelector(dataset.view.axis, None,interactive=True , drawtype='box') def get_selection(dataset, extents): if (np.array(extents) <2).all(): return dataset xmin, xmax, ymin, ymax = selector.extents/(dataset.x[1]-dataset.x[0]) return dataset.like_data(dataset[int(xmin):int(xmax), int(ymin):int(ymax)]) selection = it.get_selection(dataset, selector.extents) selection.plot() ``` ## Power Spectrum of Image ``` power_spectrum = it.power_spectrum(selection, smoothing=1) power_spectrum.view_metadata() print('source: ', power_spectrum.source) power_spectrum.plot() ``` ## Spot Detection in Fourier Transform ``` # ------Input---------- spot_threshold=0.1 # --------------------- spots = it.diffractogram_spots(power_spectrum, spot_threshold=spot_threshold) spots = spots[np.linalg.norm(spots[:,:2],axis=1)<8,:] spots = spots[np.linalg.norm(spots[:,:2],axis=1)>0.5,:] power_spectrum.plot() plt.gca().scatter(spots[:,0],spots[:,1], color='red', alpha=0.4); #print(spots[:,:2]) #print(np.round(np.linalg.norm(spots[:,:2], axis=1),2)) #print(np.round(np.degrees(np.arctan2(spots[:,0], spots[:,1])+np.pi)%180,2)) angles=np.arctan2(spots[:,0], spots[:,1]) radius= np.linalg.norm(spots[:,:2], axis=1) args = angles>0 radii = radius[angles>0] angles = angles[angles>0] print(radii, np.degrees(angles)) #print(np.degrees(angles[1]-angles[0]), np.degrees(angles[2]-angles[0])) #print(1/radii) new_angles = np.round(np.degrees(angles+np.pi-angles[0]+0.0000001)%180,2) print(new_angles) print(np.degrees(angles[1]-angles[0]), np.degrees(angles[2]-angles[0])) angles=np.arctan2(spots[:,0], spots[:,1]) radius= np.linalg.norm(spots[:,:2], axis=1) args = angles>0 radii = radius[angles>0] angles = angles[angles>0] print(radii, np.degrees(angles)) # clockwise from up angles =(-np.degrees(np.arctan2(spots[:,0], spots[:,1]))+180) % 360 spots = spots[np.argsort(angles)] angles =(-np.degrees(np.arctan2(spots[:,0], spots[:,1]))+180) % 360 plane_distances = 1/np.linalg.norm(spots[:,:2],axis=1) rolled_angles= np.roll(angles,1) %360 rolled_angles[0] -= 360 relative_angles = angles - rolled_angles print(np.round(plane_distances,3)) print(np.round(relative_angles,1)) import pyTEMlib.kinematic_scattering as ks #Initialize the dictionary of the input tags_simulation = {} ### Define Crystal tags_simulation = ft.read_poscar('./POSCAR.mp-2418_PdSe2') ### Define experimental parameters: tags_simulation['acceleration_voltage_V'] = 200.0 *1000.0 #V tags_simulation['new_figure'] = False tags_simulation['plot FOV'] = 30 tags_simulation['convergence_angle_mrad'] = 0 tags_simulation['zone_hkl'] = np.array([0,0,1]) # incident neares zone axis: defines Laue Zones!!!! 
tags_simulation['mistilt'] = np.array([0,0,0]) # mistilt in degrees tags_simulation['Sg_max'] = .2 # 1/nm maximum allowed excitation error ; This parameter is related to the thickness tags_simulation['hkl_max'] = 6 # Highest evaluated Miller indices ###################################### # Diffraction Simulation of Crystal # ###################################### import itertools hkl_list = [list([0, 0, 0])] spot_dict = {} for hkl in itertools.product(range(6), repeat=3): if list(hkl) not in hkl_list: #print(hkl, hkl_list) tags_simulation['zone_hkl'] = hkl ks.kinematic_scattering(tags_simulation, verbose = False) if list(tags_simulation['nearest_zone_axes']['0']['hkl']) not in hkl_list: print('- ', tags_simulation['nearest_zone_axes']['0']['hkl']) spots = tags_simulation['allowed']['g'][np.linalg.norm(tags_simulation['allowed']['g'][:,:2], axis=1)<4.7,:2] angles=np.arctan2(spots[:,0], spots[:,1]) radius= np.linalg.norm(spots[:,:2], axis=1) args = angles>0 radii = radius[angles>0] angles = angles[angles>0] spot_dict[hkl] = {"radii": radii, "angles": angles} print(radii, np.degrees(angles%np.pi)) hkl_list.append(list(hkl)) spot_dict for hkl, refl in spot_dict.items(): if len(refl['radii'])>4: print(hkl, 1/refl['radii']) ``` ## Log the result ``` # results_channel = ft.log_results(dataset.h5_dataset.parent.parent, filtered_dataset) ``` A tree-like plot of the file ``` ft.h5_tree(dataset.h5_dataset.file) ``` ## Close File let's close the file but keep the filename ``` dataset.h5_dataset.file.close() ```
``` import time import networkx as nx from nfp.preprocessing import features_graph import numpy as np import pandas as pd dataIni = pd.read_csv('Oads_Mo2C_catalysts_graphml.csv') dataIni['graphFileName'] = dataIni['graphFileName'].str.slice_replace(0,0,repl='Oads_Mo2C_graphml/') print(dataIni.graphFileName) # Prepare graph information for faster preprocessing def construct_graph_data(graphDF,numOfShell=2): """ Returns dict with entries 'n_atom' : number of atoms in the molecule 'n_bond' : number of bonds in the molecule 'connectivity' : (n_bond, 2) array of source atom, target atom pairs. """ dataList = [] for index,row in graphDF.iterrows(): graph = row.graphFileName s1 = graph.split("/")[-1] shortName = s1.split(".")[0] G = nx.read_graphml(graph) if numOfShell==2: nodes = ( node for node, data in G.nodes(data=True) if data.get("type") != "thirdCoordinationShell" ) G = G.subgraph(nodes) elif numOfShell==1: nodes = ( node for node, data in G.nodes(data=True) if data.get("type") != "thirdCoordinationShell" and data.get("type") != 'secondCoordinationShell' ) G = G.subgraph(nodes) n_atom = G.number_of_nodes() n_bond = 2 * G.number_of_edges() # If its an isolated atom, add a self-link if n_bond == 0: n_bond = 1 connectivity = np.zeros((n_bond, 2), dtype='int') nodeList = [] edgeList = [] revList = [] atomFeatList = [] bondFeatList = [] bond_index = 0 for n,node in enumerate(G.nodes): # Atom Classes start_index = list(G.nodes).index(node) nodeList.append(node) atomFeat = features_graph.atom_features_ver1(G.nodes[node]) atomFeatList.append(atomFeat) for m,edge in enumerate(G.edges): if node in edge: # Is the bond pointing at the target atom rev = list(G.nodes).index(list(G.edges)[m][0]) != start_index bondFeat = features_graph.bond_features_v1(G.edges[edge],flipped=rev) bondFeatList.append(bondFeat) edgeList.append(edge) revList.append(rev) # Connectivity if not rev: # Original direction connectivity[bond_index, 0] = list(G.nodes).index(list(G.edges)[m][0]) connectivity[bond_index, 1] = list(G.nodes).index(list(G.edges)[m][1]) else: # Reversed connectivity[bond_index, 0] = list(G.nodes).index(list(G.edges)[m][1]) connectivity[bond_index, 1] = list(G.nodes).index(list(G.edges)[m][0]) bond_index += 1 connectivity = connectivity.tolist() dataList.append([shortName,n_atom, n_bond, nodeList, edgeList, atomFeatList, bondFeatList, revList, connectivity]) return dataList start_time = time.time() structList = construct_graph_data(dataIni,numOfShell=2) dfGS = pd.DataFrame(structList,columns=['graphName','nAtoms','nBonds','nodes','edges','atomFeatures','bondFeatures','revBool','connectivity']) print('Finished in (s):',time.time()-start_time) #dfGS.to_csv('graph_structure_2ndNN_ini.csv.gz', index=None, compression='gzip') ```
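A quick sanity check, added here as a sketch (not part of the original notebook): the `connectivity` pairs stored in `dfGS` can be rebuilt into a sparse adjacency matrix, assuming `dfGS` from the cell above is available.
```
# Sketch: rebuild a sparse adjacency matrix from one row's connectivity pairs.
# Assumes `dfGS` from the cell above; not part of the original notebook.
import numpy as np
from scipy.sparse import coo_matrix

row = dfGS.iloc[0]
pairs = np.asarray(row['connectivity'])            # shape (n_bond, 2): (source, target) indices
adj = coo_matrix((np.ones(len(pairs)), (pairs[:, 0], pairs[:, 1])),
                 shape=(row['nAtoms'], row['nAtoms']))
print(row['graphName'], adj.shape, adj.nnz, 'directed edges')
```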
# Stage 1: Correlation for individual enhancers ``` import pandas as pd import numpy as np import time, re, datetime import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from scipy.stats import zscore import random from multiprocessing import Pool,cpu_count num_processors = cpu_count() print('Starting analysis; %d processors; %s' % (num_processors, datetime.datetime.today())) t00 =time.time() # np.random.seed(0) import sys sys.path.insert(0, '/cndd/fangming/CEMBA/snmcseq_dev') from __init__jupyterlab import * import snmcseq_utils today=datetime.datetime.today().strftime('%d-%m-%Y') use_kmers = False corr_type = 'Pearson' # corr_type = 'Spearman' features_use = 'mCG+ATAC' analysis_prefix = 'eran_model_{}'.format(features_use) output_fig = '/cndd2/fangming/projects/scf_enhancers/results/figures/{}_{{}}_{}.pdf'.format(analysis_prefix, today) output = '/cndd2/fangming/projects/scf_enhancers/results/{}_{{}}_{}'.format(analysis_prefix, today) # fn_load_prefix = 'RegressData/Regress_data_6143genes_19cells_' # fn_load_prefix = 'RegressData/Regress_data_6174genes_20cells_' fn_load_prefix = 'RegressData/Regress_data_9811genes_24cells_' # Load datasets save_vars = ['genes2enhu', 'rnau', 'df_mlevelu', 'df_atacu', 'genes'] # save_vars = ['rnau','genes'] for var in save_vars: fn = fn_load_prefix+var+'.pkl' cmd = '%s=pd.read_pickle("%s")' % (var, fn) exec(cmd) print('Loaded %s from %s' % (var, fn)) if use_kmers: with np.load(fn_load_prefix+'kmer_countsu.npz', allow_pickle=True) as x: kmer_countsu=x['kmer_countsu'] kmer_countsu = kmer_countsu/kmer_countsu.shape[1]/100 # Testing: kmer_countsu = kmer_countsu[:,:2] print('Kmers shape: ', kmer_countsu.shape) Nk=kmer_countsu.shape[1] print('Loaded kmers') else: Nk=0 # Cell type names df_cellnames = pd.read_csv( '/cndd/Public_Datasets/CEMBA/BICCN_minibrain_data/data_freeze/supp_info/clusters_final/cluster_annotation_scf_round2.tsv', sep='\t', index_col='cluster') genes2enhu = genes2enhu.iloc[[i in genes.index for i in genes2enhu['ensid']],:] genes2enhu.shape, genes2enhu.index.unique().shape celltypes = df_mlevelu.columns assert np.all(celltypes == df_atacu.columns) if (features_use=='mCG'): x = df_mlevelu.loc[genes2enhu['enh_pos'],:].to_numpy() elif (features_use=='ATAC'): x = df_atacu.loc[genes2enhu['enh_pos'],:].to_numpy() elif (features_use=='mCG_ATAC'): x1 = df_mlevelu.loc[genes2enhu['enh_pos'],:].to_numpy() x2 = df_atacu.loc[genes2enhu['enh_pos'],:].to_numpy() x = f_mcg(x1) * f_atac(x2) elif (features_use=='mCG+ATAC'): x1 = df_mlevelu.loc[genes2enhu['enh_pos'],:].to_numpy() x2 = df_atacu.loc[genes2enhu['enh_pos'],:].to_numpy() else: x = [] y = rnau.loc[genes2enhu['ensid'],:].to_numpy() print( rnau.shape, # rna by celltype df_mlevelu.shape, # enh by cell type df_atacu.shape, # enh by cell type genes.shape, # gene annotation genes2enhu.shape, # gene-enh pair x1.shape, # enh_mcg by cell type (mcg_enh for each enh-gene pair) how ? x2.shape, # enh_atac by cell type (mcg_enh for each enh-gene pair) how ? 
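# note: indexing with genes2enhu['enh_pos'] / genes2enhu['ensid'] repeats rows, so x1, x2 and y each have one row per enhancer-gene pair and stay aligned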
y.shape, # rna by cell type (rna for each enh-gene pair) ) def my_cc(x,y,ensid,doshuff=False,jshuff=0,corr_type='Pearson',use_abs=True, doshuffgene=False,verbose=False): """Calculate corr for each row of x and y x, y: enh_mcg/gene_rna (pair) vs celltype ensid: matched gene ensid for each row x, y contains no nan; but constant rows of x and y produces nan with zscoring """ t0=time.time() seed = int(time.time()*1e7 + jshuff) % 100 np.random.seed(seed) ngenes, ncells = y.shape print('Computing correlations for %d gene-enhancer pairs; jshuff=%d; ' % (ngenes, jshuff)) if doshuff: y = y[:,np.random.permutation(ncells)] # permute cells if doshuffgene: y = y[np.random.permutation(ngenes),:] # permute genes (pairs) if (corr_type=='Spearman'): y = np.argsort(y,axis=1) x = np.argsort(x,axis=1) xz = zscore(x, axis=1, nan_policy='propagate', ddof=0) yz = zscore(y, axis=1, nan_policy='propagate', ddof=0) xy_cc = np.nan_to_num(np.nanmean(xz*yz, axis=1)) # turn np.nan into zero xy_cc_df = pd.DataFrame(data=xy_cc, columns=['cc']) xy_cc_df['enh_num'] = np.arange(ngenes) xy_cc_df['ensid'] = ensid.values xy_cc_df['cc_abs'] = np.abs(xy_cc_df['cc']) if use_abs: # max abs_corr for each gene xy_cc_df = xy_cc_df.sort_values(['ensid','cc_abs'], ascending=[True,False]).drop_duplicates(['ensid']) else: # max corr for each gene xy_cc_df = xy_cc_df.sort_values(['ensid','cc'], ascending=[True,False]).drop_duplicates(['ensid']) best_cc = xy_cc_df['cc'] # corr (not abs) best_enh = xy_cc_df['enh_num'] # enh best_ensid = xy_cc_df['ensid'] # gene if verbose: print('t=%3.3f' % (time.time()-t0)) return best_cc,best_enh,best_ensid,xy_cc def my_cc_shuffgene(x, y, ensid, rnau, doshuff=False, jshuff=0, corr_type='Pearson', use_abs=True, doshuffgene=False, ): """ """ seed = int(time.time()*1e7 + jshuff) % 100 rnau_shuff = rnau.copy() rnau_shuff.index = rnau.index.values[ np.random.RandomState(seed=seed).permutation(len(rnau)) ] y_shuff = rnau_shuff.loc[ensid,:].to_numpy() return my_cc(x, y_shuff, ensid, doshuff, jshuff, corr_type, use_abs, doshuffgene, ) def corr_pipe(x, y, genes2enhu, rnau, corr_type,): """ """ # observed best_cc, best_enh, best_ensid, all_cc = my_cc(x,y,genes2enhu['ensid'],False,0,corr_type,True,False) print(best_cc.shape, best_enh.shape, best_ensid.shape, all_cc.shape) # shuffled nshuff = np.min((num_processors*16,128)) np.random.seed(0) with Pool(processes = num_processors) as p: best_cc_shuff_list = p.starmap(my_cc_shuffgene, [(x,y,genes2enhu['ensid'],rnau,False,jshuff,corr_type,True,False) for jshuff in range(nshuff)]) # significance alpha = 0.01; best_cc_shuff = np.hstack([b[0].values[:,np.newaxis] for b in best_cc_shuff_list]) # gene (best corr) by num_shuff best_cc_shuff_max = np.percentile(np.abs(best_cc_shuff), 100*(1-alpha), axis=1) # get 99% (robust max) across shuffles best_cc_shuff_mean = np.abs(best_cc_shuff).mean(axis=1) # get mean across shuffles for each gene sig = np.abs(best_cc).squeeze()>best_cc_shuff_max # corr greater than 99% of the shuffled fdr = (alpha*len(sig))/np.sum(sig) # fdr - alpha print(np.sum(sig), len(sig), alpha, fdr) return best_cc, best_enh, best_ensid, all_cc, best_cc_shuff, best_cc_shuff_max, best_cc_shuff_mean, sig, fdr import warnings warnings.filterwarnings('ignore') (best_cc_1, best_enh_1, best_ensid_1, all_cc_1, best_cc_shuff_1, best_cc_shuff_max_1, best_cc_shuff_mean_1, sig_1, fdr_1, ) = corr_pipe(x1, y, genes2enhu, rnau, corr_type,) (best_cc_2, best_enh_2, best_ensid_2, all_cc_2, best_cc_shuff_2, best_cc_shuff_max_2, best_cc_shuff_mean_2, sig_2, fdr_2, ) = 
corr_pipe(x2, y, genes2enhu, rnau, corr_type,) def plot_dists(best_cc, best_enh, best_ensid, all_cc, best_cc_shuff, best_cc_shuff_max, best_cc_shuff_mean, sig, fdr, alpha, feature): ngenes = best_cc.shape[0] fig, axs = plt.subplots(3,1,figsize=(5,10)) ax = axs[0] ax.scatter(best_cc, best_cc_shuff_mean, s=2,c=sig, cmap=ListedColormap(["gray",'red']), rasterized=True, ) ax.plot([-1,0,1],[1,0,1],'k--') ax.set_xlabel('Max %s correlation' % corr_type) ax.set_ylabel('Max %s correlation\n(Mean of shuffles)' % corr_type) ax.set_title('%s\n%d/%d=%3.1f%%\nsig. genes (p<%3.2g, FDR=%3.1f%%)' % ( feature, sig.sum(),ngenes, 100*sig.sum()/ngenes, alpha, fdr*100), ) ax = axs[1] bins = np.arange(-2,2,0.1) hist_config = { 'histtype': 'bar', 'edgecolor': 'none', 'alpha': 0.5, 'density': False, } _vec = best_cc.squeeze()/best_cc_shuff_mean.squeeze() cond_pos_sig = np.logical_and(sig, best_cc > 0) cond_neg_sig = np.logical_and(sig, best_cc <= 0) ax.hist(_vec, bins=bins, color='gray', label='All genes', **hist_config, ) ax.hist(_vec[sig], bins=bins, color='red', label='Significant', **hist_config, ) ax.axvline(-1, linestyle='--', color='k') ax.axvline(1, linestyle='--', color='k') ax.set_xlabel(corr_type+' correlation/(Mean abs. corr. of shuffles)') ax.set_ylabel('Number of genes') num_sig, num_pos_sig, num_neg_sig = (sig.sum(), cond_pos_sig.sum(), cond_neg_sig.sum(), ) ax.set_title("Num. pos={} ({:.1f}%)\nNum. neg={} ({:.1f}%)".format( num_pos_sig, num_pos_sig/num_sig*100, num_neg_sig, num_neg_sig/num_sig*100, )) ax.legend(bbox_to_anchor=(1,1)) ax = axs[2] bins = bins=np.arange(0,1,0.02) hist_config = { 'histtype': 'bar', 'edgecolor': 'none', 'alpha': 0.5, 'density': True, } ax.hist(np.abs(all_cc), bins=bins, color='C1', label='All enh-gene pairs', **hist_config, ) ax.hist(best_cc_shuff.reshape(-1,1), bins=bins, color='gray', label='Best (all shuffles)', **hist_config, ) ax.hist(best_cc_shuff_max, bins=bins, color='C2', label='Best (max. 
shuffle)', **hist_config, ) ax.hist(best_cc_shuff_mean, bins=bins, color='C0', label='Best (mean shuffle)', **hist_config, ) ax.hist(best_cc.squeeze(), bins=bins, color='C3', label='Best (data)', **hist_config, ) ax.legend(bbox_to_anchor=(1,1)) ax.set_xlabel(corr_type+' correlation') ax.set_ylabel('Density of genes') fig.subplots_adjust(hspace=0.9) fn_plot = output.format("genes_corr_"+feature+'_'+corr_type) snmcseq_utils.savefig(fig, fn_plot) print('Saved %s' % fn_plot) alpha = 0.01 feature = 'mCG' plot_dists(best_cc_1, best_enh_1, best_ensid_1, all_cc_1, best_cc_shuff_1, best_cc_shuff_max_1, best_cc_shuff_mean_1, sig_1, fdr_1, alpha, feature) feature = 'ATAC' plot_dists(best_cc_2, best_enh_2, best_ensid_2, all_cc_2, best_cc_shuff_2, best_cc_shuff_max_2, best_cc_shuff_mean_2, sig_2, fdr_2, alpha, feature) # np.savez( # output.format('GenesCorr_%s_%s.npz' % (features_use, today)), # best_cc=best_cc,best_enh=best_enh,best_ensid=best_ensid, # sig=sig, best_cc_shuff=best_cc_shuff) # print('Saved data; t=%3.3f; %s' % (time.time()-t00, datetime.datetime.today())) # check randomness # plt.scatter(np.arange(best_cc_shuff.shape[1]), best_cc_shuff[0]) # plt.scatter(np.arange(best_cc_shuff.shape[1]), best_cc_shuff[1]) # plt.scatter(np.arange(best_cc_shuff.shape[1]), best_cc_shuff[2]) # plt.title("num_processors = {}".format(num_processors)) # plt.xlabel('n shuffle') # plt.ylabel('corr for a gene-enh pair') genes2enhu.head() genes2enhu['cc'] = all_cc best_ensid_inv = pd.Series(best_ensid.index.values, index=best_ensid) i = best_ensid_inv.loc[genes2enhu.index].values genes2enhu['best_cc'] = genes2enhu.iloc[i,:]['cc'] i = pd.Series(np.arange(best_ensid.shape[0]), index=best_ensid) genes2enhu['best_cc_shuff_max'] = best_cc_shuff_max[i.loc[genes2enhu.index]] isig = sig[best_ensid_inv.loc[genes2enhu.index]].values genes2enhu['sig'] = (genes2enhu['cc'].abs() >= genes2enhu['best_cc_shuff_max'].abs()) genes2enhu['nonsig'] = (genes2enhu['cc'].abs() < genes2enhu['best_cc_shuff_max'].abs()) # How many enhancers are # best_cc_shuff_max nsig = genes2enhu.groupby(level=0).sum()[['sig','nonsig']] nsig['best_cc'] = best_cc.values plt.semilogy(nsig['best_cc'], nsig['sig'], '.', markersize=5); # top significant genes nsig['gene_name'] = genes2enhu.loc[nsig.index,:]['gene_name'].drop_duplicates() nsig.sort_values('sig').iloc[-10:,:] def my_cdfplot(ax, x, label=''): ax.semilogx(np.sort(np.abs(x)), np.linspace(0,1,len(x)), label='%s (%d)\nd=%3.1f±%3.1f kb' % (label, len(x), x.mean()/1000, x.std()/1000/np.sqrt(len(x)))) return fig, axs = plt.subplots(1, 2, figsize=(8,5)) ax = axs[0] hist_config = { 'histtype': 'bar', 'edgecolor': 'none', 'alpha': 1, 'density': False, } ax.hist(nsig['sig'].values, bins=np.arange(100), **hist_config ) ax.set_xlabel('Number of significant enhancers') ax.set_ylabel('Number of genes') ax.set_yscale('log') ax = axs[1] my_cdfplot(ax, nsig['sig'].values,) ax.set_xlabel('Number of significant enhancers') ax.set_ylabel('Cumulative fraction of genes') fig.tight_layout() snmcseq_utils.savefig(fig, output_fig.format('GenesCorr_NumSigEnh_%s_%s_%s.pdf' % (features_use, today, corr_type)) ) ``` # Stage 1.5: Compare ATAC and mC ``` print(all_cc_1.shape, best_cc_1.shape, sig_1.shape, best_cc_1[sig_1].shape, best_ensid_1.shape, best_enh_1.shape) # best_cc_1[sig_1] all_cc_2[best_enh_1[sig_1].index.values].shape fig, ax = plt.subplots() ax.scatter(all_cc_1, all_cc_2, color='lightgray', s=1, alpha=0.3, rasterized=True,) ax.scatter( all_cc_1[best_enh_1.index.values], # same as best_cc_1[sig_1] 
all_cc_2[best_enh_1.index.values], color='lightblue', label='best mCG', s=1, alpha=0.5, rasterized=True,) ax.scatter( all_cc_1[best_enh_2.index.values], # same as best_cc_1[sig_1] all_cc_2[best_enh_2.index.values], color='wheat', label='best ATAC', s=1, alpha=0.5, rasterized=True,) ax.scatter( all_cc_1[best_enh_1[sig_1].index.values], # same as best_cc_1[sig_1] all_cc_2[best_enh_1[sig_1].index.values], color='C0', label='sig. mCG', s=1, alpha=0.5, rasterized=True,) ax.scatter( all_cc_1[best_enh_2[sig_2].index.values], # same as best_cc_1[sig_1] all_cc_2[best_enh_2[sig_2].index.values], color='C1', label='sig. ATAC', s=1, alpha=0.5, rasterized=True,) ax.legend(bbox_to_anchor=(1,1)) ax.set_xlabel('mCG-RNA {} corr'.format(corr_type)) ax.set_ylabel('ATAC-RNA {} corr'.format(corr_type)) snmcseq_utils.savefig(fig, output_fig.format('mCG_ATAC_agreement_%s_%s_%s.pdf' % (features_use, today, corr_type)) ) plt.show() fig, ax = plt.subplots() ax.scatter( all_cc_1[best_enh_1.index.values], # same as best_cc_1[sig_1] all_cc_2[best_enh_1.index.values], color='lightgray', label='best', s=1, alpha=0.3, rasterized=True,) ax.scatter( all_cc_1[best_enh_2.index.values], # same as best_cc_1[sig_1] all_cc_2[best_enh_2.index.values], color='lightgray', label='best', s=1, alpha=0.5, rasterized=True,) ax.scatter( all_cc_1[best_enh_1[sig_1].index.values], # same as best_cc_1[sig_1] all_cc_2[best_enh_1[sig_1].index.values], color='C0', label='sig. mCG', s=1, alpha=0.5, rasterized=True,) ax.scatter( all_cc_1[best_enh_2[sig_2].index.values], # same as best_cc_1[sig_1] all_cc_2[best_enh_2[sig_2].index.values], color='C1', label='sig. ATAC', s=1, alpha=0.5, rasterized=True,) ax.legend(bbox_to_anchor=(1,1)) ax.set_xlabel('mCG-RNA {} corr'.format(corr_type)) ax.set_ylabel('ATAC-RNA {} corr'.format(corr_type)) snmcseq_utils.savefig(fig, output_fig.format('mCG_ATAC_agreement2_%s_%s_%s.pdf' % (features_use, today, corr_type)) ) plt.show() from matplotlib_venn import venn2 fig, ax = plt.subplots() venn2([set(best_ensid_1[sig_1].values), set(best_ensid_2[sig_2].values)], set_labels=('sig. mCG', 'sig. ATAC'), set_colors=('C0', 'C1'), ax=ax ) ax.set_title('Overlap of sig. genes') snmcseq_utils.savefig(fig, output_fig.format('mCG_ATAC_agreement3_%s_%s_%s.pdf' % (features_use, today, corr_type)) ) plt.show() fig, ax = plt.subplots() venn2([set(sig_1[sig_1].index.values), set(sig_2[sig_2].index.values)], set_labels=('sig. mCG', 'sig. ATAC'), set_colors=('C0', 'C1'), ax=ax ) ax.set_title('Overlap of sig. gene-enhancer pairs') snmcseq_utils.savefig(fig, output_fig.format('mCG_ATAC_agreement4_%s_%s_%s.pdf' % (features_use, today, corr_type)) ) plt.show() ``` # Stage 2: Regression modeling across sig. genes ``` # Are there any duplicate enhancers? _x = genes2enhu.iloc[(best_enh_1[sig_1].values),:] nenh_sig = len(_x) nenh_sig_unique = len(_x['enh_pos'].unique()) nenh_sig_genes_unique = len(_x['ensid'].unique()) print(nenh_sig, nenh_sig_unique, nenh_sig_genes_unique) # best_enh_1[sig_1] # get sig. 
mC enhancer-gene pairs (1 for each gene) only mc_u = df_mlevelu.loc[genes2enhu['enh_pos'],:].to_numpy()[best_enh_1[sig_1],:] atac_u = df_atacu.loc[genes2enhu['enh_pos'],:].to_numpy()[best_enh_1[sig_1],:] rna_u = rnau.loc[genes2enhu['ensid'],:].to_numpy()[best_enh_1[sig_1],:].copy() genes2enhu_u = genes2enhu.iloc[best_enh_1[sig_1],:].copy() genes2enhu_u = genes2enhu_u.drop('ensid',axis=1).reset_index() # genes2enhu.iloc[(best_enh_1[sig_1].values),:]['enh_pos'].shape # cc_mc_rna = np.array([np.corrcoef(x1,y1)[0,1] for (x1,y1) in zip(mc_u,rna_u)]) # cc_atac_rna = np.array([np.corrcoef(x1,y1)[0,1] for (x1,y1) in zip(atac_u,rna_u)]) # genes2enhu_u.loc[:,'cc_mc_rna'] = cc_mc_rna # genes2enhu_u.loc[:,'cc_atac_rna'] = cc_atac_rna # genes2enhu_u.sort_values('cc_mc_rna') # # genes2enhu_u['cc_atac_rna'] = cc_atac_rna # fig, ax = plt.subplots() # sig_pos = (genes2enhu_u['cc_mc_rna']<0) & (genes2enhu_u['cc_atac_rna']>0) # sig_neg = (genes2enhu_u['cc_mc_rna']>0) & (genes2enhu_u['cc_atac_rna']<-0) # ax.plot(cc_mc_rna, cc_atac_rna, '.', color='gray', label='%d significnat pairs' % np.sum(sig)) # ax.plot(cc_mc_rna[sig_pos], cc_atac_rna[sig_pos], 'r.', label='%d corr pairs' % np.sum(sig_pos)) # ax.plot(cc_mc_rna[sig_neg], cc_atac_rna[sig_neg], 'g.', label='%d anti-corr pairs' % np.sum(sig_neg)) # ax.set_xlabel('Correlation mCG vs. RNA') # ax.set_ylabel('Correlation ATAC vs. RNA') # ax.legend(bbox_to_anchor=(1,1)) # print('We found %d significant enhancer-gene links, covering %d unique enhancers and %d unique genes' % # (nenh_sig, nenh_sig_unique, nenh_sig_genes_unique)) # print('%d of these have the expected correlation (negative for mCG, positive for ATAC)' % # (np.sum(sig_pos))) # print('%d of these have the opposite correlation (positive for mCG, negative for ATAC)' % # (np.sum(sig_neg))) # snmcseq_utils.savefig(fig, output_fig.format( # 'EnhancerRegression_SigEnhancers_scatter_mCG_ATAC_corr_%dGenes_%dCelltypes_%s' % # (genes2enhu.ensid.unique().shape[0], len(celltypes), today) # )) # fig, ax = plt.subplots(figsize=(7,4)) # my_cdfplot(ax, genes2enhu['dtss'], label='All pairs') # my_cdfplot(ax, genes2enhu_u['dtss'], label='Best pair for each gene') # my_cdfplot(ax, genes2enhu_u['dtss'][sig_pos], label='Positive corr') # my_cdfplot(ax, genes2enhu_u['dtss'][sig_neg], label='Negative corr') # ax.legend(bbox_to_anchor=(1, 0.8)) # ax.set_xlim([1e3,3e5]) # ax.set_xlabel('Distance of enhancer from TSS') # ax.set_ylabel('Cumulative fraction') # ax.set_yticks(ticks=[0,.25,.5,.75,1]); # snmcseq_utils.savefig(fig, output_fig.format( # 'EnhancerRegression_SigEnhancers_dTSS_cdf_%dGenes_%dCelltypes_%s' % # (genes2enhu.ensid.unique().shape[0], len(celltypes), today) # )) # Ordinary linear regression with CV from sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_validate from sklearn.metrics import r2_score, make_scorer from sklearn.preprocessing import PolynomialFeatures X = np.concatenate((mc_u,atac_u),axis=1).copy() y = np.log10(rna_u+1).copy() X = zscore(X, axis=0) y = zscore(y, axis=0) y = y - np.mean(y,axis=1,keepdims=True) # X = X[sig_pos,:] # y = y[sig_pos,:] mdl = LinearRegression(fit_intercept=True, normalize=True) ngenes,ncells = y.shape print('%d genes, %d celltypes' % (ngenes,ncells)) intxn_order = 3 my_r2 = make_scorer(r2_score) res_cv = {} cv = 5 for i,yi in enumerate(y.T): # Regression using only mCG and ATAC from the same cell type Xu = X[:,[i,i+ncells]] Xu = np.concatenate((X[:,[i,i+ncells]], # np.mean(X[:,:ncells],axis=1,keepdims=True), # 
np.mean(X[:,ncells:],axis=1,keepdims=True), ),axis=1) # Xu = PolynomialFeatures(degree=3, include_bias=False).fit_transform(Xu) res_cvi = cross_validate(mdl,Xu,yi,cv=cv, scoring=my_r2, return_train_score=True, verbose=0) if i==0: print('Simple model: %d parameters' % Xu.shape[1]) dof_simple=Xu.shape[1] for m in res_cvi: if (m in res_cv): res_cv[m] = np.vstack((res_cv[m], res_cvi[m])) else: res_cv[m]=res_cvi[m] # Regression using mCG and ATAC from the same cell type, as well as the mean across all cell types # res_cvi = cross_validate(mdl,X,yi,cv=cv, # scoring=my_r2, # return_train_score=True, # verbose=0) Xu = np.concatenate((X[:,[i,i+ncells]], np.mean(X[:,:ncells],axis=1,keepdims=True), np.mean(X[:,ncells:],axis=1,keepdims=True), ),axis=1) Xu = PolynomialFeatures(degree=intxn_order, include_bias=False).fit_transform(Xu) res_cvi = cross_validate(mdl, Xu, yi, cv=cv, scoring=my_r2, return_train_score=True, verbose=0) if i==0: print('Complex model: %d parameters' % Xu.shape[1]) dof_complex=Xu.shape[1] for m1 in res_cvi: m = m1+'_all' if (m in res_cv): res_cv[m] = np.vstack((res_cv[m], res_cvi[m1])) else: res_cv[m]=res_cvi[m1] cellnames = df_cellnames.loc[celltypes]['annot'] # Show the OLS results def myplot(ax, x, label='', fmt=''): x[x<0] = 0 # xu = np.sqrt(x) xu = x ax.errorbar(cellnames, xu.mean(axis=1), xu.std(axis=1)/np.sqrt(cv), label=label, fmt=fmt) return fig, ax = plt.subplots(figsize=(8,6)) myplot(ax, res_cv['train_score'], fmt='rs-', label='Train simple model:\nRNA~mCG+ATAC\n(%d params)' % dof_simple) myplot(ax, res_cv['test_score'], fmt='ro-', label='Test') myplot(ax, res_cv['train_score_all'], fmt='bs--', label='Train complex model:\nRNA~mCG+ATAC+mean(mCG)+mean(ATAC)+%dth order intxn\n(%d params)' % (intxn_order, dof_complex)) myplot(ax, res_cv['test_score_all'], fmt='bo--', label='Test') ax.legend(bbox_to_anchor=(1, 1)) ax.set_xlabel('Cell type') ax.set_ylabel('Score (R^2)') ax.xaxis.set_tick_params(rotation=90) ax.grid(axis='y') ax.set_title('%d genes, separate model for each of %d celltypes' % y.shape) snmcseq_utils.savefig(fig, output_fig.format( 'EnhancerRegression_SigEnhancers_OLS_%dGenes_%dCelltypes_%s' % (genes2enhu.ensid.unique().shape[0], len(celltypes), today) )) # # Multi-task LASSO regression with CV # from sklearn.linear_model import MultiTaskLassoCV # t0=time.time() # mdl = MultiTaskLassoCV(fit_intercept=True, normalize=True, cv=cv, # selection='random', # random_state=0) # X = np.concatenate((mc_u,atac_u),axis=1).copy() # y = np.log10(rna_u+1).copy() # X = zscore(X[sig_pos,:], axis=0) # y = zscore(np.log10(y[sig_pos,:]+1), axis=0) # reg = mdl.fit(X,y) # print('Done fitting LASSO, t=%3.3f s' % (time.time()-t0)) # plt.errorbar(reg.alphas_, reg.mse_path_.mean(axis=1), reg.mse_path_.std(axis=1)) # plt.vlines(reg.alpha_, plt.ylim()[0], plt.ylim()[1], 'k') # plt.xscale('log') # Single task LASSO with CV, interaction terms from sklearn.linear_model import LassoCV Xu_all = [] for i,yi in enumerate(y.T): Xu = np.concatenate((X[:,[i,i+ncells]], np.mean(X[:,:ncells],axis=1,keepdims=True), np.mean(X[:,ncells:],axis=1,keepdims=True), ),axis=1) Xu_all.append(Xu.T) Xu_all = np.dstack(Xu_all).reshape(4,-1).T Xu_fit = PolynomialFeatures(degree=intxn_order, include_bias=False) Xu_all = Xu_fit.fit_transform(Xu_all) feature_names = Xu_fit.get_feature_names(input_features=['mC','A','mCm','Am']) print(Xu_all.shape, y.shape) yu = y.ravel() print(Xu_all.shape, yu.shape) t0=time.time() mdl = LassoCV(fit_intercept=True, normalize=True, cv=cv, selection='random', random_state=0, n_jobs=8) 
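# Each row of Xu_all corresponds to one (gene, cell type) pair, flattened in the same
# gene-major order as yu = y.ravel(); the four base columns are the cell-type-specific
# mCG and ATAC values plus their means across cell types ('mC', 'A', 'mCm', 'Am').
# With intxn_order = 3, PolynomialFeatures expands these 4 columns into all monomials of
# degree 1-3 (34 features, bias excluded), so the single LassoCV below is fit jointly
# across every gene and cell type rather than one model per cell type.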
reg = mdl.fit(Xu_all,yu) print('Done fitting LASSO, t=%3.3f s' % (time.time()-t0)) plt.errorbar(reg.alphas_, reg.mse_path_.mean(axis=1), reg.mse_path_.std(axis=1)) plt.vlines(reg.alpha_, plt.ylim()[0], plt.ylim()[1], 'k') plt.xscale('log') plt.xlabel('LASSO Regularization (lambda)') plt.ylabel('MSE') yhat = reg.predict(Xu_all).reshape(y.shape) cc = [np.corrcoef(y1,y1hat)[0,1] for (y1,y1hat) in zip(y.T,yhat.T)] fig, ax = plt.subplots(figsize=(10,5)) ax.plot(cellnames, np.power(cc, 2), 'o-', color='C1', label='LASSO fit, single model for all cell types') # myplot(ax, res_cv['test_score_all'], label='Test (RNA~mCG+ATAC+mean(mCG)+mean(ATAC)+Intxn)', fmt='o--') myplot(ax, res_cv['train_score'], fmt='rs-', label='Train simple model:\nRNA~mCG+ATAC\n(%d params)' % dof_simple) myplot(ax, res_cv['test_score'], fmt='ro-', label='Test') myplot(ax, res_cv['train_score_all'], fmt='bs--', label='Train complex model:\nRNA~mCG+ATAC+mean(mCG)+mean(ATAC)+%dth order intxn\n(%d params)' % (intxn_order, dof_complex)) myplot(ax, res_cv['test_score_all'], fmt='bo--', label='Test') ax.legend(bbox_to_anchor=(1, 0.8)) ax.set_xlabel('Cell type') ax.set_ylabel('Score (R^2)') ax.xaxis.set_tick_params(rotation=90) ax.grid(axis='y') ax.set_ylim([0,0.8]) ax.set_title('Model for %d genes across %d celltypes' % y.shape) snmcseq_utils.savefig(fig, output_fig.format( 'EnhancerRegression_SigEnhancers_CompareLASSO_%dGenes_%dCelltypes_%s' % (genes2enhu.ensid.unique().shape[0], len(celltypes), today) )) fig, ax = plt.subplots(figsize=(10,5)) show = np.abs(reg.coef_)>0.01 show = np.argsort(np.abs(reg.coef_))[-30:][::-1] ax.bar(np.array(feature_names)[show], reg.coef_[show]) ax.xaxis.set_tick_params(rotation=90) ax.set_ylabel('Regression coefficient') ax.grid(axis='y') snmcseq_utils.savefig(fig, output_fig.format( 'EnhancerRegression_SigEnhancers_LASSO_CorrCoef_%dGenes_%dCelltypes_%s' % (genes2enhu.ensid.unique().shape[0], len(celltypes), today) )) ``` # Apply the nonlinear model to all enhancer ``` mc_u = df_mlevelu.loc[genes2enhu['enh_pos'],:].to_numpy() atac_u = df_atacu.loc[genes2enhu['enh_pos'],:].to_numpy() genes2enhu_u = genes2enhu.copy() genes2enhu_u = genes2enhu_u.drop('ensid',axis=1).reset_index() rna_u = rnau.loc[genes2enhu['ensid'],:].to_numpy() rna_u.shape, mc_u.shape, atac_u.shape X = np.concatenate((mc_u,atac_u),axis=1).copy() y = np.log10(rna_u+1).copy() X = zscore(X, axis=0) y = zscore(y, axis=0) y = y - np.mean(y,axis=1,keepdims=True) X.shape, y.shape Xu_all = [] for i,yi in enumerate(y.T): Xu = np.concatenate((X[:,[i,i+ncells]], np.mean(X[:,:ncells],axis=1,keepdims=True), np.mean(X[:,ncells:],axis=1,keepdims=True), ),axis=1) Xu_all.append(Xu.T) Xu_all = np.dstack(Xu_all).reshape(4,-1).T Xu_fit = PolynomialFeatures(degree=intxn_order, include_bias=False).fit(Xu_all) feature_names = Xu_fit.get_feature_names(input_features=['mC','A','mCm','Am']) Xu_all = PolynomialFeatures(degree=intxn_order, include_bias=False).fit_transform(Xu_all) Xu_all.shape, y.shape yhat = reg.predict(Xu_all).reshape(y.shape) x = df_mlevelu.loc[genes2enhu['enh_pos'],:].to_numpy() best_cc,best_enh,best_ensid,all_cc = my_cc(-x,y,genes2enhu['ensid'],False,0,corr_type) (~np.isfinite(best_cc2)).sum() best_cc2,best_enh2,best_ensid2,all_cc2 = my_cc(yhat,y,genes2enhu['ensid'],False,0,corr_type) plt.figure(figsize=(10,10)) plt.plot(np.abs(all_cc[best_enh]), np.abs(all_cc2[best_enh]), '.', markersize=1, rasterized=True) plt.plot(np.abs(all_cc[best_enh2]), np.abs(all_cc2[best_enh2]), '.', markersize=1, rasterized=True) plt.plot([0,1],[0,1],'k') 
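# The scatter above compares, for each gene, the |correlation| achieved by its best
# enhancer under raw (sign-flipped) mCG (all_cc, x-axis) against the |correlation|
# achieved by the LASSO prediction yhat for the same pairs (all_cc2, y-axis); the
# second series repeats this for the enhancers selected under the model (best_enh2),
# and the black diagonal marks equal performance of the two scores.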
np.abs(best_cc2)/(np.abs(best_cc)+1e-6) best_cc2.shape, best_cc.shape plt.hist(np.abs(best_cc2).values/np.abs(best_cc).values, bins=np.arange(0.7,1.3,0.01)); print(np.abs(best_cc2).values/np.abs(best_cc).values.mean()) # For each gene, find all enhancers with significant cc df = pd.DataFrame(data=all_cc, columns=['cc'], index=genes2enhu[['ensid','enh_pos']]) df['ensid'] = genes2enhu['ensid'].values df['enh_pos'] = genes2enhu['enh_pos'].values df['cc2'] = all_cc2 df['good_pairs'] = df['cc']>0.6 df['good_pairs2'] = df['cc2']>0.6 npairs_df=df.groupby('ensid')[['good_pairs','good_pairs2']].sum() plt.loglog(npairs_df['good_pairs']+1,npairs_df['good_pairs2']+1,'.') plt.plot([1,1e3],[1,1e3],'k') np.mean((npairs_df['good_pairs2']+1)/(npairs_df['good_pairs']+1)) ``` # Average over all the enhancers linked to a single gene ``` def myz(x): z = zscore(x, axis=1, nan_policy='omit', ddof=0) return z def make_df(z): z_df = pd.DataFrame(data=z, columns=df_mlevelu.columns, index=rnau.index) return z_df multiEnh = {} multiEnh['rna'] = myz(rnau.values); multiEnh['rna_hat_1Enh'] = myz(yhat[best_enh2,:]) multiEnh['rna_hat_AllEnh'] = myz(yhat[best_enh2,:]) multiEnh['rna_hat_AllSigEnh'] = np.zeros(yhat[best_enh2,:].shape)+np.nan; t0=time.time() for i,c in enumerate(celltypes): df = pd.DataFrame(data=yhat[:,i], columns=['yhat']) df['ensid'] = genes2enhu.loc[:,'ensid'].values multiEnh['rna_hat_AllEnh'][:,i] = df.groupby('ensid')['yhat'].mean() df = df.loc[genes2enhu.sig.values,:] multiEnh['rna_hat_AllSigEnh'][sig,i] = df.groupby('ensid')['yhat'].mean() multiEnh['rna'] = make_df(multiEnh['rna']); multiEnh['rna_hat_1Enh'] = make_df(multiEnh['rna_hat_1Enh']); multiEnh['rna_hat_AllEnh'] = make_df(multiEnh['rna_hat_AllEnh']) multiEnh['rna_hat_AllSigEnh'] = make_df(multiEnh['rna_hat_AllSigEnh']) print(time.time()-t0) cc_1Enh = np.diag(np.corrcoef(multiEnh['rna'].values, multiEnh['rna_hat_1Enh'].values, rowvar=False)[:ncells,ncells:]) cc_AllEnh = np.diag(np.corrcoef(multiEnh['rna'].values, multiEnh['rna_hat_AllEnh'].values, rowvar=False)[:ncells,ncells:]) cc_AllSigEnh = np.diag(np.corrcoef(multiEnh['rna'].values[sig,:], multiEnh['rna_hat_AllSigEnh'].values[sig,:], rowvar=False)[:ncells,ncells:]) plt.plot(cellnames, cc_1Enh, label='1 enhancer') plt.plot(cellnames, cc_AllEnh, label='All enhancers') plt.plot(cellnames, cc_AllSigEnh, label='Significant enhancers') plt.legend(bbox_to_anchor=(1,1)) plt.xticks(rotation=90); plt.ylabel('Correlation across genes') def cc_gene(x,y): c = np.nan_to_num([np.corrcoef(x1,y1)[0,1] for (x1,y1) in zip(x,y)]) return c cc_1Enh = cc_gene(multiEnh['rna'].values, multiEnh['rna_hat_1Enh'].values) cc_AllEnh = cc_gene(multiEnh['rna'].values, multiEnh['rna_hat_AllEnh'].values) cc_AllSigEnh = cc_gene(multiEnh['rna'].values, multiEnh['rna_hat_AllSigEnh'].values) fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(2,2,1) ax.plot(np.abs(cc_1Enh), np.abs(cc_AllEnh), '.', markersize=1, rasterized=True) ax.set_xlabel('Corr with 1 best enhancer') ax.set_ylabel('Corr with avg. prediction\nbased on all enhancers') ax = fig.add_subplot(2,2,2) ax.plot(np.abs(cc_1Enh), np.abs(cc_AllSigEnh), '.', markersize=1, rasterized=True) ax.set_xlabel('Corr with 1 best enhancer') ax.set_ylabel('Corr with avg. prediction\nbased on sig. 
enhancers') ax = fig.add_subplot(2,1,2) bins = np.arange(-1,1,1/100) hist_config = { 'histtype': 'bar', 'edgecolor': 'none', 'alpha': 0.5, 'density': False, } ax.hist(np.abs(cc_AllEnh)-np.abs(cc_1Enh), bins=bins, label='All enhancers-Best enhancer', **hist_config, ) ax.hist(np.abs(cc_AllSigEnh)-np.abs(cc_1Enh), bins=bins, label='Sig enhancers-Best enhancer', **hist_config, ) ax.legend(bbox_to_anchor=(1,1)) ax.set_xlabel('Difference in correlation') ax.set_ylabel('Number of genes') fig.subplots_adjust(wspace=0.5, hspace=0.3) snmcseq_utils.savefig(fig, output_fig.format( 'EnhancerRegression_Correlation_1Enh_vs_AllEnh_%dGenes_%dCelltypes_%s' % (genes2enhu.ensid.unique().shape[0], len(celltypes), today)) ) ``` # Nonlinear model fitting ``` import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) X = np.concatenate((mc_u,atac_u),axis=1).copy() y = np.log10(rna_u+1).copy() ngenes,ncells = y.shape X.shape, y.shape # Define a class for the NN architecture Ngenes, Nc = y.shape Nx = X.shape[1] N1 = 128 N2 = 32 N3 = 0 class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(Nx, N1); self.fc2 = nn.Linear(N1, N2); # self.fc3 = nn.Linear(N2, N3); self.fc4 = nn.Linear(N2, Nc); def forward(self, x): x = F.relu(self.fc1(x)) # Out: N x N1 x = F.relu(self.fc2(x)) # Out: N x N2 # x = F.relu(self.fc3(x)) # Out: N x N3 x = self.fc4(x) # Out: N x C return x # Initialize def myinit(): global net, optimizer, criterion, scheduler, loss_test, loss_train, test, train, ensids net = Net() net.to(device) # # Initialize the kmer weights to 0 and turn off learning # net.fc1_kmers.requires_grad_(False) # net.fc1_kmers.weight.fill_(0) # net.fc1_kmers.bias.fill_(0) criterion = nn.MSELoss(reduction='sum') optimizer = optim.Adam(net.parameters(), lr=lr) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.25) loss_test=np.array([]) loss_train = np.array([]) # Train/Test split test = (np.random.rand(Ngenes,1)<0.2) train = [not i for i in test] test = np.random.permutation(np.nonzero(test)[0]).squeeze() train = np.random.permutation(np.nonzero(train)[0]).squeeze() ensids = rnau.index.values return def train_epoch(epoch): nsamp = 0 running_loss = 0.0 running_time = 0.0 net.train() t0train = time.time() for i in range(0, len(train), batch_size): tstart = time.time() indices = train[i:i+batch_size] # Input should be of size: (batch, channels, samples) batch_X = torch.tensor(X[indices,:],dtype=torch.float) batch_y = torch.tensor(y[indices,:],dtype=torch.float) # Send training data to CUDA if device is not "cpu": batch_X = batch_X.to(device) batch_y = batch_y.to(device) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(batch_X) loss = criterion(outputs, batch_y) loss.backward() optimizer.step() running_loss += loss.item() running_time += time.time()-tstart nsamp += len(indices) if (time.time()-t0train)>5: print('Epoch %d, i=%d/%d, LR=%3.5g, loss=%3.8f, t=%3.3f, %3.5f s/sample' % (epoch, i, len(train), optimizer.state_dict()['param_groups'][0]['lr'], running_loss/nsamp, running_time, running_time/nsamp)) t0train=time.time() return running_loss/nsamp def test_epoch(epoch): net.eval() running_loss_test = 0.0 nsamp = 0 yyhat = {'y':[], 'yhat':[]} for i in range(0, len(test), batch_size): indices = test[i:i+batch_size] # Input should be of size: (batch, channels, samples) batch_X = 
torch.tensor(X[indices,:],dtype=torch.float) batch_y = torch.tensor(y[indices,:],dtype=torch.float) # Send training data to CUDA if device is not "cpu": batch_X = batch_X.to(device) batch_y = batch_y.to(device) # forward + backward + optimize outputs = net(batch_X) loss = criterion(outputs, batch_y) running_loss_test += loss.item() nsamp += len(indices) yyhat['yhat'].append(outputs.detach().cpu().numpy()) yyhat['y'].append(batch_y.detach().cpu().numpy()) return running_loss_test/nsamp lr = 0.0002 myinit() train.shape, test.shape import glob from IPython import display def test_net(indices): net.eval() yyhat = {'y':[], 'yhat':[]} for i in range(0, len(indices), batch_size): i = indices[i:i+batch_size] # Input should be of size: (batch, channels, samples) batch_X = torch.tensor(X[indices,:],dtype=torch.float) batch_y = torch.tensor(y[indices,:],dtype=torch.float) # Send training data to CUDA if device is not "cpu": batch_X = batch_X.to(device) outputs = net(batch_X) yyhat['yhat'].append(outputs.detach().cpu().numpy()) yyhat['y'].append(batch_y.numpy()) yyhat['yhat'] = np.concatenate(yyhat['yhat'],axis=0) yyhat['y'] = np.concatenate(yyhat['y'],axis=0) cc = np.zeros((Nc,1)) for i in range(yyhat['y'].shape[1]): cc[i,0] = np.corrcoef(yyhat['y'][:,i], yyhat['yhat'][:,i])[0,1] return yyhat, cc def make_plot1(save=False): plt.figure(figsize=(15,4)) plt.clf() plt.subplot(1,3,1) plt.semilogx(loss_train[2:],'o-',label='Train') plt.plot(loss_test[2:],'o-',label='Test') plt.legend() plt.xlabel('Epochs') plt.ylabel('Loss') plt.title(fn_save) plt.subplot(1,3,2) plt.plot(yyhat_test['y'].T, yyhat_test['yhat'].T,'.'); plt.plot([0,3],[0,3],'k--') plt.xlabel('True RNA expression') plt.ylabel('Estimated RNA expression') plt.subplot(1,3,3) plt.plot(np.arange(Nc), cc) plt.ylabel('R^2?') plt.xlabel('Cell type') plt.legend(['Train','Test']) if save: fn_plot = output_fig.format(fn_save.replace('.torch','')+'_corrcoef').replace('pdf', 'png') plt.savefig(fn_plot) print('Saved plot: '+fn_plot) plt.tight_layout() plt.show(); def make_plot2(save=False): plt.figure(figsize=(20,20)) for i in range(Nc): plt.subplot(5,6,i+1) plt.plot([0,2],[0,2],'k--') plt.plot(yyhat_train['y'][:,i], yyhat_train['yhat'][:,i],'.'); plt.plot(yyhat_test['y'][:,i], yyhat_test['yhat'][:,i],'.'); # cc = np.corrcoef(yyhat['y'][:,i], yyhat['yhat'][:,i])[0,1] plt.title('r=%3.3f train/%3.3f test' % (cc[i,0], cc[i,1])) if save: fn_plot = output_fig.format(fn_save.replace('.torch','')+'_scatter').replace('pdf', 'png') plt.savefig(fn_plot) print('Saved plot: '+fn_plot) plt.tight_layout() plt.show(); num_epochs1 = 1000 fn_id = len(glob.glob('./RegressEnh*.pt'))+1 # Generate a unique ID for this run fn_save = 'RegressEnh%0.4d_%s_N_%d_%d_%d.%s.pt' % (fn_id, ('UseKmers' if use_kmers else 'NoKmers'), N1,N2,N3,today) t0 = time.time() batch_size = 16 for epoch in range(num_epochs1): # loop over the dataset multiple times # while epoch<num_epochs1: new_loss_train = train_epoch(epoch); loss_train = np.append(loss_train, new_loss_train) new_loss_test = test_epoch(epoch); loss_test = np.append(loss_test,new_loss_test) scheduler.step(new_loss_test) print('**** Phase1 epoch %d, LR=%3.5g, loss_train=%3.8f, loss_test=%3.8f, time = %3.5f s/epoch' % ( len(loss_train), optimizer.param_groups[0]['lr'], loss_train[-1], loss_test[-1], (time.time()-t0)) ) if (time.time()-t0)>60 or (epoch==num_epochs1-1): if (epoch>0): cc = np.zeros((Nc,2)) yyhat_train, cc[:,[0]] = test_net(random.sample(train.tolist(), 500)) yyhat_test, cc[:,[1]] = test_net(random.sample(test.tolist(), 500)) 
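# This is the periodic evaluation step of the training loop: every epoch calls
# train_epoch()/test_epoch() on mini-batches of genes and lets the ReduceLROnPlateau
# scheduler react to the test loss; roughly once a minute (and on the final epoch)
# 500 training and 500 test genes are sampled, per-cell-type correlations are computed
# with test_net(), the diagnostic plot is refreshed, and a checkpoint with the model
# and optimizer state dicts is written to fn_save.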
display.clear_output(wait=True) display.display(plt.gcf()) make_plot1(save=True) # make_plot2(save=True) display.display(plt.gcf()) t0=time.time() torch.save({ 'epoch': epoch, 'model_state_dict': net.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss_train': loss_train, 'loss_test': loss_test, }, fn_save) print('Saved data: %s' % fn_save) output_fig # test.max() # plt.hist2d(df['log_rna'], mdl.predict(), bins=(50,50), cmap=plt.cm.Reds); # plt.scatter(df['log_rna'], mdl.predict(),s=1) plt.hist(np.log(rnau.loc[genes2enhu['ensid'][best_enh],:].iloc[:,3]+1), bins=100); ``` ### Fangming follow-ups
If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Right now this requires the current master branch of both. Uncomment the following cell and run it. ``` #! pip install git+https://github.com/huggingface/transformers.git #! pip install git+https://github.com/huggingface/datasets.git ``` If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries. To be able to share your model with the community, there are a few more steps to follow. First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then uncomment the following cell and input your username and password (this only works on Colab, in a regular notebook, you need to do this in a terminal): ``` from huggingface_hub import notebook_login notebook_login() ``` Then you need to install Git-LFS and setup Git if you haven't already. Uncomment the following instructions and adapt with your name and email: ``` # !apt install git-lfs # !git config --global user.email "[email protected]" # !git config --global user.name "Your Name" ``` Make sure your version of Transformers is at least 4.8.1 since the functionality was introduced in that version: ``` import transformers print(transformers.__version__) ``` If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it. # Fine-tuning a model on a multiple choice task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model to a multiple choice task, which is the task of selecting the most plausible inputs in a given selection. The dataset used here is [SWAG](https://www.aclweb.org/anthology/D18-1009/) but you can adapt the pre-processing to any other multiple choice dataset you like, or your own data. SWAG is a dataset about commonsense reasoning, where each example describes a situation then proposes four options that could go after it. This notebook is built to run with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a mutiple choice head. Depending on you model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set those two parameters, then the rest of the notebook should run smoothly: ``` model_checkpoint = "bert-base-uncased" batch_size = 16 ``` ## Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data. This can be easily done with the functions `load_dataset`. ``` from datasets import load_dataset, load_metric ``` `load_dataset` will cache the dataset to avoid downloading it again the next time you run this cell. ``` datasets = load_dataset("swag", "regular") ``` The `dataset` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key for the training, validation and test set (with more keys for the mismatched validation and test set in the special case of `mnli`). ``` datasets ``` To access an actual element, you need to select a split first, then give an index: ``` datasets["train"][0] ``` To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset. 
``` from datasets import ClassLabel import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len( dataset ), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset) - 1) while pick in picks: pick = random.randint(0, len(dataset) - 1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) display(HTML(df.to_html())) show_random_elements(datasets["train"]) ``` Each example in the dataset has a context composed of a first sentence (in the field `sent1`) and an introduction to the second sentence (in the field `sent2`). Then four possible endings are given (in the fields `ending0`, `ending1`, `ending2` and `ending3`) and the model must pick the right one (indicated in the field `label`). The following function lets us visualize a give example a bit better: ``` def show_one(example): print(f"Context: {example['sent1']}") print(f" A - {example['sent2']} {example['ending0']}") print(f" B - {example['sent2']} {example['ending1']}") print(f" C - {example['sent2']} {example['ending2']}") print(f" D - {example['sent2']} {example['ending3']}") print(f"\nGround truth: option {['A', 'B', 'C', 'D'][example['label']]}") show_one(datasets["train"][0]) show_one(datasets["train"][15]) ``` ## Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires. To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure: - we get a tokenizer that corresponds to the model architecture we want to use, - we download the vocabulary used when pretraining this specific checkpoint. That vocabulary will be cached, so it's not downloaded again the next time we run the cell. ``` from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) ``` You can directly call this tokenizer on one sentence or a pair of sentences: ``` tokenizer("Hello, this one sentence!", "And this sentence goes with it.") ``` Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. To preprocess our dataset, we will thus need the names of the columns containing the sentence(s). The following dictionary keeps track of the correspondence task to column names: We can them write the function that will preprocess our samples. The tricky part is to put all the possible pairs of sentences in two big lists before passing them to the tokenizer, then un-flatten the result so that each example has four input ids, attentions masks, etc. When calling the `tokenizer`, we use the argument `truncation=True`. This will ensure that an input longer that what the model selected can handle will be truncated to the maximum length accepted by the model. 
``` ending_names = ["ending0", "ending1", "ending2", "ending3"] def preprocess_function(examples): # Repeat each first sentence four times to go with the four possibilities of second sentences. first_sentences = [[context] * 4 for context in examples["sent1"]] # Grab all second sentences possible for each context. question_headers = examples["sent2"] second_sentences = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers) ] # Flatten everything first_sentences = sum(first_sentences, []) second_sentences = sum(second_sentences, []) # Tokenize tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True) # Un-flatten return { k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items() } ``` This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists of lists for each key: a list of all examples (here 5), then a list of all choices (4) and a list of input IDs (length varying here since we did not apply any padding): ``` examples = datasets["train"][:5] features = preprocess_function(examples) print( len(features["input_ids"]), len(features["input_ids"][0]), [len(x) for x in features["input_ids"][0]], ) ``` To check we didn't get anything wrong when grouping all possibilities and then un-flattening, let's have a look at the decoded inputs for a given example: ``` idx = 3 [tokenizer.decode(features["input_ids"][idx][i]) for i in range(4)] ``` We can compare it to the ground truth: ``` show_one(datasets["train"][3]) ``` This seems alright, so we can apply this function on all the examples in our dataset; we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command. ``` encoded_datasets = datasets.map(preprocess_function, batched=True) ``` Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires not using the cached data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files; you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again. Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. ## Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since our task is multiple choice, we use the `TFAutoModelForMultipleChoice` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us. ``` from transformers import TFAutoModelForMultipleChoice model = TFAutoModelForMultipleChoice.from_pretrained(model_checkpoint) ``` The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some others (the `pre_classifier` and `classifier` layers).
This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. Next, we set some names and hyperparameters for the model. The first two variables are used so we can push the model to the [Hub](https://huggingface.co/models) at the end of training. Remove the two of them if you didn't follow the installation steps at the top of the notebook, otherwise you can change the value of `push_to_hub_model_id` to something you would prefer. ``` model_name = model_checkpoint.split("/")[-1] push_to_hub_model_id = f"{model_name}-finetuned-swag" learning_rate = 5e-5 batch_size = batch_size num_train_epochs = 2 weight_decay = 0.01 ``` Next we need to tell our `Dataset` how to form batches from the pre-processed inputs. We haven't done any padding yet because we will pad each batch to the maximum length inside the batch (instead of doing so with the maximum length of the whole dataset). This will be the job of the *data collator*. A data collator takes a list of examples and converts them to a batch (by, in our case, applying padding). Since there is no data collator in the library that works on our specific problem, we will write one, adapted from the `DataCollatorWithPadding`: ``` from dataclasses import dataclass from transformers.tokenization_utils_base import ( PreTrainedTokenizerBase, PaddingStrategy, ) from typing import Optional, Union import tensorflow as tf @dataclass class DataCollatorForMultipleChoice: """ Data collator that will dynamically pad the inputs for multiple choice received. """ tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature.pop(label_name) for feature in features] batch_size = len(features) num_choices = len(features[0]["input_ids"]) flattened_features = [ [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ] flattened_features = sum(flattened_features, []) batch = self.tokenizer.pad( flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="tf", ) # Un-flatten batch = { k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items() } # Add back labels batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64) return batch ``` When called on a list of examples, it will flatten all the inputs/attentions masks etc. in big lists that it will pass to the `tokenizer.pad` method. This will return a dictionary with big tensors (of shape `(batch_size * 4) x seq_length`) that we then unflatten. 
We can check this data collator works on a list of features, we just have to make sure to remove all features that are not inputs accepted by our model (something the `Trainer` will do automatically for us after): ``` accepted_keys = ["input_ids", "attention_mask", "label"] features = [ {k: v for k, v in encoded_datasets["train"][i].items() if k in accepted_keys} for i in range(10) ] batch = DataCollatorForMultipleChoice(tokenizer)(features) encoded_datasets["train"].features["attention_mask"].feature.feature ``` Again, all those flatten/un-flatten are sources of potential errors so let's make another sanity check on our inputs: ``` [tokenizer.decode(batch["input_ids"][8][i].numpy().tolist()) for i in range(4)] show_one(datasets["train"][8]) ``` All good! Now we can use this collator as a collation function for our dataset. The best way to do this is with the `to_tf_dataset()` method. This converts our dataset to a `tf.data.Dataset` that Keras can take as input. It also applies our collation function to each batch. ``` data_collator = DataCollatorForMultipleChoice(tokenizer) train_set = encoded_datasets["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=True, batch_size=batch_size, collate_fn=data_collator, ) validation_set = encoded_datasets["validation"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=False, batch_size=batch_size, collate_fn=data_collator, ) ``` Now we can create our model. First, we specify an optimizer. Using the `create_optimizer` function we can get a nice `AdamW` optimizer with weight decay and a learning rate decay schedule set up for free - but to compute that schedule, it needs to know how long training will take. ``` from transformers import create_optimizer total_train_steps = (len(encoded_datasets["train"]) // batch_size) * num_train_epochs optimizer, schedule = create_optimizer( init_lr=learning_rate, num_warmup_steps=0, num_train_steps=total_train_steps ) ``` All Transformers models have a `loss` output head, so we can simply leave the loss argument to `compile()` blank to train on it. ``` import tensorflow as tf model.compile(optimizer=optimizer) ``` Now we can train our model. We can also add a callback to sync up our model with the Hub - this allows us to resume training from other machines and even test the model's inference quality midway through training! Make sure to change the `username` if you do. If you don't want to do this, simply remove the callbacks argument in the call to `fit()`. ``` from transformers.keras_callbacks import PushToHubCallback username = "Rocketknight1" callback = PushToHubCallback( output_dir="./mc_model_save", tokenizer=tokenizer, hub_model_id=f"{username}/{push_to_hub_model_id}", ) model.fit( train_set, validation_data=validation_set, epochs=num_train_epochs, callbacks=[callback], ) ``` One downside of using the internal loss, however, is that we can't use Keras metrics with it. So let's compute accuracy after the fact, to see how our model is performing. First, we need to get our model's predicted answers on the validation set. ``` predictions = model.predict(validation_set)["logits"] labels = encoded_datasets["validation"]["label"] ``` And now we can compute our accuracy with Numpy. 
``` import numpy as np preds = np.argmax(predictions, axis=1) print({"accuracy": (preds == labels).astype(np.float32).mean().item()}) ``` If you used the callback above, you can now share this model with all your friends, family, favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance: ```python from transformers import AutoModelForMultipleChoice model = AutoModelForMultipleChoice.from_pretrained("your-username/my-awesome-model") ```
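If you used the callback, a quick way to sanity-check the uploaded model is to score a single hand-written SWAG-style example. The following is a minimal sketch that is not part of the original notebook: the checkpoint name, the context and the four candidate endings are placeholders you would replace with your own.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForMultipleChoice

# Hypothetical checkpoint name -- substitute the identifier you pushed above.
checkpoint = "your-username/bert-base-uncased-finetuned-swag"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForMultipleChoice.from_pretrained(checkpoint)

context = "A woman walks up to a piano."   # made-up example
start = "She"
endings = [
    "sits down and begins to play.",
    "eats the piano.",
    "flies away on it.",
    "paints the keys blue.",
]

# Tokenize the four (context, ending) pairs together, then add a batch dimension
# so the tensors have shape (batch=1, num_choices=4, seq_len), as the model expects.
inputs = tokenizer([context] * 4, [f"{start} {e}" for e in endings],
                   padding=True, return_tensors="tf")
inputs = {k: tf.expand_dims(v, 0) for k, v in inputs.items()}

logits = model(inputs).logits            # shape (1, 4): one score per ending
pred = int(tf.math.argmax(logits, axis=-1)[0])
print("Predicted ending:", endings[pred])
```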
# How to recover a known planet in Kepler data This tutorial demonstrates the basic steps required to recover a transiting planet candidate in the Kepler data. We will show how you can recover the signal of [Kepler-10b](https://en.wikipedia.org/wiki/Kepler-10b), the first rocky planet that was discovered by Kepler! Kepler-10 is a Sun-like (G-type) star approximately 600 light years away in the constellation of Cygnus. In this tutorial, we will download the pixel data of Kepler-10, extract a lightcurve, and recover the planet. Kepler pixel data is distributed in "Target Pixel Files". You can read more about them in our tutorial [here](http://lightkurve.keplerscience.org/tutorials/target-pixel-files.html). The `lightkurve` package provides a `KeplerTargetPixelFile` class, which enables you to load and interact with data in this format. The class can take a path (local or url), or you can load data straight from the [MAST archive](https://archive.stsci.edu/kepler/), which holds all of the Kepler and K2 data archive. We'll download the Kepler-10 light curve using the `from_archive` function, as shown below. *(Note: we're adding the keyword `quarter=3` to download only the data from the third Kepler quarter. There were 17 quarters during the Kepler mission.)* ``` from lightkurve import KeplerTargetPixelFile tpf = KeplerTargetPixelFile.from_archive("Kepler-10", quarter=3) ``` Let's use the `plot` method and pass along an aperture mask and a few plotting arguments. ``` tpf.plot(scale='log'); ``` The target pixel file contains one bright star with approximately 50,000 counts. Now, we will use the ``to_lightcurve`` method to create a simple aperture photometry lightcurve using the mask defined by the pipeline which is stored in `tpf.pipeline_mask`. ``` lc = tpf.to_lightcurve(aperture_mask=tpf.pipeline_mask) ``` Let's take a look at the output lightcurve. ``` lc.plot(); ``` Now let's use the `flatten` method, which applies a Savitzky-Golay filter, to remove long-term variability that we are not interested in. We'll use the `return_trend` keyword so that it returns both the corrected `KeplerLightCurve` object and a new `KeplerLightCurve` object called 'trend'. This contains only the long term variability. ``` flat, trend = lc.flatten(window_length=301, return_trend=True) ``` Let's plot the trend estimated by the Savitzky-Golay filter: ``` ax = lc.plot() #plot() returns a matplotlib axis trend.plot(ax, color='red'); #which we can pass to the next plot() to use the same plotting window ``` and the flat lightcurve: ``` flat.plot(); ``` Now, let's run a period search function using the Box-Least Squares algorithm (http://adsabs.harvard.edu/abs/2002A%26A...391..369K). We will shortly have a built in BLS implementation, but until then you can download and install it separately from lightkurve using `pip install git+https://github.com/mirca/transit-periodogram.git` ``` from transit_periodogram import transit_periodogram import numpy as np import matplotlib.pyplot as plt periods = np.arange(0.3, 1.5, 0.0001) durations = np.arange(0.005, 0.15, 0.001) power, _, _, _, _, _, _ = transit_periodogram(time=flat.time, flux=flat.flux, flux_err=flat.flux_err, periods=periods, durations=durations) best_fit = periods[np.argmax(power)] print('Best Fit Period: {} days'.format(best_fit)) flat.fold(best_fit).plot(alpha=0.4); ```
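As a small follow-up (not part of the original tutorial), you can get a rough estimate of the transit depth directly from the folded light curve. The sketch below assumes that `fold()` returns phases in the range [-0.5, 0.5] with the transit centred near phase 0, and the phase windows are arbitrary choices for this target:

```
import numpy as np

folded = flat.fold(best_fit)
in_transit = np.abs(folded.time) < 0.02     # points close to the transit centre
out_of_transit = np.abs(folded.time) > 0.2  # points far away from the transit

# The flattened flux is normalized to ~1, so the depth is simply the difference
# between the median out-of-transit and in-transit flux levels.
depth = np.median(folded.flux[out_of_transit]) - np.median(folded.flux[in_transit])
print('Approximate transit depth: {:.0f} ppm'.format(depth * 1e6))

# To first order, depth ~ (Rp/Rstar)^2 for a central transit.
print('Rp/Rstar ~ {:.3f}'.format(np.sqrt(max(depth, 0))))
```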
**Note**: Click on "*Kernel*" > "*Restart Kernel and Run All*" in [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/) *after* finishing the exercises to ensure that your solution runs top to bottom *without* any errors. If you cannot run this file on your machine, you may want to open it [in the cloud <img height="12" style="display: inline-block" src="../static/link/to_mb.png">](https://mybinder.org/v2/gh/webartifex/intro-to-python/develop?urlpath=lab/tree/01_elements/01_exercises.ipynb). # Chapter 1: Elements of a Program (Coding Exercises) The exercises below assume that you have read the [first part <img height="12" style="display: inline-block" src="../static/link/to_nb.png">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/01_elements/00_content.ipynb) of Chapter 1. The `...`'s in the code cells indicate where you need to fill in code snippets. The number of `...`'s within a code cell give you a rough idea of how many lines of code are needed to solve the task. You should not need to create any additional code cells for your final solution. However, you may want to use temporary code cells to try out some ideas. ## Printing Output **Q1**: *Concatenate* `greeting` and `audience` below with the `+` operator and print out the resulting message `"Hello World"` with only *one* call of the built-in [print() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#print) function! Hint: You may have to "add" a space character in between `greeting` and `audience`. ``` greeting = "Hello" audience = "World" print(...) ``` **Q2**: How is your answer to **Q1** an example of the concept of **operator overloading**? < your answer > **Q3**: Read the documentation on the built-in [print() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#print) function! How can you print the above message *without* concatenating `greeting` and `audience` first in *one* call of [print() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#print)? Hint: The `*objects` in the documentation implies that we can put several *expressions* (i.e., variables) separated by commas within the same call of the [print() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#print) function. ``` print(...) ``` **Q4**: What does the `sep=" "` mean in the documentation on the built-in [print() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#print) function? Adjust and use it to print out the three names referenced by `first`, `second`, and `third` on *one* line separated by *commas* with only *one* call of the [print() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#print) function! ``` first = "Anthony" second = "Berta" third = "Christian" print(...) ``` **Q5**: Lastly, what does the `end="\n"` mean in the documentation? Adjust and use it within the `for`-loop to print the numbers `1` through `10` on *one* line with only *one* call of the [print() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#print) function! 
``` for number in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]: print(...) ```
# Introduction to Machine Learning (The examples in this notebook were inspired by my work for the Emergent Alliance, the Scikit-Learn documentation and Jason Brownlee's "Machine Learning Mastery with Python") In this short intro course we will focus on predictive modeling. That means that we want to use the models to make predictions, e.g. a system's future behaviour or a system's response to specific inputs, aka classification and regression. So from all the various types of machine learning categories we will look at **supervised learning**. That is, we will train a model based on labelled training data. For example, when training an image recognition model for recognizing cats vs. dogs, you need to label a lot of pictures for training purposes upfront. ![](Bereiche-des-Machine-Learnings.png) The other categories cover **unsupervised learning**, e.g. clustering, and **reinforcement learning**, e.g. DeepMind's AlphaGo. ![Alt Text](deepmind_parkour.0.gif.mp4) ## Datasets: We will look at two different datasets: 1. Iris Flower Dataset 2. Boston Housing Prices These datasets are so-called toy datasets, well-known machine learning examples that are already included in the Python machine learning library scikit-learn https://scikit-learn.org/stable/datasets/toy_dataset.html. The Iris Flower dataset is an example of a classification problem, whereas the Boston Housing Price dataset is a regression example. ## What does an ML project always look like? * Idea --> Problem Definition / Hypothesis formulation * Analyze and Visualize your data - Understand your data (dimensions, data types, class distributions (bias!), data summary, correlations, skewness) - Visualize your data (box and whisker / violin / distribution / scatter matrix) * Data Preprocessing including data cleansing, data wrangling, data compilation, normalization, standardization * Apply algorithms and make predictions * Improve, validate and present results ## Let's get started Load some libraries ``` import pandas as pd # data analysis import numpy as np # math operations on arrays and vectors import matplotlib.pyplot as plt # plotting # display plots directly in the notebook %matplotlib inline import sklearn # the library we use for all ML related functions, algorithms ``` ## Example 1: Iris flower dataset https://scikit-learn.org/stable/datasets/toy_dataset.html#iris-dataset 4 numeric, predictive attributes (sepal length in cm, sepal width in cm, petal length in cm, petal width in cm) and the class (Iris-Setosa, Iris-Versicolour, Iris-Virginica) **Hypothesis:** One can predict the class of an Iris flower based on its attributes. Here this is just one sentence, but formulating this hypothesis is a non-trivial, iterative task, which is the basis for data and feature selection and extremely important for the overall success! ### 1. Load the data ``` # check here again with autocompletion --> then you can see all available datasets # https://scikit-learn.org/stable/datasets/toy_dataset.html from sklearn.datasets import load_iris (data, target) = load_iris(return_X_y=True, as_frame=True) data target ``` We will now combine this into one dataframe and check the classes ``` data["class"]=target data ``` ### 2. Understand your data ``` data.describe() ``` This is a classification problem, so we will check the class distribution. This is important to avoid bias due to over- or underrepresentation of classes.
Well known example of this problem are predictive maintenance (very less errors compared to normal runs, Amazon's hiring AI https://www.reuters.com/article/us-amazon-com-jobs-automation-insight-idUSKCN1MK08G) ``` class_counts = data.groupby('class').size() class_counts ``` Now let's check for correlations Correlation means the relationship between two variables and how they may or may not change together. There are different methods available (--> check with ?data.corr) ``` correlations = data.corr(method='pearson') correlations ``` Let's do a heatmap plot for the correlation matrix (pandas built-in) ``` correlations.style.background_gradient(cmap='coolwarm').set_precision(2) ``` Now we will also check the skewness of the distributions, assuming a normal Gaussian distribution. The skew results show a positive (right) or negative (left) skew. Values closer to zero show less skew. ``` skew=data.skew() skew ``` ## 2. Visualize your data - Histogram - Paiplot - Density ``` data.hist() data.plot(kind="density", subplots=True, layout=(3,2),sharex=False) ``` Another nice plot is the box and whisker plot, visualizing the quartiles of a distribution ``` data.plot(kind="box", subplots=True, layout=(3,2),sharex=False) ``` Another option are the seaborn violine plots, which give a more intuitive feeling about the distribution of values ``` import seaborn as sns sns.violinplot(data=data,x="class", y="sepal length (cm)") ``` And last but not least a scatterplot matrix, similar to the pairplot we did already in the last session. This should also give insights about correllations. ``` sns.pairplot(data) ``` ## 3. Data Preprocessing For this dataset, there are already some steps we don't need to take, like: Conglomeration of multiple datasources to one table, including the adaption of formats and granularities. Also we don't need to take care for missing values or NaN's. But among preprocessing there are as well - Rescaling - Normalization The goal of these transformtions is bringing the data into a format, which is most beneficial for the later applied algorithms. So for example optimization algorithms for multivariate optimizations perform better, when all attributes / parameters have the same scale. And other methods assume that input variables have a Gaussian distribution, so it is better to transform the input parameters to meet these requirements. At first we look at **rescaling**. This is done to rescale all attributes (parameters) into the same range, most of the times this is the range [0,1]. For applying these preprocessing steps at first we need to transform the dataframe into an array and split the arry in input and output values, here the descriptive parameters and the class. ``` # transform into array array = data.values array # separate array into input and output components X = array[:,0:4] Y = array[:,4] # Now we apply the MinMaxScaler with a range of [0,1], so that afterwards all columns have a min of 0 and a max of 1. from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range=(0, 1)) rescaledX = scaler.fit_transform(X) rescaledX ``` Now we will apply Normalization by using the Standard Scaler, which means that each column (each attribute / parameter) will be transformed, such that afterwards each attribute has a standard distribution with mean = 0 and std. dev. = 1. 
Given the distribution of the data, each value in the dataset will have the mean value subtracted and then be divided by the standard deviation of the whole dataset (or feature in the multivariate case). ``` from sklearn.preprocessing import StandardScaler scaler = StandardScaler().fit(X) rescaledX = scaler.transform(X) rescaledX ``` ## 4. Feature Selection (Parameter Sensitivity) Now we come to an extremely interesting part, which is about finding out which parameters really have an impact on our outputs. This is the first time we can validate our assumptions. So we will get a qualitative and a quantitative answer to the question of which parameters are important. This is also important as having irrelevant features in your data can decrease the accuracy of many models and increase the training time. ``` # Feature Extraction with Univariate Statistical Tests (Chi-squared for classification) from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 # feature extraction test = SelectKBest(score_func=chi2, k=3) fit = test.fit(X, Y) # summarize scores print(fit.scores_) features = fit.transform(X) # summarize selected features print(features[0:5,:]) ``` Here we can see the scores of the features. The higher the score, the more impact they have. As we have selected 3 attributes to keep, we can see the values of the three selected features (here sepal length (cm), petal length (cm) and petal width (cm); sepal width has the lowest score and is dropped). This result also makes sense when remembering the correlation heatmap... Another very interesting transformation, which fulfills the same job as feature extraction in terms of data reduction, is PCA. Here the complete dataset is transformed into a reduced dataset (you set the number of resulting principal components). A Singular Value Decomposition of the data is performed to project it to a lower dimensional space. ``` from sklearn.decomposition import PCA pca = PCA(n_components=3) fit = pca.fit(X) # summarize components print("Explained Variance: %s" % fit.explained_variance_ratio_) print(fit.components_) ``` Of course there are even more possibilities, especially when you consider that the application of ML algorithms itself will give the feature importance. So there are multiple built-in methods available in sklearn. ## 5. Apply ML algorithms - The first step is to split our data into **training and testing data**. We need to have a separate testing dataset, which was not used for training, to validate the performance and accuracy of our trained model. - **Which algorithm to take?** There is no simple answer to that. Based on your problem (classification vs. regression), there are different classes of algorithms, but you cannot know beforehand which algorithm will perform best on your data. So it is always a good idea to try different algorithms and check the performance. - How to evaluate the performance? There are different metrics available to check the **performance of an ML model** ``` # specifying the size of the testing data set # seed: reproducible random split --> especially important when comparing different algorithms with each other. from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression test_size = 0.33 seed = 7 # we set a seed to get a reproducible split - especially important when you want to compare diff.
algorithms with each other X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed) model = LogisticRegression(solver='liblinear') model.fit(X_train, Y_train) result = model.score(X_test, Y_test) print("Accuracy: %.3f%%" % (result*100.0)) # Let's compare the accuracy, when we use the same data for training and testing model = LogisticRegression(solver='liblinear') model.fit(X, Y) result = model.score(X, Y) print("Accuracy: %.3f%%" % (result*100.0)) # get importance model = LogisticRegression(solver='liblinear') model.fit(X_train, Y_train) importance = model.coef_[0] # summarize feature importance for i,v in enumerate(importance): print('Feature: %0d, Score: %.5f' % (i,v)) # print("Feature: "+str(i)+", Score: "+str(v)) # plot feature importance plt.bar([x for x in range(len(importance))], importance) # decision tree for feature importance on a regression problem from sklearn.datasets import make_regression from sklearn.tree import DecisionTreeRegressor model = DecisionTreeRegressor() # fit the model model.fit(X_train, Y_train) # get importance importance = model.feature_importances_ # summarize feature importance for i,v in enumerate(importance): print('Feature: %0d, Score: %.5f' % (i,v)) # plot feature importance plt.bar([x for x in range(len(importance))], importance) ``` ### Test-Train-Splits Performing just one test-train-split and checking the performance or feature importance might be not good enough, as the result could be very good or very bad by coincidence due to this specific split. So the easiest solution is to repeat this process several times and check the averaged accuracy or use some of the ready-to-use built-in tools in scikit-learn, like KFold, cross-val-score, LeaveOneOut, ShuffleSplit. ### Which ML model to use? Here is just a tiny overview of some mosdels one can use for classification and regression problems. For more models, which are just built-in in sciki-learn, please refer to https://scikit-learn.org/stable/index.html and https://machinelearningmastery.com - Logistic / Linear Regression - k-nearest neighbour - Classification and Regression Trees - Support Vector Machines - Neural Networks In the following we will just use logistic regression (https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression) for our classification example and linear regression (https://scikit-learn.org/stable/modules/linear_model.html#generalized-linear-regression) for our regression example. ### ML model evaluation For evaluating the model performance, there are different metrics available, depending on your type of problem (classification vs regression) For classification, there are for example: - Classification accuracy - Logistic Loss - Confusion Matrix - ... For regression, there are for example: - Mean Absolute Error - Mean Squared Error (R)MSE - R^2 So the accuracy alone does by far not tell you the whole story, you need to check other metrics as well! The confusion matrix is a handy presentation of the accuracy of a model with two or more classes. The table presents predictions on the x-axis and true outcomes on the y-axis. 
--> false negative, false positive https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/ ``` from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score from sklearn.metrics import confusion_matrix #Lets have a look at our classification problem: kfold = KFold(n_splits=10, random_state=7, shuffle=True) model = LogisticRegression(solver='liblinear') # Classification accuracy: scoring = 'accuracy' results = cross_val_score(model, X, Y, cv=kfold, scoring=scoring) print("Accuracy: %.3f (%.3f)" % (results.mean(), results.std())) # Logistic Loss scoring = 'neg_log_loss' results = cross_val_score(model, X, Y, cv=kfold, scoring=scoring) print("Logloss: %.3f (%.3f)" % (results.mean(), results.std())) # Confusion Matrix model.fit(X_train, Y_train) predicted = model.predict(X_test) matrix = confusion_matrix(Y_test, predicted) print(matrix) ``` ## Regression Example: Boston Housing Example ``` import sklearn from sklearn.datasets import load_boston data =load_boston(return_X_y=False) print(data.DESCR) df=pd.DataFrame(data.data) df.columns=data.feature_names df df["MEDV"]=data.target df ``` Now we start again with our procedure: * Hypothesis * Understand and visualize the data * Preprocessing * Feature Selection * Apply Model * Evaluate Results Our **Hypothesis** here is, that we can actually predict the price of a house based on attributes of the geographic area, population and the property. ``` df.describe() sns.pairplot(df[["DIS","RM","CRIM","LSTAT","MEDV"]]) from sklearn.linear_model import LinearRegression # Now we do the # preprocessing # feature selection # training-test-split # ML model application # evaluation array = df.values X = array[:,0:13] Y = array[:,13] # preprocessing scaler = StandardScaler().fit(X) rescaledX = scaler.transform(X) # feature selection test = SelectKBest(k=6) fit = test.fit(rescaledX, Y) features = fit.transform(X) # train-test-split X_train, X_test, Y_train, Y_test = train_test_split(features, Y, test_size=0.3, random_state=5) # build model kfold = KFold(n_splits=10, random_state=7, shuffle=True) model = LinearRegression() model.fit(X_train,Y_train) acc = model.score(X_test, Y_test) # evaluate model model = LinearRegression() scoring = 'neg_mean_squared_error' results = cross_val_score(model, X, Y, cv=kfold, scoring=scoring) print("Accuracy: %.3f%%" % (acc*100.0)) print("MSE: %.3f (%.3f)" % (results.mean(), results.std())) # And now: # Make predictions # make predictions # model.predict(new_data) ``` ### What comes next? ---> Hyperparameter optimization. For advanced ML algorithms you have to provide options and settings by yourself. These of course also have an impact onto your model performance and accuracy. Here you can perform so-called grid searches to find the optimal settings for your dataset. **GridSearchCV** ## What does a typical project look like: * Data engineering - **A LOT** * Applying actual ML algorithms - 5% of the time. (If you have your dataset ready to apply algorithms you have already done like 100% of the work. Of course afterwards you still need to validate and present your results) ![](HealthRiskIndex.png) ### Example: Emergent Alliance - Health Risk Index for Europe https://emergentalliance.org What we wanted to do: Predict the risk of getting infected, when travelling to a specific region. We actually spent weeks formulating and reformulatin our hypothesis to (re-)consider influencing attributes, trying to distinguish between causes and effects. 
In the end we spent most of the time on data engineering for: population density, intensive care units, mobility, case numbers, sentiment, and acceptance of government orders. Most of that time went into checking data sources, getting the data, reading data dictionaries and understanding the data, creating automatic downloads and data pipelines, preprocessing the data, and bringing the preprocessed data into a database. We had to fight many issues with data quality and data granularity (in time and geography) across different countries. Afterwards, the visual and textual processing and presentation also took quite some time (writing blogs, building dashboards, cleaning up databases, ...). ## Image Recognition It is actually quite easy to build a simple image classification model (cats vs. dogs), so if you are interested in applying something like this to your experimental data (bubble column pictures or post-processing contour plots), here are some links to get started: https://medium.com/@nina95dan/simple-image-classification-with-resnet-50-334366e7311a https://medium.com/abraia/getting-started-with-image-recognition-and-convolutional-neural-networks-in-5-minutes-28c1dfdd401
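As a rough starting point, here is a minimal transfer-learning sketch (not taken from the linked tutorials; it assumes a recent TensorFlow 2.x installation and a hypothetical `data/train/<class_name>/` folder layout with two classes):

```
import tensorflow as tf

# hypothetical directory layout: data/train/<class_name>/*.jpg, e.g. cats/ and dogs/
train_ds = tf.keras.utils.image_dataset_from_directory(
    "data/train", image_size=(224, 224), batch_size=32)

# pretrained ResNet50 as a frozen feature extractor
base = tf.keras.applications.ResNet50(
    include_top=False, weights="imagenet", pooling="avg", input_shape=(224, 224, 3))
base.trainable = False

inputs = tf.keras.Input(shape=(224, 224, 3))
x = tf.keras.applications.resnet50.preprocess_input(inputs)  # ResNet's own preprocessing
x = base(x, training=False)
outputs = tf.keras.layers.Dense(2, activation="softmax")(x)  # two classes, e.g. cats vs dogs
model = tf.keras.Model(inputs, outputs)

model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(train_ds, epochs=5)
```

Freezing the pretrained base and training only the small dense head is usually enough for a first baseline; fine-tuning the top convolutional blocks can come later.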
github_jupyter
<!--<img width=700px; src="../img/logoUPSayPlusCDS_990.png"> --> <p style="margin-top: 3em; margin-bottom: 2em;"><b><big><big><big><big>Introduction to Pandas</big></big></big></big></b></p> ``` %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt pd.options.display.max_rows = 8 ``` # 1. Let's start with a showcase #### Case 1: titanic survival data ``` df = pd.read_csv("data/titanic.csv") df.head() ``` Starting from reading this dataset, to answering questions about this data in a few lines of code: **What is the age distribution of the passengers?** ``` df['Age'].hist() ``` **How does the survival rate of the passengers differ between sexes?** ``` df.groupby('Sex')[['Survived']].aggregate(lambda x: x.sum() / len(x)) ``` **Or how does it differ between the different classes?** ``` df.groupby('Pclass')['Survived'].aggregate(lambda x: x.sum() / len(x)).plot(kind='bar') ``` All the needed functionality for the above examples will be explained throughout this tutorial. #### Case 2: air quality measurement timeseries AirBase (The European Air quality dataBase): hourly measurements of all air quality monitoring stations from Europe Starting from these hourly data for different stations: ``` data = pd.read_csv('data/20000101_20161231-NO2.csv', sep=';', skiprows=[1], na_values=['n/d'], index_col=0, parse_dates=True) data.head() ``` to answering questions about this data in a few lines of code: **Does the air pollution show a decreasing trend over the years?** ``` data['1999':].resample('M').mean().plot(ylim=[0,120]) data['1999':].resample('A').mean().plot(ylim=[0,100]) ``` **What is the difference in diurnal profile between weekdays and weekend?** ``` data['weekday'] = data.index.weekday data['weekend'] = data['weekday'].isin([5, 6]) data_weekend = data.groupby(['weekend', data.index.hour])['BASCH'].mean().unstack(level=0) data_weekend.plot() ``` We will come back to these example, and build them up step by step. # 2. Pandas: data analysis in python For data-intensive work in Python the [Pandas](http://pandas.pydata.org) library has become essential. What is `pandas`? * Pandas can be thought of as *NumPy arrays with labels* for rows and columns, and better support for heterogeneous data types, but it's also much, much more than that. * Pandas can also be thought of as `R`'s `data.frame` in Python. * Powerful for working with missing data, working with time series data, for reading and writing your data, for reshaping, grouping, merging your data, ... It's documentation: http://pandas.pydata.org/pandas-docs/stable/ ** When do you need pandas? ** When working with **tabular or structured data** (like R dataframe, SQL table, Excel spreadsheet, ...): - Import data - Clean up messy data - Explore data, gain insight into data - Process and prepare your data for analysis - Analyse your data (together with scikit-learn, statsmodels, ...) <div class="alert alert-warning"> <b>ATTENTION!</b>: <br><br> Pandas is great for working with heterogeneous and tabular 1D/2D data, but not all types of data fit in such structures! <ul> <li>When working with array data (e.g. images, numerical algorithms): just stick with numpy</li> <li>When working with multidimensional labeled data (e.g. climate data): have a look at [xarray](http://xarray.pydata.org/en/stable/)</li> </ul> </div> # 2. 
The pandas data structures: `DataFrame` and `Series` A `DataFrame` is a **tablular data structure** (multi-dimensional object to hold labeled data) comprised of rows and columns, akin to a spreadsheet, database table, or R's data.frame object. You can think of it as multiple Series object which share the same index. <img align="left" width=50% src="img/schema-dataframe.svg"> ``` df ``` ### Attributes of the DataFrame A DataFrame has besides a `index` attribute, also a `columns` attribute: ``` df.index df.columns ``` To check the data types of the different columns: ``` df.dtypes ``` An overview of that information can be given with the `info()` method: ``` df.info() ``` Also a DataFrame has a `values` attribute, but attention: when you have heterogeneous data, all values will be upcasted: ``` df.values ``` Apart from importing your data from an external source (text file, excel, database, ..), one of the most common ways of creating a dataframe is from a dictionary of arrays or lists. Note that in the IPython notebook, the dataframe will display in a rich HTML view: ``` data = {'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'], 'population': [11.3, 64.3, 81.3, 16.9, 64.9], 'area': [30510, 671308, 357050, 41526, 244820], 'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']} df_countries = pd.DataFrame(data) df_countries ``` ### One-dimensional data: `Series` (a column of a DataFrame) A Series is a basic holder for **one-dimensional labeled data**. ``` df['Age'] age = df['Age'] ``` ### Attributes of a Series: `index` and `values` The Series has also an `index` and `values` attribute, but no `columns` ``` age.index ``` You can access the underlying numpy array representation with the `.values` attribute: ``` age.values[:10] ``` We can access series values via the index, just like for NumPy arrays: ``` age[0] ``` Unlike the NumPy array, though, this index can be something other than integers: ``` df = df.set_index('Name') df age = df['Age'] age age['Dooley, Mr. Patrick'] ``` but with the power of numpy arrays. Many things you can do with numpy arrays, can also be applied on DataFrames / Series. Eg element-wise operations: ``` age * 1000 ``` A range of methods: ``` age.mean() ``` Fancy indexing, like indexing with a list or boolean indexing: ``` age[age > 70] ``` But also a lot of pandas specific methods, e.g. ``` df['Embarked'].value_counts() ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>What is the maximum Fare that was paid? And the median?</li> </ul> </div> ``` df["Fare"].max() df["Fare"].median() ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Calculate the average survival ratio for all passengers (note: the 'Survived' column indicates whether someone survived (1) or not (0)).</li> </ul> </div> ``` survived_0 = df[df["Survived"] == 0]["Survived"].count() survived_1 = df[df["Survived"] == 1]["Survived"].count() total = df["Survived"].count() survived_0_ratio = survived_0/total survived_1_ratio = survived_1/total print(survived_0_ratio) print(survived_1_ratio) # Method 2 print(df["Survived"].mean()) ``` # 3. Data import and export A wide range of input/output formats are natively supported by pandas: * CSV, text * SQL database * Excel * HDF5 * json * html * pickle * sas, stata * (parquet) * ... ``` #pd.read #df.to ``` Very powerful csv reader: ``` pd.read_csv? 
``` Luckily, if we have a well formed csv file, we don't need many of those arguments: ``` df = pd.read_csv("data/titanic.csv") df.head() ``` <div class="alert alert-success"> <b>EXERCISE</b>: Read the `data/20000101_20161231-NO2.csv` file into a DataFrame `no2` <br><br> Some aspects about the file: <ul> <li>Which separator is used in the file?</li> <li>The second row includes unit information and should be skipped (check `skiprows` keyword)</li> <li>For missing values, it uses the `'n/d'` notation (check `na_values` keyword)</li> <li>We want to parse the 'timestamp' column as datetimes (check the `parse_dates` keyword)</li> </ul> </div> ``` no2 = pd.read_csv("./data/20000101_20161231-NO2.csv", sep=";", skiprows=[1], index_col =[0], na_values=["n/d"], parse_dates=True ) no2 ``` # 4. Exploration Some useful methods: `head` and `tail` ``` no2.head(3) no2.tail() ``` `info()` ``` no2.info() ``` Getting some basic summary statistics about the data with `describe`: ``` no2.describe() ``` Quickly visualizing the data ``` no2.plot(kind='box', ylim=[0,250]) no2['BASCH'].plot(kind='hist', bins=50) ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Plot the age distribution of the titanic passengers</li> </ul> </div> ``` df["Age"].hist() ``` The default plot (when not specifying `kind`) is a line plot of all columns: ``` no2.plot(figsize=(12,6)) ``` This does not say too much .. We can select part of the data (eg the latest 500 data points): ``` no2[-500:].plot(figsize=(12,6)) ``` Or we can use some more advanced time series features -> see further in this notebook! # 5. Selecting and filtering data <div class="alert alert-warning"> <b>ATTENTION!</b>: <br><br> One of pandas' basic features is the labeling of rows and columns, but this makes indexing also a bit more complex compared to numpy. <br><br> We now have to distuinguish between: <ul> <li>selection by **label**</li> <li>selection by **position**</li> </ul> </div> ``` df = pd.read_csv("data/titanic.csv") ``` ### `df[]` provides some convenience shortcuts For a DataFrame, basic indexing selects the columns. Selecting a single column: ``` df['Age'] ``` or multiple columns: ``` df[['Age', 'Fare']] ``` But, slicing accesses the rows: ``` df[10:15] ``` ### Systematic indexing with `loc` and `iloc` When using `[]` like above, you can only select from one axis at once (rows or columns, not both). For more advanced indexing, you have some extra attributes: * `loc`: selection by label * `iloc`: selection by position ``` df = df.set_index('Name') df.loc['Bonnell, Miss. Elizabeth', 'Fare'] df.loc['Bonnell, Miss. Elizabeth':'Andersson, Mr. Anders Johan', :] ``` Selecting by position with `iloc` works similar as indexing numpy arrays: ``` df.iloc[0:2,1:3] ``` The different indexing methods can also be used to assign data: ``` df.loc['Braund, Mr. Owen Harris', 'Survived'] = 100 df ``` ### Boolean indexing (filtering) Often, you want to select rows based on a certain condition. This can be done with 'boolean indexing' (like a where clause in SQL) and comparable to numpy. The indexer (or boolean mask) should be 1-dimensional and the same length as the thing being indexed. ``` df['Fare'] > 50 df[df['Fare'] > 50] ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Based on the titanic data set, select all rows for male passengers and calculate the mean age of those passengers. 
Do the same for the female passengers</li> </ul> </div> ``` df = pd.read_csv("data/titanic.csv") # %load snippets/01-pandas_introduction63.py male_mean_age = df[df["Sex"] == "male"]["Age"].mean() female_mean_age = df[df["Sex"] == "female"]["Age"].mean() print(male_mean_age) print(female_mean_age) print(male_mean_age == female_mean_age) # by loc male_mean_age = df.loc[df["Sex"] == "male", "Age"].mean() female_mean_age = df.loc[df["Sex"] == "female", "Age"].mean() print(male_mean_age) print(female_mean_age) print(male_mean_age == female_mean_age) ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Based on the titanic data set, how many passengers older than 70 were on the Titanic?</li> </ul> </div> ``` len(df[df["Age"] >= 70]) ``` # 6. The group-by operation ### Some 'theory': the groupby operation (split-apply-combine) ``` df = pd.DataFrame({'key':['A','B','C','A','B','C','A','B','C'], 'data': [0, 5, 10, 5, 10, 15, 10, 15, 20]}) df ``` ### Recap: aggregating functions When analyzing data, you often calculate summary statistics (aggregations like the mean, max, ...). As we have seen before, we can easily calculate such a statistic for a Series or column using one of the many available methods. For example: ``` df['data'].sum() ``` However, in many cases your data has certain groups in it, and in that case, you may want to calculate this statistic for each of the groups. For example, in the above dataframe `df`, there is a column 'key' which has three possible values: 'A', 'B' and 'C'. When we want to calculate the sum for each of those groups, we could do the following: ``` for key in ['A', 'B', 'C']: print(key, df[df['key'] == key]['data'].sum()) ``` This becomes very verbose when having multiple groups. You could make the above a bit easier by looping over the different values, but still, it is not very convenient to work with. What we did above, applying a function on different groups, is a "groupby operation", and pandas provides some convenient functionality for this. ### Groupby: applying functions per group The "group by" concept: we want to **apply the same function on subsets of your dataframe, based on some key to split the dataframe in subsets** This operation is also referred to as the "split-apply-combine" operation, involving the following steps: * **Splitting** the data into groups based on some criteria * **Applying** a function to each group independently * **Combining** the results into a data structure <img src="img/splitApplyCombine.png"> Similar to SQL `GROUP BY` Instead of doing the manual filtering as above df[df['key'] == "A"].sum() df[df['key'] == "B"].sum() ... pandas provides the `groupby` method to do exactly this: ``` df.groupby('key').sum() df.groupby('key').aggregate(np.sum) # 'sum' ``` And many more methods are available. 
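For example, a quick sketch (using the small `key`/`data` frame defined above) of asking for several aggregations at once with `agg`:

```
# several statistics per group in one call
df.groupby('key')['data'].agg(['sum', 'mean', 'count'])

# or different aggregations per column, passed as a dict
df.groupby('key').agg({'data': ['min', 'max']})
```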
``` df.groupby('key')['data'].sum() ``` ### Application of the groupby concept on the titanic data We go back to the titanic passengers survival data: ``` df = pd.read_csv("data/titanic.csv") df.head() ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Calculate the average age for each sex again, but now using groupby.</li> </ul> </div> ``` # %load snippets/01-pandas_introduction76.py df.groupby("Sex")["Age"].mean() ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Calculate the average survival ratio for all passengers.</li> </ul> </div> ``` # df.groupby("Survived")["Survived"].count() df["Survived"].mean() # %load snippets/01-pandas_introduction77.py ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Calculate this survival ratio for all passengers younger that 25 (remember: filtering/boolean indexing).</li> </ul> </div> ``` # %load snippets/01-pandas_introduction78.py df[df["Age"] <= 25]["Survived"].mean() df25 = df[df['Age'] <= 25] df25['Survived'].sum() / len(df25['Survived']) ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>What is the difference in the survival ratio between the sexes?</li> </ul> </div> ``` # %load snippets/01-pandas_introduction79.py df.groupby("Sex")["Survived"].mean() ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Or how does it differ between the different classes? Make a bar plot visualizing the survival ratio for the 3 classes.</li> </ul> </div> ``` # %load snippets/01-pandas_introduction80.py df.groupby("Pclass")["Survived"].mean().plot(kind = "bar") ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Make a bar plot to visualize the average Fare payed by people depending on their age. The age column is devided is separate classes using the `pd.cut` function as provided below.</li> </ul> </div> ``` df['AgeClass'] = pd.cut(df['Age'], bins=np.arange(0,90,10)) # %load snippets/01-pandas_introduction82.py df.groupby("AgeClass")["Fare"].mean().plot(kind="bar") ``` # 7. Working with time series data ``` no2 = pd.read_csv('data/20000101_20161231-NO2.csv', sep=';', skiprows=[1], na_values=['n/d'], index_col=0, parse_dates=True) ``` When we ensure the DataFrame has a `DatetimeIndex`, time-series related functionality becomes available: ``` no2.index ``` Indexing a time series works with strings: ``` no2["2010-01-01 09:00": "2010-01-01 12:00"] ``` A nice feature is "partial string" indexing, so you don't need to provide the full datetime string. E.g. all data of January up to March 2012: ``` no2['2012-01':'2012-03'] ``` Time and date components can be accessed from the index: ``` no2.index.hour no2.index.year ``` ## Converting your time series with `resample` A very powerfull method is **`resample`: converting the frequency of the time series** (e.g. from hourly to daily data). Remember the air quality data: ``` no2.plot() ``` The time series has a frequency of 1 hour. I want to change this to daily: ``` no2.head() no2.resample('D').mean().head() ``` Above I take the mean, but as with `groupby` I can also specify other methods: ``` no2.resample('D').max().head() ``` The string to specify the new time frequency: http://pandas.pydata.org/pandas-docs/dev/timeseries.html#offset-aliases These strings can also be combined with numbers, eg `'10D'`. 
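For instance, a quick sketch on the `no2` data used above, combining a number with an offset alias:

```
# 10-day averages of the hourly measurements
no2.resample('10D').mean().head()
```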
Further exploring the data: ``` no2.resample('M').mean().plot() # 'A' # no2['2012'].resample('D').plot() # %load snippets/01-pandas_introduction95.py ``` <div class="alert alert-success"> <b>EXERCISE</b>: The evolution of the yearly averages with, and the overall mean of all stations <ul> <li>Use `resample` and `plot` to plot the yearly averages for the different stations.</li> <li>The overall mean of all stations can be calculated by taking the mean of the different columns (`.mean(axis=1)`).</li> </ul> </div> ``` # %load snippets/01-pandas_introduction96.py ``` <div class="alert alert-success"> <b>EXERCISE</b>: how does the *typical monthly profile* look like for the different stations? <ul> <li>Add a 'month' column to the dataframe.</li> <li>Group by the month to obtain the typical monthly averages over the different years.</li> </ul> </div> First, we add a column to the dataframe that indicates the month (integer value of 1 to 12): ``` # %load snippets/01-pandas_introduction97.py ``` Now, we can calculate the mean of each month over the different years: ``` # %load snippets/01-pandas_introduction98.py # %load snippets/01-pandas_introduction99.py ``` <div class="alert alert-success"> <b>EXERCISE</b>: The typical diurnal profile for the different stations <ul> <li>Similar as for the month, you can now group by the hour of the day.</li> </ul> </div> ``` # %load snippets/01-pandas_introduction100.py ``` <div class="alert alert-success"> <b>EXERCISE</b>: What is the difference in the typical diurnal profile between week and weekend days for the 'BASCH' station. <ul> <li>Add a column 'weekday' defining the different days in the week.</li> <li>Add a column 'weekend' defining if a days is in the weekend (i.e. days 5 and 6) or not (True/False).</li> <li>You can groupby on multiple items at the same time. In this case you would need to group by both weekend/weekday and hour of the day.</li> </ul> </div> Add a column indicating the weekday: ``` no2.index.weekday? # %load snippets/01-pandas_introduction102.py ``` Add a column indicating week/weekend ``` # %load snippets/01-pandas_introduction103.py ``` Now we can groupby the hour of the day and the weekend (or use `pivot_table`): ``` # %load snippets/01-pandas_introduction104.py # %load snippets/01-pandas_introduction105.py # %load snippets/01-pandas_introduction106.py # %load snippets/01-pandas_introduction107.py ``` <div class="alert alert-success"> <b>EXERCISE</b>: What are the number of exceedances of hourly values above the European limit 200 µg/m3 ? Count the number of exceedances of hourly values above the European limit 200 µg/m3 for each year and station after 2005. Make a barplot of the counts. Add an horizontal line indicating the maximum number of exceedances (which is 18) allowed per year? <br><br> Hints: <ul> <li>Create a new DataFrame, called `exceedances`, (with boolean values) indicating if the threshold is exceeded or not</li> <li>Remember that the sum of True values can be used to count elements. Do this using groupby for each year.</li> <li>Adding a horizontal line can be done with the matplotlib function `ax.axhline`.</li> </ul> </div> ``` # re-reading the data to have a clean version no2 = pd.read_csv('data/20000101_20161231-NO2.csv', sep=';', skiprows=[1], na_values=['n/d'], index_col=0, parse_dates=True) # %load snippets/01-pandas_introduction109.py # %load snippets/01-pandas_introduction110.py # %load snippets/01-pandas_introduction111.py ``` # 9. 
What I didn't talk about - Concatenating data: `pd.concat` - Merging and joining data: `pd.merge` - Reshaping data: `pivot_table`, `melt`, `stack`, `unstack` - Working with missing data: `isnull`, `dropna`, `interpolate`, ... - ... ## Further reading * Pandas documentation: http://pandas.pydata.org/pandas-docs/stable/ * Books * "Python for Data Analysis" by Wes McKinney * "Python Data Science Handbook" by Jake VanderPlas * Tutorials (many good online tutorials!) * https://github.com/jorisvandenbossche/pandas-tutorial * https://github.com/brandon-rhodes/pycon-pandas-tutorial * Tom Augspurger's blog * https://tomaugspurger.github.io/modern-1.html
github_jupyter
``` print('Meu nome é: Gabriel Moraes Barros ') print('Meu RA é: 192801') %matplotlib inline import matplotlib.pyplot as plot from IPython import display import sys import numpy as np import numpy.random as nr import keras from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array from keras.models import Sequential from keras.layers import Convolution2D, MaxPooling2D from keras.layers import Conv2D from keras.layers import Activation, Dropout, Flatten, Dense from keras.optimizers import (SGD, RMSprop, Adam, Adadelta, Adagrad) print('Keras ', keras.__version__) import os os.makedirs('../models', exist_ok=True) nr.seed(20170603) !ls ../utils sys.path.append('../utils') from my_keras_utilities import (get_available_gpus, load_model_and_history, save_model_and_history, TrainingPlotter, train_network) ``` ### Testa se um modulo foi importado ``` 'my_keras_utilities' in sys.modules ``` try: train_network(model_week05, model_name, train_generator, validation_generator, **fit_params); except AttributeError: print('nope') ``` import keras.backend as K K.set_image_data_format('channels_first') K.set_floatx('float32') print('Backend: {}'.format(K.backend())) print('Data format: {}'.format(K.image_data_format())) !nvidia-smib !ls ../Task\ 5 ``` ## Função auxiliar ``` class MyCb(TrainingPlotter): def on_epoch_end(self, epoch, logs={}): super().on_epoch_end(epoch, logs) def train_network(model, model_name, train_generator, validation_generator, train_steps=10, valid_steps=10, opt='rmsprop', nepochs=50, patience=50, reset=False, ploss=1.0): do_plot = (ploss > 0.0) model_fn = model_name + '.model' if reset and os.path.isfile(model_fn): os.unlink(model_name + '.model') if not os.path.isfile(model_fn): # initialize the optimizer and model print("[INFO] compiling model...") model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"]) # History, checkpoint, earlystop, plot losses: cb = [ModelCheckpoint(model_file, monitor='val_acc', verbose=0, save_best_only=True, mode='auto', period=1), MyCb(n=1, filepath=model_name, patience=patience, plot_losses=do_plot), ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=7, verbose=0, mode='auto', epsilon=0.00001, cooldown=0, min_lr=0) ] else: print("[INFO] loading model...") model, cb = load_model_and_history(model_name) cb.patience = patience past_epochs = cb[1].get_nepochs() tr_epochs = nepochs - past_epochs if do_plot: vv = 0 fig = plot.figure(figsize=(15,6)) plot.ylim(0.0, ploss) plot.xlim(0, nepochs) plot.grid(True) else: vv = 2 print("[INFO] training for {} epochs ...".format(tr_epochs)) try: model.fit_generator(train_generator, steps_per_epoch=train_steps, validation_data=validation_generator, validation_steps=valid_steps, epochs=nepochs, verbose=vv, callbacks=[cb[1]]) except KeyboardInterrupt: pass model, histo = load_model_and_history(model_name) return model, cb def test_network(model_name, validation_generator, nb_validation_samples): model, histo = load_model_and_history(model_name) print('Model from epoch {}'.format(histo.best_epoch)) print("[INFO] evaluating in the test data set ...") loss, accuracy = model.evaluate_generator(validation_generator, nb_validation_samples) print("\n[INFO] accuracy on the test data set: {:.2f}% [{:.5f}]".format(accuracy * 100, loss)) ``` ## Subindo o dataset ``` #auternar o comentário, se estiver no client ou no remote data = np.load('/etc/jupyterhub/ia368z_2s2017/datasets/cifar10-redux.npz') #data = 
np.load('../Task 5/cifar10-redux.npz') X_train = data['X_train'] y_train = data['y_train'] X_test = data['X_test'] y_test = data['y_test'] X_train.dtype, y_train.dtype, X_test.dtype, y_test.dtype ``` ### Separando o conjunto de treinamento em validação e treinamento, numa proporção 80/20 % ``` p=np.random.permutation(len(X_train)) percent_factor=0.85 new_train_x = X_train[p] new_train_y = y_train[p] new_X_train = new_train_x[0:(np.floor(len(new_train_x)*percent_factor))] new_y_train = new_train_y[0:(np.floor(len(new_train_y)*percent_factor))] new_X_val = new_train_x[(np.ceil(len(new_train_x)*percent_factor)):] new_y_val = new_train_y[(np.ceil(len(new_train_y)*percent_factor)):] print('X_train.shape',new_X_train.shape) print('y_train.shape',new_y_train.shape) print('X_val.shape',new_X_val.shape) print('y_val.shape',new_y_val.shape) print('y_test shape ',y_test.shape) print('X_test.shape:',X_test.shape) print('Número de diferentes classes',len(np.unique(y_test))) ``` Normalizando os dados ``` a=0 print(np.mean(X_train)) ``` #Guaranteeing that it only runs once if (a==0): X_test = X_test.astype('float32') new_X_train = new_X_train.astype('float32') new_X_val = new_X_val.astype('float32') new_X_val /= 255. new_X_train /= 255. X_test /= 255. a=1 print(np.mean(new_X_train)) print(np.mean(new_X_val)) print(np.mean(X_test)) ``` from keras.utils import np_utils ## Transforma o vetor de labels para o formato de one-hot encoding. n_classes = 3 y_train_oh = np_utils.to_categorical(new_y_train-3, n_classes) y_val_oh = np_utils.to_categorical(new_y_val-3, n_classes) y_test_oh = np_utils.to_categorical(y_test-3, n_classes) print(y_train_oh.shape) print(y_val_oh.shape) print(y_test_oh.shape) ``` ## Fazendo o data augmentation ``` print(X_train.shape) print(X_test.shape) print('new x train shape', new_X_train.shape) print('y train oh shape', y_train_oh.shape) print('new x val shape', new_X_val.shape) print('y val oh shape', y_val_oh.shape) from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array nb_train_samples = new_train_x.shape[0] nb_val_samples = new_X_val.shape[0] print('nb val samples',nb_val_samples) nb_test_samples = X_test.shape[0] # dimensions of our images. 
img_width, img_height = 32, 32 batch_size=100 # this is the augmentation configuration we will use for training aug_datagen = ImageDataGenerator( rescale=1./255, # sempre faz o rescale shear_range=0.2, # sorteio entre 0 e 0.2 distribuição uniforme zoom_range=0.2, # sorteio entre 0 e 0.2 horizontal_flip=True) # sorteio 50% non_aug_datagen = ImageDataGenerator( rescale=1./255) train_generator = aug_datagen.flow( x = new_X_train, y = y_train_oh, # as amostras de treinamento batch_size=batch_size,shuffle=False # batch size do SGD ) validation_generator = non_aug_datagen.flow( x = new_X_val, y = y_val_oh, # as amostras de validação batch_size=batch_size, shuffle = False) test_generator = non_aug_datagen.flow( x = X_test, y = y_test_oh, # as amostras de validação batch_size=batch_size, shuffle = False) ``` #Conjunto de treinaemnto samples_train = train_datagen.flow(new_X_train) n_samples_train = nb_train_samples/batch_size #Conjunto de teste samples_test = train_datagen.flow(X_test) n_samples_test = nb_test_samples/batch_size #Conjunto de validacao samples_val = train_datagen.flow(new_X_val) n_samples_val = nb_val_samples/batch_size ``` n_classes = len(np.unique(y_test)) print(n_classes) ``` ## Treinamento # Transfer_Learning ## Subindo a VGG-16 ``` print(y_train_oh.shape) from keras.applications.vgg16 import VGG16 modelvgg = VGG16(include_top=False, weights='imagenet',classes=y_train_oh.shape[1]) train_feature = modelvgg.predict_generator(generator=train_generator, steps=int(np.round(train_generator.n / batch_size))) print(train_feature.shape, train_feature.dtype) validation_features = modelvgg.predict_generator(generator = validation_generator, steps=int(np.round(validation_generator.n / batch_size))) print(validation_features.shape, validation_features.dtype) train_feature.shape ``` topmodel.summary() modelvgg.summary() ``` train_feature.shape[1:] train_feat = train_feature.reshape(1700,512) print(train_feat.shape) modelvgg.output def model_build(): img_rows, img_cols = 32, 32 # Dimensões das imagens #imagens com 3 canais e 32x32 input_shape = (3, img_rows, img_cols) # Definindo a rede model = Sequential() #primeira conv model.add(Conv2D(32, (3, 3), input_shape=input_shape)) model.add(Activation('relu')) #segunda conv model.add(Conv2D(32,(3,3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) # Aqui os features deixam de ser imagens model.add(Flatten()) model.add(Dense(128)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(n_classes)) model.add(Activation('softmax')) return model model_week05 = model_build() model_week05.load_weights('../my_cifar_dataplus_model_weights.h5') print("done") model_week05.summary() print(model_week05.layers) print(len(model_week05.layers)) weights10 = model_week05.layers[10].get_weights() print(weights10[0].shape,weights10[1].shape) weights7 = model_week05.layers[7].get_weights() print(weights7[0].shape,weights7[1].shape) w2, b2 = weights10 w1, b1 = weights7 topmodel = Sequential() topmodel.add(layer=keras.layers.Flatten(input_shape=(1,1,512))) # topmodel.add(layer=keras.layers.Dense(units=256, activation='relu', name='d256')) topmodel.add(layer=keras.layers.Dense(units=128, name='d256',)) topmodel.add(Activation('relu')) topmodel.add(layer=keras.layers.Dropout(rate=.5)) topmodel.add(layer=keras.layers.Dense(units=3, name='d3')) topmodel.add(Activation('softmax')) # topmodel.compile(optimizer=keras.optimizers.SGD(lr=.05, momentum=.9, nesterov=True), topmodel.compile(optimizer='adam', 
loss='categorical_crossentropy', metrics=['accuracy']) topmodel.summary() print(topmodel.layers) print(len(topmodel.layers)) topmodel.layers[20].set_weights([w1, b1]) topmodel.layers[5].set_weights([wei]) !ls ../ ``` w1, b1, w2, b2 = load_model('../my_cifar_dataplus_model_weights.h5').get_weights() w1, b1, w2, b2 = load_model('../my_cifar_dataplus_model_weights').get_weights()m m mmm ``` model2.load_weights('../my_cifar_dataplus_model_weights.h5') ``` # Aqui os features deixam de ser imagens model.add(Flatten()) model.add(Dense(128)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(n_classes)) model.add(Activation('softmax')) topmodel = Sequential() # topmodel.add(layer=keras.layers.Flatten(input_shape=feat_train.shape[1:])) # topmodel.add(layer=keras.layers.Dense(units=256, activation='relu', name='d256')) topmodel.add(layer=keras.layers.Dense(units=256, activation='relu', name='d256', input_shape=(1,1,512))) topmodel.add(layer=keras.layers.Dropout(rate=.5)) topmodel.add(layer=keras.layers.Dense(units=3, activation='softmax', name='d3')) # topmodel.compile(optimizer=keras.optimizers.SGD(lr=.05, momentum=.9, nesterov=True), topmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) #train_features = modelvgg.predict(new_X_train) train_features = modelvgg.predict(new_X_train) print('train_features shape and type',train_features.shape,train_features.dtype) validation_features = modelvgg.predict(new_X_val) print('validation_features shape and type',validation_features.shape,train_features.dtype) test_features = modelvgg.predict(X_test) print('test_features shape and type',test_features.shape,train_features.dtype) ``` modelvgg.layers.pop(18) modelvgg.layers.pop(17) modelvgg.layers.pop(16) modelvgg.layers.pop(15) #modelvgg.summary() train_features = modelvgg.predict(new_X_train) print('train_features shape and type',train_features.shape,train_features.dtype) validation_features = modelvgg.predict(new_X_val) print('validation_features shape and type',validation_features.shape,train_features.dtype) test_features = modelvgg.predict(X_test) print('test_features shape and type',test_features.shape,train_features.dtype) train_features.shape[1:] model_name = '../cifar_redux_augmented_vgg' modelVGG = Sequential() modelVGG.add(Flatten(input_shape= train_features.shape[1:])) modelVGG.add(Dense(120)) modelVGG.add(Activation('relu')) modelVGG.add(Dropout(0.5)) modelVGG.add(Dense(3)) modelVGG.add(Activation('softmax')) modelVGG.summary() ``` ## Treinando class MyCb(TrainingPlotter): ``` class MyCb(TrainingPlotter): def on_epoch_end(self, epoch, logs={}): super().on_epoch_end(epoch, logs) def train_network(model, model_name, Xtra, ytra, Xval, yval, opt='rmsprop', batch_size=100, nepochs=50, patience=50, reset=False, ploss=1.0): do_plot = (ploss > 0.0) model_fn = model_name + '.model' if reset and os.path.isfile(model_fn): os.unlink(model_name + '.model') if not os.path.isfile(model_fn): # initialize the optimizer and model print("[INFO] compiling model...") model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"]) # History, checkpoint, earlystop, plot losses: cb = MyCb(n=1, filepath=model_name, patience=patience, plot_losses=do_plot) else: print("[INFO] loading model...") model, cb = load_model_and_history(model_name) cb.patience = patience past_epochs = cb.get_nepochs() tr_epochs = nepochs - past_epochs if do_plot: vv = 0 fig = plot.figure(figsize=(15,6)) plot.ylim(0.0, ploss) plot.xlim(0, nepochs) plot.grid(True) else: vv = 2 
print("[INFO] training for {} epochs ...".format(tr_epochs)) try: model.fit(Xtra, ytra, batch_size=batch_size, epochs=tr_epochs, verbose=vv, validation_data=(Xval,yval), callbacks=[cb]) except KeyboardInterrupt: pass model, histo = load_model_and_history(model_name) return model, cb def test_network(model_name, Xtest, ytest, batch_size=40): model, histo = load_model_and_history(model_name) print('Model from epoch {}'.format(histo.best_epoch)) print("[INFO] evaluating in the test data set ...") loss, accuracy = model.evaluate(Xtest, ytest, batch_size=batch_size, verbose=1) print("\n[INFO] accuracy on the test data set: {:.2f}% [{:.5f}]".format(accuracy * 100, loss)) print('train_features.shape',train_features.shape) print('validation_features.shape',validation_features.shape) print('test_features.shape',test_features.shape) fit_params = { 'opt': 'adam', # SGD(lr=0.01, momentum=0.9, nesterov=True), 'nepochs': 100, 'patience': 30, 'ploss': 1.5, 'reset': True, } train_network(modelVGG, model_name, train_features, y_train_oh, validation_features, y_val_oh, **fit_params); test_network(model_name, test_features,y_test_oh,X_test.shape[0]) from keras.applications.vgg16 import VGG16 print("[INFO] creating model...") #vgg = VGG16(include_top=False, weights='imagenet', input_shape=(img_height, img_width, 3)) vgg = VGG16(include_top=False, weights='imagenet') vgg.summary() ``` ## Construção da rede neural ``` print(train_features.shape) print(new_X_train.shape) img_height, img_width = new_X_train.shape[2],new_X_train.shape[3] print(img_height,img_width) !ls .. from keras.models import Model from keras.models import load_model model_name = '../cifar10_vgg_finetune' # modelo da rede atual top_model_name = '../cifar_redux_augmented_vgg' nb_classes=3 def build_net(top_model_name): from keras.applications.vgg16 import VGG16 print("[INFO] creating model...") #vgg = VGG16(include_top=False, weights='imagenet', input_shape=(img_height, img_width, 3)) #vgg = VGG16(include_top=False, weights='imagenet', input_shape=(3,img_height, img_width)) vgg = VGG16(include_top=False, weights='imagenet', classes=nb_classes, pooling='max') print(vgg.output) # build a classifier model and put on top of the convolutional model #x = Flatten()(vgg.output) x = Dense(120, activation='relu', name='dense1')(vgg.output) x = Dropout(0.5)(x) x = Dense(3, activation='relu', name='d1')(x) x = Dropout(0.5)(x) x = Dense(1, activation='sigmoid', name='d2')(x) #x = Dense(40, activation='relu', name='dense1')(vgg.output) # x = Dropout(0.5)(x) #x = Dense(120, activation='relu', name='dense2')(x) #x = Dropout(0.2)(x) #x = Dense(nb_classes, activation='softmax', name='dense3')(x) #model = Model(inputs=vgg.input, outputs=x model = Model(inputs=vgg.input, outputs=x) print(model.layers) print(len(model.layers)) # print('Model layers:') # for i, layer in enumerate(model.layers): # print(' {:2d} {:15s} {}'.format(i, layer.name, layer)) # modelo da rede densa treinada no notebook anterior top_model_name = top_model_name # Carrego os pesos treinados anteriormente #w1, b1, w2, b2 = load_model(top_model_name).get_weights() w1, b1, w2, b2 = modelVGG.get_weights() print(w1.shape,b1.shape,w2.shape,b2.shape) # Coloco nas camadas densas finais da rede model.layers[20].set_weights([w1, b1]) model.layers[22].set_weights([w2, b2]) # Torno não-treináveis as primeiras 15 camadas # da rede (os pesos não serão alterados) for layer in model.layers[:15]: layer.trainable = False return model model = build_net(top_model_name) #model.summary() 
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) batch_size = 40 print(new_X_train.shape, y_train_oh.shape,new_X_val.shape, y_val_oh.shape) ``` (1600, 32, 32, 3) (1600, 3) (400, 32, 32, 3) (400, 3) ``` h = model.fit(new_X_train.reshape(1700,3,32,32), y_train_oh, validation_data=(new_X_val.reshape(300,3,32,32), y_val_oh), batch_size=batch_size, epochs=100, ) h = model.fit(X_train[train_i], y_train_oh[train_i], validation_data=(X_train[val_i], y_train_oh[val_i]), batch_size=batch_size, epochs=400, callbacks=[early_stopping, checkpointer, reduce_lr], verbose=1) model_name = '../cifar10_vgg_finetune' fit_params = { 'opt': 'adam', # SGD(lr=0.01, momentum=0.9, nesterov=True), 'nepochs': 100, 'patience': 30, 'ploss': 1.5, 'reset': True, } train_network(model, model_name, train_features, y_train_oh, validation_features, y_val_oh, **fit_params); ```
github_jupyter
# Data Distribution vs. Sampling Distribution: What You Need to Know This notebook is accompanying the article [Data Distribution vs. Sampling Distribution: What You Need to Know](https://www.ealizadeh.com/blog/statistics-data-vs-sampling-distribution/). Subscribe to **[my mailing list](https://www.ealizadeh.com/subscribe/)** to receive my posts on statistics, machine learning, and interesting Python libraries and tips & tricks. You can also follow me on **[Medium](https://medium.com/@ealizadeh)**, **[LinkedIn](https://www.linkedin.com/in/alizadehesmaeil/)**, and **[Twitter]( https://twitter.com/es_alizadeh)**. Copyright © 2021 [Esmaeil Alizadeh](https://ealizadeh.com) ``` from IPython.display import Image Image("https://www.ealizadeh.com/wp-content/uploads/2021/01/data_dist_sampling_dist_featured_image.png", width=1200) ``` --- It is important to distinguish between the data distribution (aka population distribution) and the sampling distribution. The distinction is critical when working with the central limit theorem or other concepts like the standard deviation and standard error. In this post we will go over the above concepts and as well as bootstrapping to estimate the sampling distribution. In particular, we will cover the following: - Data distribution (aka population distribution) - Sampling distribution - Central limit theorem (CLT) - Standard error and its relation with the standard deviation - Bootstrapping --- ## Data Distribution Much of the statistics deals with inferring from samples drawn from a larger population. Hence, we need to distinguish between the analysis done the original data as opposed to analyzing its samples. First, let's go over the definition of the data distribution: 💡 **Data distribution:** *The frequency distribution of individual data points in the original dataset.* ### Generate Data Let's first generate random skewed data that will result in a non-normal (non-Gaussian) data distribution. The reason behind generating non-normal data is to better illustrate the relation between data distribution and the sampling distribution. So, let's import the Python plotting packages and generate right-skewed data. ``` # Plotting packages and initial setup import seaborn as sns sns.set_theme(palette="pastel") sns.set_style("white") import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams["figure.dpi"] = 150 savefig_options = dict(format="png", dpi=150, bbox_inches="tight") from scipy.stats import skewnorm from sklearn.preprocessing import MinMaxScaler num_data_points = 10000 max_value = 100 skewness = 15 # Positive values are right-skewed skewed_random_data = skewnorm.rvs(a=skewness, loc=max_value, size=num_data_points, random_state=1) skewed_data_scaled = MinMaxScaler().fit_transform(skewed_random_data.reshape(-1, 1)) ``` Plotting the data distribution ``` fig, ax = plt.subplots(figsize=(10, 6)) ax.set_title("Data Distribution", fontsize=24, fontweight="bold") sns.histplot(skewed_data_scaled, bins=30, stat="density", kde=True, legend=False, ax=ax) # fig.savefig("original_skewed_data_distribution.png", **savefig_options) ``` ## Sampling Distribution In the sampling distribution, you draw samples from the dataset and compute a statistic like the mean. It's very important to differentiate between the data distribution and the sampling distribution as most confusion comes from the operation done on either the original dataset or its (re)samples. 
💡 **Sampling distribution:** *The frequency distribution of a sample statistic (aka metric) over many samples drawn from the dataset[katex]^{[1]}[/katex]. Or to put it simply, the distribution of sample statistics is called the sampling distribution.* The algorithm to obtain the sampling distribution is as follows: 1. Draw a sample from the dataset. 2. Compute a statistic/metric of the drawn sample in Step 1 and save it. 3. Repeat Steps 1 and 2 many times. 4. Plot the distribution (histogram) of the computed statistic. ``` import numpy as np import random sample_size = 50 sample_means = [] random.seed(1) # Setting the seed for reproducibility of the result for _ in range(2000): sample = random.sample(skewed_data_scaled.tolist(), sample_size) sample_means.append(np.mean(sample)) print( f"Mean: {np.mean(sample_means).round(5)}" ) fig, ax = plt.subplots(figsize=(10, 6)) ax.set_title("Sampling Distribution", fontsize=24, fontweight="bold") sns.histplot(sample_means, bins=30, stat="density", kde=True, legend=False) # fig.savefig("sampling_distribution.png", **savefig_options) ``` Above sampling distribution is basically the histogram of the mean of each drawn sample (in above, we draw samples of 50 elements over 2000 iterations). The mean of the above sampling distribution is around 0.23, as can be noted from computing the mean of all samples means. ⚠️ *Do not confuse the sampling distribution with the sample distribution. The sampling distribution considers the distribution of sample statistics (e.g. mean), whereas the sample distribution is basically the distribution of the sample taken from the population.* ## Central Limit Theorem (CLT) 💡 **Central Limit Theorem:** *As the sample size gets larger, the sampling distribution tends to be more like a normal distribution (bell-curve shape).* *In CLT, we analyze the sampling distribution and not a data distribution, an important distinction to be made.* CLT is popular in hypothesis testing and confidence interval analysis, and it's important to be aware of this concept, even though with the use of bootstrap in data science, this theorem is less talked about or considered in the practice of data science$^{[1]}$. More on bootstrapping is provided later in the post. ## Standard Error (SE) The [standard error](https://en.wikipedia.org/wiki/Standard_error) is a metric to describe *the variability of a statistic in the sampling distribution*. We can compute the standard error as follows: $$ \text{Standard Error} = SE = \frac{s}{\sqrt{n}} $$ where $s$ denotes the standard deviation of the sample values and $n$ denotes the sample size. It can be seen from the formula that *as the sample size increases, the SE decreases*. We can estimate the standard error using the following approach$^{[1]}$: 1. Draw a new sample from a dataset. 2. Compute a statistic/metric (e.g., mean) of the drawn sample in Step 1 and save it. 3. Repeat Steps 1 and 2 several times. 4. An estimate of the standard error is obtained by computing the standard deviation of the previous steps' statistics. While the above approach can be used to estimate the standard error, we can use bootstrapping instead, which is preferable. I will go over that in the next section. ⚠️ *Do not confuse the standard error with the standard deviation. 
The standard deviation captures the variability of the individual data points (how spread the data is), unlike the standard error that captures a sample statistic's variability.* ## Bootstrapping Bootstrapping is an easy way of estimating the sampling distribution by randomly drawing samples from the population (with replacement) and computing each resample's statistic. Bootstrapping does not depend on the CLT or other assumptions on the distribution, and it is the standard way of estimating SE$^{[1]}$. Luckily, we can use [`bootstrap()`](https://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/) functionality from the [MLxtend library](https://rasbt.github.io/mlxtend/) (You can read [my post](https://www.ealizadeh.com/blog/mlxtend-library-for-data-science/) on MLxtend library covering other interesting functionalities). This function also provides the flexibility to pass a custom sample statistic. ``` from mlxtend.evaluate import bootstrap avg, std_err, ci_bounds = bootstrap( skewed_data_scaled, num_rounds=1000, func=np.mean, # A function to compute a sample statistic can be passed here ci=0.95, seed=123 # Setting the seed for reproducibility of the result ) print( f"Mean: {avg.round(5)} \n" f"Standard Error: +/- {std_err.round(5)} \n" f"CI95: [{ci_bounds[0].round(5)}, {ci_bounds[1].round(5)}]" ) ``` ## Conclusion The main takeaway is to differentiate between whatever computation you do on the original dataset or the sampling of the dataset. Plotting a histogram of the data will result in data distribution, whereas plotting a sample statistic computed over samples of data will result in a sampling distribution. On a similar note, the standard deviation tells us how the data is spread, whereas the standard error tells us how a sample statistic is spread out. Another takeaway is that even if the original data distribution is non-normal, the sampling distribution is normal (central limit theorem). Thanks for reading! ___If you liked this post, you can [join my mailing list here](https://www.ealizadeh.com/subscribe/) to receive more posts about Data Science, Machine Learning, Statistics, and interesting Python libraries and tips & tricks. You can also follow me on my [website](https://ealizadeh.com/), [Medium](https://medium.com/@ealizadeh), [LinkedIn](https://www.linkedin.com/in/alizadehesmaeil/), or [Twitter](https://twitter.com/es_alizadeh).___ # References [1] P. Bruce & A. Bruce (2017), Practical Statistics for Data Scientists, First Edition, O’Reilly # Useful Links [MLxtend: A Python Library with Interesting Tools for Data Science Tasks](https://www.ealizadeh.com/blog/mlxtend-library-for-data-science/)
github_jupyter
## Test "best of two" classifier This notebook test a classifier that operates in two layers: - First we use a SVM classifier to label utterances with high degree of certainty. - Afterwards we use heuristics to complete the labeling ``` import os import sys import pandas as pd import numpy as np import random import pickle import matplotlib.pyplot as plt root_path = os.path.dirname(os.path.abspath(os.getcwd())) sys.path.append(root_path) from sklearn.svm import SVC from sklearn.svm import LinearSVC from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from src import phase_classification as pc data_path = os.path.join(root_path,'data') tables_path = os.path.join(data_path,'tables') results_path = os.path.join(root_path,'results') output_path =os.path.join(results_path,'tables') import importlib importlib.reload(pc) WITH_STEMMING = True #REMOVE_STOPWORDS = True SEED = 10 NUM_TOPICS = 60 random.seed(SEED) t = 0 CLASS_W = False test_i = '[test1]' file_name = test_i+'IBL_topic_distribution_by_utterance_before_after_{}_{}.xlsx'.format(WITH_STEMMING,NUM_TOPICS) df_data = pd.read_excel(os.path.join(tables_path,'test','before_after',file_name)) the_keys = list(set(df_data['phase'])) total_samples = 0 class_samples = {} for key in the_keys: n = list(df_data.phase.values).count(key) #print("key {}, total {}".format(key,n)) total_samples += n class_samples[key] = n print(total_samples) for key in the_keys: print("key {}, samples: {}, prop: {}".format(key,class_samples[key],round(class_samples[key]*1.0/total_samples,2))) filter_rows = list(range(180))+[187,188] row_label = 180 df_data.head(2) dfs_all,_ = pc.split_df_discussions(df_data,.0,SEED) X_all,y_all_1 = pc.get_joined_data_from_df(dfs_all,filter_rows,row_label) CLASS_W name_classifier = 'classifier_svm_linear_combination_svc_ba_cw_{}.pickle'.format(CLASS_W) with open(os.path.join(data_path,name_classifier),'rb') as f: svc = pickle.load(f) coeff = pickle.load(f) t = pickle.load(f) #t = 0.59 output_first_layer_1 = pc.first_layer_classifier(X_all,t,svc) comparison = list(zip(output_first_layer_1,y_all_1)) df_data['first_layer'] = output_first_layer_1 second_layer_1 = pc.second_layer_combination_test(X_all,coeff,svc) second_layer_1.count(-1) df_data['second_layer'] = second_layer_1 df_data.to_excel(os.path.join(output_path,'[second_layer]'+file_name)) labels = ["Phase {}".format(i) for i in range(1,6)] df = pd.DataFrame(confusion_matrix(y_all_1, second_layer_1),columns=["Predicted {}".format(i) for i in labels]) df.index = labels print(classification_report(y_all_1, second_layer_1)) df print('Accuracy of SVM classifier on training set: {:.2f}' .format(svc.score(X_all, y_all_1))) ``` ### Test 2 ``` test_i = '[test2]' file_name = test_i+'IBL_topic_distribution_by_utterance_before_after_{}_{}.xlsx'.format(WITH_STEMMING,NUM_TOPICS) df_data = pd.read_excel(os.path.join(tables_path,'test','before_after',file_name)) the_keys = list(set(df_data['phase'])) total_samples = 0 class_samples = {} for key in the_keys: n = list(df_data.phase.values).count(key) #print("key {}, total {}".format(key,n)) total_samples += n class_samples[key] = n print(total_samples) for key in the_keys: print("key {}, samples: {}, prop: {}".format(key,class_samples[key],round(class_samples[key]*1.0/total_samples,2))) dfs_all,_ = pc.split_df_discussions(df_data,.0,SEED) X_all,y_all_2 = pc.get_joined_data_from_df(dfs_all,filter_rows,row_label) output_first_layer_2 = pc.first_layer_classifier(X_all,t,name_classifier) comparison = 
list(zip(output_first_layer_2,y_all_2)) df_data['first_layer'] = output_first_layer_2 second_layer_2 = pc.second_layer_combination_test(X_all,coeff,svc) df_data['second_layer'] = second_layer_2 df_data.to_excel(os.path.join(output_path,'[second_layer]'+file_name)) second_layer_2.count(-1) labels = ["Phase {}".format(i) for i in range(1,6)] df = pd.DataFrame(confusion_matrix(y_all_2, second_layer_2),columns=["Predicted {}".format(i) for i in labels]) df.index = labels print(classification_report(y_all_2, second_layer_2)) df print('Accuracy of SVM classifier on training set: {:.2f}' .format(svc.score(X_all, y_all_2))) y_all = y_all_1+y_all_2 pred = second_layer_1 + second_layer_2 df = pd.DataFrame(confusion_matrix(y_all, pred),columns=["Predicted {}".format(i) for i in labels]) df.index = labels print(classification_report(y_all, pred)) df print("Accuracy {0:.3f}".format(np.sum(confusion_matrix(y_all, pred).diagonal())/len(y_all))) bs = [pc.unit_vector(x) for x in y_all] y_pred = [pc.unit_vector(x) for x in pred] np.sqrt(np.sum([np.square(y_pred[i]-bs[i]) for i in range(len(y_all))])/(len(y_all)*2)) ```
github_jupyter
``` import torch import torchvision import matplotlib.pyplot as plt import numpy as np ``` # Classifying Digits with K-Nearest-Neighbors (KNN) This is a very simple implementation of classifying images using the k-nearest-neighbors algorithm. The accuracy is pretty good for how simple the algorithm is. The parameters can be tinkered with, but at the time of writing I am using k = 5, training data size = 10000, testing data size = 1000. Let's set these parameters, read in the data, then view one of the images and the label associated with it. Afterwards I'll explain the algorithm. ``` k = 5 batch_size_train = 10000 batch_size_test = 1000 train_mnist = torchvision.datasets.MNIST('C:/projects/summer2020/vision/digits/', train=True, download=True, transform=torchvision.transforms.ToTensor()) train_loader = torch.utils.data.DataLoader(train_mnist, batch_size=batch_size_train, shuffle=True) test_mnist = torchvision.datasets.MNIST('C:/projects/summer2020/vision/digits/', train=False, download=True, transform=torchvision.transforms.ToTensor()) test_loader = torch.utils.data.DataLoader(test_mnist, batch_size=batch_size_test, shuffle=True) train_set = enumerate(train_loader) _, (train_imgs, train_targets) = next(train_set) test_set = enumerate(test_loader) _, (test_imgs, test_targets) = next(test_set) plt.imshow(train_imgs[0][0], cmap='gray', interpolation='none') plt.title("Ground Truth: {}".format(train_targets[0])) plt.xticks([]) plt.yticks([]) ``` The k-nearest-neighbors algorithm is not very efficient and my implementation is even less efficient. I was aiming for simplicity over efficiency. We loop through each test image and find the distance to every training image. Distance is measured as Euclidean (p=2). We take the k nearest images and record the ground truth digit corresponding to each image. The predicted label is based on the majority of labels from the k nearest images. The majority I chose to use is the median. It is very basic in that it is the central value/label. The effectiveness of this method of majority depends on our value of k. We compare the prediction with the ground truth of the test set, which produces our prediction accuracy. ``` n_test = test_imgs.shape[0] n_train = train_imgs.shape[0] pred_test_targets = torch.zeros_like(test_targets) for i in range(n_test): test_img = test_imgs[i] distances = [torch.dist(test_img, train_imgs[j], p=2) for j in range(n_train)] nearest_indices = np.array(distances).argsort()[:k] pred_test_targets[i] = train_targets[nearest_indices].median() accuracy = np.divide(sum(pred_test_targets == test_targets), len(test_targets)) print('Prediction accuracy: {}'.format(accuracy)) ```
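Since the loop above favors simplicity over speed, here is a hedged sketch of a vectorized variant of the same idea using `torch.cdist` (it reuses the tensors defined above and keeps the same median vote):

```
# flatten the images so each one is a 784-dimensional vector
test_flat = test_imgs.flatten(start_dim=1)    # (n_test, 784)
train_flat = train_imgs.flatten(start_dim=1)  # (n_train, 784)

# all pairwise Euclidean distances in one call: shape (n_test, n_train)
dist = torch.cdist(test_flat, train_flat, p=2)

# indices of the k closest training images for every test image
nearest = dist.topk(k, dim=1, largest=False).indices

# same median "vote" over the neighbors' labels as in the loop above
pred = train_targets[nearest].median(dim=1).values

accuracy = (pred == test_targets).float().mean()
print('Prediction accuracy (vectorized): {}'.format(accuracy))
```

This computes all distances at once instead of a Python double loop, so it is typically much faster for these data sizes.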
github_jupyter
# Distributed data parallel BERT training with TensorFlow2 and SMDataParallel SMDataParallel is a new capability in Amazon SageMaker to train deep learning models faster and cheaper. SMDataParallel is a distributed data parallel training framework for TensorFlow, PyTorch, and MXNet. This notebook example shows how to use SMDataParallel with TensorFlow (version 2.3.1) on [Amazon SageMaker](https://aws.amazon.com/sagemaker/) to train a BERT model using [Amazon FSx for Lustre file-system](https://aws.amazon.com/fsx/lustre/) as the data source. The outline of steps is as follows: 1. Stage the dataset in [Amazon S3](https://aws.amazon.com/s3/). The original dataset for BERT pretraining consists of text passages from BooksCorpus (800M words) (Zhu et al. 2015) and English Wikipedia (2,500M words). Please follow the original guidelines by NVIDIA to prepare the training data in hdf5 format - https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/README.md#getting-the-data 2. Create an Amazon FSx Lustre file-system and import data into the file-system from S3 3. Build the Docker training image and push it to [Amazon ECR](https://aws.amazon.com/ecr/) 4. Configure data input channels for SageMaker 5. Configure hyperparameters 6. Define training metrics 7. Define the training job, set the distribution strategy to SMDataParallel and start training **NOTE:** With a large training dataset, we recommend using [Amazon FSx](https://aws.amazon.com/fsx/) as the input filesystem for the SageMaker training job. FSx file input significantly cuts down training start-up time on SageMaker because it avoids downloading the training data each time you start a training job (as is done with S3 input) and provides good data read throughput. **NOTE:** This example requires SageMaker Python SDK v2.X. ## Amazon SageMaker Initialization Initialize the notebook instance. Get the AWS region and the SageMaker execution role, i.e. the IAM role ARN used to give training and hosting access to your data. See the [Amazon SageMaker Roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) for how to create these. Note: if more than one role is required for notebook instances, training, and/or hosting, please replace sagemaker.get_execution_role() with the appropriate full IAM role ARN string(s). As described above, since we will be using FSx, please make sure to attach the `FSx Access` permission to this IAM role. ``` %%time ! python3 -m pip install --upgrade sagemaker import sagemaker from sagemaker import get_execution_role from sagemaker.estimator import Estimator import boto3 sagemaker_session = sagemaker.Session() bucket = sagemaker_session.default_bucket() role = get_execution_role() # provide a pre-existing role ARN as an alternative to creating a new role print(f'SageMaker Execution Role:{role}') client = boto3.client('sts') account = client.get_caller_identity()['Account'] print(f'AWS account:{account}') session = boto3.session.Session() region = session.region_name print(f'AWS region:{region}') ``` ## Prepare SageMaker Training Images 1. SageMaker by default uses the latest [Amazon Deep Learning Container Images (DLC)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md) TensorFlow training image. In this step, we use it as a base image and install the additional dependencies required for training the BERT model. 2.
In the Github repository https://github.com/HerringForks/DeepLearningExamples.git we have made TensorFlow2-SMDataParallel BERT training script available for your use. This repository will be cloned in the training image for running the model training. ### Build and Push Docker Image to ECR Run the below command build the docker image and push it to ECR. ``` image = "tf2-smdataparallel-bert-sagemaker" # Example: tf2-smdataparallel-bert-sagemaker tag = "latest" # Example: latest !pygmentize ./Dockerfile !pygmentize ./build_and_push.sh %%time ! chmod +x build_and_push.sh; bash build_and_push.sh {region} {image} {tag} ``` ## Preparing FSx Input for SageMaker 1. Download and prepare your training dataset on S3. 2. Follow the steps listed here to create a FSx linked with your S3 bucket with training data - https://docs.aws.amazon.com/fsx/latest/LustreGuide/create-fs-linked-data-repo.html. Make sure to add an endpoint to your VPC allowing S3 access. 3. Follow the steps listed here to configure your SageMaker training job to use FSx https://aws.amazon.com/blogs/machine-learning/speed-up-training-on-amazon-sagemaker-using-amazon-efs-or-amazon-fsx-for-lustre-file-systems/ ### Important Caveats 1. You need use the same `subnet` and `vpc` and `security group` used with FSx when launching the SageMaker notebook instance. The same configurations will be used by your SageMaker training job. 2. Make sure you set appropriate inbound/output rules in the `security group`. Specically, opening up these ports is necessary for SageMaker to access the FSx filesystem in the training job. https://docs.aws.amazon.com/fsx/latest/LustreGuide/limit-access-security-groups.html 3. Make sure `SageMaker IAM Role` used to launch this SageMaker training job has access to `AmazonFSx`. ## SageMaker TensorFlow Estimator function options In the following code block, you can update the estimator function to use a different instance type, instance count, and distrubtion strategy. You're also passing in the training script you reviewed in the previous cell. **Instance types** SMDataParallel supports model training on SageMaker with the following instance types only: 1. ml.p3.16xlarge 1. ml.p3dn.24xlarge [Recommended] 1. ml.p4d.24xlarge [Recommended] **Instance count** To get the best performance and the most out of SMDataParallel, you should use at least 2 instances, but you can also use 1 for testing this example. **Distribution strategy** Note that to use DDP mode, you update the the `distribution` strategy, and set it to use `smdistributed dataparallel`. ### Training script In the Github repository https://github.com/HerringForks/deep-learning-models.git we have made reference TensorFlow-SMDataParallel BERT training script available for your use. Clone the repository. 
``` # Clone herring forks repository for reference implementation BERT with TensorFlow2-SMDataParallel !rm -rf deep-learning-models !git clone --recursive https://github.com/HerringForks/deep-learning-models.git import boto3 import sagemaker sm = boto3.client('sagemaker') notebook_instance_name = sm.list_notebook_instances()['NotebookInstances'][3]['NotebookInstanceName'] print(notebook_instance_name) if notebook_instance_name != 'dsoaws': print('****** ERROR: MUST FIND THE CORRECT NOTEBOOK ******') exit() notebook_instance = sm.describe_notebook_instance(NotebookInstanceName=notebook_instance_name) notebook_instance security_group_id = notebook_instance['SecurityGroups'][0] print(security_group_id) subnet_id = notebook_instance['SubnetId'] print(subnet_id) from sagemaker.tensorflow import TensorFlow print(account) print(region) print(image) print(tag) instance_type = "ml.p3dn.24xlarge" # Other supported instance type: ml.p3.16xlarge, ml.p4d.24xlarge instance_count = 2 # You can use 2, 4, 8 etc. docker_image = f"{account}.dkr.ecr.{region}.amazonaws.com/{image}:{tag}" # YOUR_ECR_IMAGE_BUILT_WITH_ABOVE_DOCKER_FILE username = 'AWS' subnets = [subnet_id] # Should be same as Subnet used for FSx. Example: subnet-0f9XXXX security_group_ids = [security_group_id] # Should be same as Security group used for FSx. sg-03ZZZZZZ job_name = 'smdataparallel-bert-tf2-fsx-2p3dn' # This job name is used as prefix to the sagemaker training job. Makes it easy for your look for your training job in SageMaker Training job console. # TODO: Copy data to FSx/S3 !pip install datasets # For loading datasets from datasets import list_datasets, load_dataset # To see all available dataset names print(list_datasets()) # To load a dataset wiki = load_dataset("wikipedia", "20200501.en", split='train') file_system_id = '<FSX_ID>' # FSx file system ID with your training dataset. Example: 'fs-0bYYYYYY' SM_DATA_ROOT = '/opt/ml/input/data/train' hyperparameters={ "train_dir": '/'.join([SM_DATA_ROOT, 'tfrecords/train/max_seq_len_128_max_predictions_per_seq_20_masked_lm_prob_15']), "val_dir": '/'.join([SM_DATA_ROOT, 'tfrecords/validation/max_seq_len_128_max_predictions_per_seq_20_masked_lm_prob_15']), "log_dir": '/'.join([SM_DATA_ROOT, 'checkpoints/bert/logs']), "checkpoint_dir": '/'.join([SM_DATA_ROOT, 'checkpoints/bert']), "load_from": "scratch", "model_type": "bert", "model_size": "large", "per_gpu_batch_size": 64, "max_seq_length": 128, "max_predictions_per_seq": 20, "optimizer": "lamb", "learning_rate": 0.005, "end_learning_rate": 0.0003, "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "gradient_accumulation_steps": 1, "learning_rate_decay_power": 0.5, "warmup_steps": 2812, "total_steps": 2000, "log_frequency": 10, "run_name" : job_name, "squad_frequency": 0 } estimator = TensorFlow(entry_point='albert/run_pretraining.py', role=role, image_uri=docker_image, source_dir='deep-learning-models/models/nlp', framework_version='2.3.1', py_version='py3', instance_count=instance_count, instance_type=instance_type, sagemaker_session=sagemaker_session, subnets=subnets, hyperparameters=hyperparameters, security_group_ids=security_group_ids, debugger_hook_config=False, # Training using SMDataParallel Distributed Training Framework distribution={'smdistributed':{ 'dataparallel':{ 'enabled': True } } } ) ``` # Configure FSx Input for the SageMaker Training Job ``` from sagemaker.inputs import FileSystemInput #YOUR_MOUNT_PATH_FOR_TRAINING_DATA # NOTE: '/fsx/' will be the root mount path. 
# Example: '/fsx/albert'
file_system_directory_path = '/fsx/'
file_system_access_mode = 'rw'
file_system_type = 'FSxLustre'

train_fs = FileSystemInput(file_system_id=file_system_id,
                           file_system_type=file_system_type,
                           directory_path=file_system_directory_path,
                           file_system_access_mode=file_system_access_mode)
data_channels = {'train': train_fs}

# Submit SageMaker training job
estimator.fit(inputs=data_channels, job_name=job_name)
```
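The outline above lists "Define training metrics" as a step, but the estimator is created without any. A hedged sketch of how `metric_definitions` could be attached to the same TensorFlow estimator is shown below; the regular expressions are assumptions about the training script's log format, not patterns taken from the HerringForks repository.

```
# Illustrative only: metric_definitions tell SageMaker which regexes to apply to
# the training logs so the values appear in the Training Job console and CloudWatch.
# The patterns below are assumptions about what the BERT script prints.
metric_definitions = [
    {"Name": "train_loss", "Regex": "loss[=:]\\s*([0-9\\.]+)"},
    {"Name": "learning_rate", "Regex": "learning_rate[=:]\\s*([0-9\\.e\\-]+)"},
]

# They would be passed to the same TensorFlow estimator defined above, e.g.:
# estimator = TensorFlow(..., metric_definitions=metric_definitions, ...)
```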
# Continuous Control
---
In this notebook I will implement the Distributed Distributional Deep Deterministic Policy Gradients (D4PG) algorithm.

#### Different algorithms can also be used to solve this problem, such as:
'''
1 - Deep Deterministic Policy Gradients (DDPG)
2 - Proximal Policy Optimization (PPO)
3 - Asynchronous Advantage Actor-Critic (A3C)
4 - Trust Region Policy Optimization (TRPO)
and many more
'''

# 1. Start the Environment
The environments corresponding to both versions of the environment are already saved in the working directory and can be accessed at the file paths provided below. Please select one of the two options below for loading the environment.

Note: This implementation applies to the second option, where the environment consists of 20 agents.

```
import random
import time
import torch
import torch.nn as nn
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

from unityagents import UnityEnvironment
from d4pg_agent import Agent

# select this option to load version 1 (with a single agent) of the environment
#env = UnityEnvironment(file_name='Reacher_v1_Windows_x86_64/Reacher.exe')

# select this option to load version 2 (with 20 agents) of the environment
env = UnityEnvironment(file_name='Reacher_v2_Windows_x86_64/Reacher.exe')
```

Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.

```
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
```

# 2. Examine the State and Action Spaces
Run the code cell below to print some information about the environment.

```
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]

# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)

# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)

# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
```

# 3. Take Random Actions in the Environment
In the next code cell, you will learn how to use the Python API to control the agents and receive feedback from the environment. Note that **in this coding environment, you will not be able to watch the agents while they are training**, and you should set `train_mode=True` to restart the environment.
```
for i in range(10):
    env_info = env.reset(train_mode=True)[brain_name]        # reset the environment
    states = env_info.vector_observations                    # get the current state (for each agent)
    scores = np.zeros(num_agents)                            # initialize the score (for each agent)
    while True:
        actions = np.random.randn(num_agents, action_size)   # select an action (for each agent)
        actions = np.clip(actions, -1, 1)                    # all actions between -1 and 1
        env_info = env.step(actions)[brain_name]             # send all actions to the environment
        next_states = env_info.vector_observations           # get next state (for each agent)
        rewards = env_info.rewards                           # get reward (for each agent)
        dones = env_info.local_done                          # see if episode finished
        scores += env_info.rewards                           # update the score (for each agent)
        states = next_states                                 # roll over states to next time step
        if np.any(dones):                                    # exit loop if episode finished
            break
    print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
```

# 4. Implementation
Now I will implement the D4PG algorithm.

```
agent = Agent(state_size=state_size, action_size=action_size, num_agents=num_agents, seed=7)

# Training the agent over a number of episodes until we reach the desired average reward, which is > 30
def d4pg(n_episodes=2000):
    scores = []
    scores_deque = deque(maxlen=100)
    rolling_average_score = []
    for i_episode in range(1, n_episodes+1):
        env_info = env.reset(train_mode=True)[brain_name]    # reset the environment
        state = env_info.vector_observations                 # get the current state (for each agent)
        agent.reset()
        score = np.zeros(num_agents)
        for timestep in range(1000):
            action = agent.act(state)
            env_info = env.step(action)[brain_name]          # send all actions to the environment
            next_state = env_info.vector_observations        # get next state (for each agent)
            reward = env_info.rewards                        # get reward (for each agent)
            done = env_info.local_done                       # to see if episode finished
            score += reward
            agent.step(state, action, reward, next_state, done)
            state = next_state
            if np.any(done):                                 # see if any episode finished
                break
        score = np.mean(score)
        scores_deque.append(score)
        scores.append(score)
        rolling_average_score.append(np.mean(scores_deque))
        print('\rEpisode {}\tAverage Score: {:.2f}\tScore: {:.2f}'.format(i_episode, np.mean(scores_deque), score), end="")
        if i_episode % 10 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
        # stop once the rolling average over the last 100 episodes exceeds 30
        if np.mean(scores_deque) > 30 and len(scores_deque) > 99:
            print('Target average reward achieved!')
            torch.save(agent.actor_local.state_dict(), 'checkpoint_actor_local.pth')    # save local actor
            torch.save(agent.critic_local.state_dict(), 'checkpoint_critic_local.pth')  # save local critic
            break
    return scores, rolling_average_score

scores, rolling_average_score = d4pg()

fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores)
plt.plot(np.arange(1, len(rolling_average_score)+1), rolling_average_score)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()

# Here you can test the performance of the agents
# load the actor critic models
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
agent.actor_local.load_state_dict(torch.load("checkpoints/checkpoint_actor_local.pth", map_location=device))
agent.critic_local.load_state_dict(torch.load("checkpoints/checkpoint_critic_local.pth", map_location=device))

for i in range(10):
    env_info = env.reset(train_mode=True)[brain_name]        # reset the environment
    states = env_info.vector_observations                    # get the current state (for each agent)
    scores = np.zeros(num_agents)                            # initialize the score (for each agent)
    while True:
        actions = agent.act(states)
        env_info = env.step(actions)[brain_name]             # send all actions to the environment
        next_states = env_info.vector_observations           # get next state (for each agent)
        rewards = env_info.rewards                           # get reward (for each agent)
        dones = env_info.local_done                          # see if episode finished
        scores += env_info.rewards                           # update the score (for each agent)
        states = next_states                                 # roll over states to next time step
        if np.any(dones):                                    # exit loop if episode finished
            break
    print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))

# close the environment
env.close()
```

### As you can see, the target average reward is reached after 294 episodes. There are several approaches to improve the results and train the agents much faster:
1- Prioritized experience replay (a minimal sketch follows below)
2- Modification of the hyperparameters, which play a crucial role
3- Use of another algorithm such as A3C
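Since prioritized experience replay is listed above as a possible improvement, here is a minimal sketch of the proportional variant. It is illustrative only and is not wired into the `d4pg_agent` code used in this notebook; the class and parameter names are my own.

```
import numpy as np

class PrioritizedReplayBuffer:
    """Minimal proportional prioritized replay buffer (illustrative sketch)."""

    def __init__(self, max_size=100000, alpha=0.6):
        self.buffer, self.priorities = [], []
        self.max_size, self.alpha = max_size, alpha

    def add(self, transition):
        # New transitions get the current maximum priority so they are replayed at least once.
        max_p = max(self.priorities, default=1.0)
        if len(self.buffer) >= self.max_size:
            self.buffer.pop(0)
            self.priorities.pop(0)
        self.buffer.append(transition)
        self.priorities.append(max_p)

    def sample(self, batch_size, beta=0.4):
        p = np.array(self.priorities) ** self.alpha
        p /= p.sum()
        idx = np.random.choice(len(self.buffer), batch_size, p=p)
        # Importance-sampling weights correct the bias introduced by non-uniform sampling.
        weights = (len(self.buffer) * p[idx]) ** (-beta)
        weights /= weights.max()
        return [self.buffer[i] for i in idx], idx, weights

    def update_priorities(self, idx, td_errors, eps=1e-5):
        # Priorities are refreshed with the absolute TD errors of the sampled transitions.
        for i, err in zip(idx, td_errors):
            self.priorities[i] = abs(float(err)) + eps
```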
# NewEgg.Com WebScraping Program For Laptops - Beta v1.0 ### - April 2020 --- ``` # Import dependencies. import os import re import time import glob import random import datetime import requests import pandas as pd from re import search from splinter import Browser from playsound import playsound from bs4 import BeautifulSoup as soup ``` ## Functions & Classes Setup --- ``` # Build a function to return date throughout the program. def return_dt(): global current_date current_date = str(datetime.datetime.now()).replace(':','.').replace(' ','_')[:-7] return current_date """ NewEgg WebScraper function that scrapes data, saves it into a csv file, and creates Laptop objects. """ def newegg_page_scraper(containers, turn_page): page_nums = [] general_category = [] product_categories = [] images = [] product_brands = [] product_models = [] product_links = [] item_numbers = [] promotions = [] prices = [] shipping_terms = [] # Put this to avoid error that was being generated global gen_category """ Loop through all the containers on the HTML, and scrap the following content into the following lists """ for con in containers: try: page_counter = turn_page page_nums.append(int(turn_page)) gen_category = target_page_soup.find_all('div', class_="nav-x-body-top-bar fix")[0].text.split('\n')[5] general_category.append(gen_category) prod_category = target_page_soup.find_all('h1', class_="page-title-text")[0].text product_categories.append(prod_category) image = con.a.img["src"] images.append(image) prd_title = con.find_all('a', class_="item-title")[0].text product_models.append(prd_title) product_link = con.find_all('a', class_="item-title")[0]['href'] product_links.append(product_link) shipping = con.find_all('li', class_='price-ship')[0].text.strip().split()[0] if shipping != "Free": shipping = shipping.replace('$', '') shipping_terms.append(shipping) else: shipping = 0.00 shipping_terms.append(shipping) brand_name = con.find_all('a', class_="item-brand")[0].img["title"] product_brands.append(brand_name) except (IndexError, ValueError) as e: # If there are no item_brand container, take the Brand from product details. product_brands.append(con.find_all('a', class_="item-title")[0].text.split()[0]) try: current_promo = con.find_all("p", class_="item-promo")[0].text promotions.append(current_promo) except: promotions.append('null') try: price = con.find_all('li', class_="price-current")[0].text.split()[0].replace('$','').replace(',', '') prices.append(price) except: price = 'null / out of stock' prices.append(price) try: item_num = con.find_all('a', class_="item-title")[0]['href'].split('p/')[1].split('?')[0] item_numbers.append(item_num) except (IndexError) as e: item_num = con.find_all('a', class_="item-title")[0]['href'].split('p/')[1] item_numbers.append(item_num) # Convert all of the lists into a dataframe df = pd.DataFrame({ 'item_number': item_numbers, 'general_category': general_category, 'product_category': product_categories, 'brand': product_brands, 'model_specifications': product_models, 'price': prices, 'current_promotions': promotions, 'shipping': shipping_terms, 'page_number': page_nums, 'product_links': product_links, 'image_link': images }) # Rearrange the dataframe columns into the following order. df = df[['item_number', 'general_category','product_category', 'page_number' ,'brand','model_specifications' ,'current_promotions' ,'price' ,'shipping' ,'product_links','image_link']] # Convert the dataframe into a dictionary. 
global scraped_dict scraped_dict = df.to_dict('records') # Grab the subcategory "Laptop/Notebooks" and eliminate any special characters that may cause errors. global pdt_category pdt_category = df['product_category'].unique()[0] # Eliminate special characters in a string if it exists. pdt_category = ''.join(e for e in pdt_category if e.isalnum()) """ Count the number of items scraped by getting the length of a all the models for sale. This parameter is always available for each item-container in the HTML """ global items_scraped items_scraped = len(df['model_specifications']) """ Save the results into a csv file using Pandas """ df.to_csv(f'./processing/{current_date}_{pdt_category}_{items_scraped}_scraped_page{turn_page}.csv') # Return these variables as they will be used. return scraped_dict, items_scraped, pdt_category # Function to return the total results pages. def results_pages(target_page_soup): # Use BeautifulSoup to extract the total results page number results_pages = target_page_soup.find_all('span', class_="list-tool-pagination-text")[0].text.strip() # Find and extract total pages + and add 1 to ensure proper length of total pages. global total_results_pages total_results_pages = int(re.split("/", results_pages)[1]) return total_results_pages """ Build a function to concatenate all pages that were scraped and saved in the processing folder. Save the final output (1 csv file) all the results """ def concatenate(total_results_pages): path = f'./processing\\' scraped_pages = glob.glob(path + "/*.csv") concatenate_pages = [] counter = 0 for page in scraped_pages: df = pd.read_csv(page, index_col=0, header=0) concatenate_pages.append(df) compiled_data = pd.concat(concatenate_pages, axis=0, ignore_index=True) total_items_scraped = len(compiled_data['brand']) concatenated_output = compiled_data.to_csv(f"./finished_outputs/{current_date}_{total_items_scraped}_scraped_{total_results_pages}_pages_.csv") return """ Built a function to clear out the entire processing files folder to avoid clutter. Or the user can keep the processing files (page by page) for their own analysis. """ def clean_processing_fldr(): path = f'./processing\\' scraped_pages = glob.glob(path + "/*.csv") if len(scraped_pages) < 1: print("There are no files in the folder to clear. \n") else: print(f"Clearing out a total of {len(scraped_pages)} scraped pages in the processing folder... \n") clear_processing_files = [] for page in scraped_pages: os.remove(page) print('Clearing of "Processing" folder complete. \n') return def random_a_tag_mouse_over3(): x = random.randint(6, 10) def rdm_slp_5_9(x): time.sleep(x) print(f"Mimic Humans - Sleeping for {x} seconds. ") return x working_try_atags = [] finally_atags = [] working_atags = [] not_working_atags = [] try_counter = 0 finally_counter = 0 time.sleep(1) # Mouse over to header of the page "Laptops" browser.find_by_tag("h1").mouse_over() number_of_a_tags = len(browser.find_by_tag("a")) # My observation has taught me that most of the actual laptop clickable links on the grid are in the <a> range 2000 to 2100. if number_of_a_tags > 1900: print(f"Found {number_of_a_tags} <a> tags when parsing html... ") random_90_percent_plug = (random.randint(90, 94)/100.00) start_a_tag = int(round((number_of_a_tags * random_90_percent_plug))) end_a_tag = int(round((number_of_a_tags * .96))) else: # After proving you're human, clickable <a>'s reduced 300, so adjusting mouse_over for that scenario print(f"Found {number_of_a_tags} <a> tags when parsing html... 
") random_40_percent_plug = (random.randint(40, 44)/100.00) start_a_tag = int(round((number_of_a_tags * random_40_percent_plug))) end_a_tag = int(round((number_of_a_tags * .46))) step = random.randint(13, 23) for i in range(start_a_tag, end_a_tag, step): try: # try this as normal part of the program - SHORT rdm_slp_5_9(x) browser.find_by_tag("a")[i+2].mouse_over() time.sleep(3) except: # Execute this when there is an exception print("EXCEPTION raised during mouse over. Going to break loop and proceed with moving to the next page. \n") break else: # execute this only if no exceptions are raised working_try_atags.append(i+2) working_atags.append(i+2) try_counter += 1 print(f"<a> number = {i+2} | Current Attempts (Try Count): {try_counter} \n") return def g_recaptcha_check(): if browser.is_element_present_by_id('g-recaptcha') == True: for sound in range(0, 2): playsound('./sounds/user_alert.wav') print("recaptcha - Check Alert! \n") continue_scrape = input("Newegg system suspects you are a bot. \n Complete the recaptcha test to prove you're not a bot. After, enter in any key and press ENTER to continue the scrape. \n") print("Continuing with scrape... \n") return def are_you_human_backend(target_page_soup): if target_page_soup.find_all("title")[0].text == 'Are you a human?': playsound('./sounds/user_alert.wav') continue_scrape = input("Newegg notices you're a robot on the backend when requesting. REFRESH THE PAGE and you may have to perform a test to prove you're human. After you refresh, enter in any key, and press ENTER to continue the webscrape. \n") print("Now will automatically will refresh the page 2 times, and target new URL. \n") print("Refreshing three times in 12 seconds. Please wait... \n") for i in range(0, 2): browser.reload() time.sleep(2) browser.back() time.sleep(4) browser.forward() time.sleep(3) print("Targeting new url... ") # After user passes test, target the new url, and return updated target_page_soup target_url = browser.url response_target = requests.get(target_url) target_page_soup = soup(response_target.text, 'html.parser') print("#"* 60) print(target_page_soup) print("#"* 60) #target_page_soup break_pedal = input("Does the soup say 'are you human?' in the text?' Enter 'y' or 'n'. ") if break_pedal == 'y': # recursion are_you_human_backend(target_page_soup) else: #print("#"* 60) target_url = browser.url response_target = requests.get(target_url) target_page_soup = soup(response_target.text, 'html.parser') return target_page_soup else: print("Passed the 'Are you human?' check when requesting and parsing the html. Continuing with scrape ... \n") # Otherwise, return the target_page_soup that was passed in. return target_page_soup def random_xpath_top_bottom(): x = random.randint(3, 8) def rdm_slp_5_9(x): time.sleep(x) print(f"Slept for {x} seconds. \n") return x # Check if there are working links on the screen, otherwise alert the user. if (browser.is_element_present_by_tag('h1')) == True: print("(Check 1 - Random Xpath Top Bottom) Header is present and hoverable on page. \n") else: print("(Check 1 - ERROR - Random Xpath Top Bottom) Header is NOT present on page. \n") for s in range(0, 1): playsound('./sounds/user_alert.wav') red_light = input("Program could not detect a clickable links to hover over, and click. Please use your mouse to refresh the page, and enter 'y' to continue the scrape. \n") if (browser.is_element_present_by_tag("a")) == True: print("(Check 2- Random Xpath Top Bottom) <a> tags are present on page. 
Will begin mouse-over thru the page, and click a link. \n") else: # If there isn't, pause the program. Have user click somewhere on the screen. for s in range(0, 1): playsound('./sounds/user_alert.wav') red_light = input("Program could not detect a clickable links to hover over, and click. Please use your mouse to refresh the page, and enter 'y' to continue the scrape. \n") # There are clickable links, then 'flip the coin' to choose top or bottom button coin_toss_top_bottom = random.randint(0,1) next_page_button_results = [] # If the coin toss is even, mouse_over and click the top page link. if (coin_toss_top_bottom == 0): print('Heads - Clicking "Next Page" Top Button. \n') x = random.randint(3, 8) print(f"Mimic human behavior by randomly sleeping for {x}. \n") rdm_slp_5_9(x) browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').mouse_over() time.sleep(1) browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').click() next_page_button_results.append(coin_toss_top_bottom) print('Heads - SUCCESSFUL "Next Page" Top Button. \n') return else: next_page_button_results.append(coin_toss_top_bottom) # try: # after you add item to cart and go back back - this is the bottom next page link # /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[3]/div[8]/div/div/div[11]/button # /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[3]/div[6]/div/div/div[11]/button # /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[3]/div[6]/div/div/div[11]/button # /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[3]/div[6]/div/div/div[11]/button # /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[3]/div[6]/div/div/div[11]/button try: print('Tails - Clicking "Next Page" Xpath Bottom Button. \n') x = random.randint(3, 8) print(f"Mimic human behavior by randomly sleeping for {x}. \n") rdm_slp_5_9(x) browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').mouse_over() time.sleep(4) browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').click() print('Tails - 1st Bottom Xpath - SUCCESSFUL "Next Page" Bottom Button. \n') except: print("EXCEPTION - 1st Bottom Xpath Failed. Sleep for 1 second then will try with 2nd Xpath bottom link. \n") try: time.sleep(4) browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[3]/div/div/div[11]/button').mouse_over() time.sleep(4) browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[3]/div/div/div[11]/button').click() print('(Exception Attempt) Tails - 2nd Bottom Xpath - SUCCESSFUL "Next Page" Bottom Button. \n') except: print("EXCEPTION - 2nd Bottom Xpath Failed. Trying with 3rd Xpath bottom link. \n") try: time.sleep(4) browser.find_by_xpath('/html/body/div[5]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').mouse_over() time.sleep(4) browser.find_by_xpath('/html/body/div[5]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').click() print('(Exception Attempt) Tails - 3rd Bottom Xpath - SUCCESSFUL "Next Page" Bottom Button. \n') except: print("3rd Bottom Link - Didn't work - INSPECT AND GRAB THE XPATH... 
\n") break_pedeal = input("Pause. Enter anything to continue... ") return """ This class takes in the dictionary from the webscraper function, and will be used in a list comprehension to produce class "objects" """ class Laptops: counter = 0 def __init__(self, **entries): self.__dict__.update(entries) Laptops.counter += 1 def count(self): print(f"Total Laptops scraped: {Laptops.counter}") """ Originally modeled out parent/child inheritance object structure. After careful research, I found it much easier to export the Pandas Dataframe of the results to a dictionary, and then into a class object, which I will elaborate more down below. """ # class Product_catalog: # all_prod_count = 0 # def __init__(self, general_category): # computer systems # self.general_category = general_category # Product_catalog.all_prod_count += 1 # def count_prod(self): # return int(self.all_prod_count) # #return '{}'.format(self.general_category) # Sub_category was later changed to Laptops due to the scope of this project. # class Sub_category(Product_catalog): # laptops/notebooks, gaming # sub_category_ct = 0 # def __init__(self, general_category, sub_categ, item_num, brand, price, img_link, prod_link, model_specifications, current_promotions): # super().__init__(general_category) # Sub_category.sub_category_ct += 1 # self.sub_categ = sub_categ # self.item_num = item_num # self.brand = brand # self.price = price # self.img_link = img_link # self.prod_link = prod_link # self.model_specifications = model_specifications # self.current_promotions = current_promotions ``` ## Main Program Logic --- ``` """ Welcome to the program message! """ print("=== NewEgg.Com Laptop WebScraper Beta v1.0 ===") print("=="*30) print('Scope: This project is a beta and is only built to scrape the laptop section of NewEgg.com due to limited time. \n') print("Instructions: \n") return_dt() print(f'Current Date And Time: {current_date} \n') print("(1) Go to www.newegg.com, go to the laptop section, select your requirements (e.g. brand, screensize, and specifications - SSD size, processor brand and etc...) ") print("(2) Copy and paste the url from your exact search when prompted ") print('(3) After the webscraping is successful, you will have an option to concatenate all of the pages you scraped together into one csv file') print('(4) Lastly, you will have an option to clear out the processing folder (data scraped by each page)') print('(5) If you have any issues or errors, "PRESS CTRL + C" to quit the program in the terminal ') print('(6) You may run the program in the background as the program will make an alert noise to flag when Newegg suspects there is a bot, and will pause the scrape until you finish proving you are human. ') print('(7) Disclaimer: Newegg may ban you for a 24 - 48 hours for webscraping their data, then you may resume. \n Also, please consider executing during the day, with tons of web traffic to their site in your respective area. \n') print('Happy Scraping!') # Set up Splinter requirements. executable_path = {'executable_path': './chromedriver.exe'} # Add an item to the cart first, then go to the user URL and scrape. # Ask user to input in the laptop query link they would like to scrape. url = input("Please copy and paste your laptop query that you want to webscrape, and press enter: \n") browser = Browser('chrome', **executable_path, headless=False, incognito=True) ######################## # Throw a headfake first. laptops_home_url = 'https://www.newegg.com/' browser.visit(laptops_home_url) # Load Time. 
time.sleep(4) #current_url = browser.url browser.find_by_xpath('/html/body/header/div[1]/div[3]/div[1]/form/div/div[1]/input').mouse_over() time.sleep(1) browser.find_by_xpath('/html/body/header/div[1]/div[3]/div[1]/form/div/div[1]/input').click() time.sleep(1) # Type in laptops intial_search = browser.find_by_xpath('/html/body/header/div[1]/div[3]/div[1]/form/div/div[1]/input').type('Lenovo Laptops intel', slowly=True) for k in intial_search: time.sleep(0.5) pass time.sleep(3) # Click the search button browser.find_by_xpath('/html/body/header/div[1]/div[3]/div[1]/form/div/div[3]/button').click() print("Sleeping for 5 seconds. \n") time.sleep(5) # try to click on the first workable link for i in range(2,4): try: browser.find_by_xpath(f'/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[3]/div[{i}]/div[1]/div[1]/a').mouse_over() time.sleep(1) browser.find_by_xpath(f'/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[3]/div[{i}]/div[1]/div[1]/a').click() except: print(f"i {i} - Exception occurred. Trying next link. ") time.sleep(5) browser.back() time.sleep(4) g_recaptcha_check() ##################### print("Sleeping for 5 seconds. \n") time.sleep(3) # Go to the user intended url browser.visit(url) time.sleep(3) g_recaptcha_check() current_url = browser.url # Allocating loading time. time.sleep(4) #current_url = browser.url response = requests.get(current_url) print(f"{response} \n") target_page_soup = soup(response.text, 'html.parser') are_you_human_backend(target_page_soup) # Run the results_pages function to gather the total pages to be scraped. results_pages(target_page_soup) """ This is the loop that performs the page by page scraping of data / results of the user's query. """ # List set up for where class Laptop objects will be stored. print("Beginning webscraping and activity log below... ") print("="*60) product_catalog = [] # "Stop" in range below is "total_results_pages+1" because we started at 1. for turn_page in range(1, total_results_pages+1): """ If "reCAPTCHA" pops up, pause the program using an input. This allows the user to continue to scrape after they're done completing the quiz by inputting any value. """ # Allocating loading time. time.sleep(4) g_recaptcha_check() print(f"Beginning mouse over activity... \n") # Set up "containers" to be passed into main scraping function. if turn_page == 1: containers = target_page_soup.find_all("div", class_="item-container") else: target_url = browser.url # Use Request.get() - throw the boomerang at the target, retrieve the info, & return back to requestor response_target = requests.get(target_url) # Use BeautifulSoup to read grab all the HTML using the lxml parser target_page_soup = soup(response_target.text, 'html.parser') # Pass in target_page_soup to scan on the background (usually 10 pages in) if the html has text "Are you human?" # If yes, the browser will refresh twice, and return a new target_page_soup that should have the scrapable items we want are_you_human_backend(target_page_soup) containers = target_page_soup.find_all("div", class_="item-container") print(f"Scraping Current Page: {turn_page} \n") # Execute webscraper function. Output is a csv file in the processing folder and dictionary. newegg_page_scraper(containers, turn_page) print("Creating laptop objects for this page... \n") # Create instances of class objects of the laptops/notebooks using a list comprehension. 
objects = [Laptops(**prod_obj) for prod_obj in scraped_dict] print(f"Finished creating Laptop objects for page {turn_page} ... \n") # Append all of the objects to the main product_catalog list (List of List of Objects). print(f"Adding {len(objects)} to laptop catalog... \n") product_catalog.append(objects) random_a_tag_mouse_over3() if turn_page == total_results_pages: print(f"Completed scraping {turn_page} / {total_results_pages} pages. \n ") # Exit the broswer once complete webscraping. browser.quit() else: try: y = random.randint(3, 5) print(f"Current Page: {turn_page}) | SLEEPING FOR {y} SECONDS THEN will click next page. \n") time.sleep(y) random_xpath_top_bottom() except: z = random.randint(3, 5) print(f" (EXCEPTION) Current Page: {turn_page}) | SLEEPING FOR {z} SECONDS - Will click next page, if applicable. \n") time.sleep(z) random_xpath_top_bottom() time.sleep(1) print("") print("="*60) print("") # Prompt the user if they would like to concatenate all of the pages into one csv file concat_y_n = input(f'All {total_results_pages} pages have been saved in the "processing" folder (1 page = csv files). Would you like for us concatenate all the files into one? Enter "y", if so. Otherwise, enter anykey to exit the program. \n') if concat_y_n == 'y': concatenate(total_results_pages) print(f'WebScraping Complete! All {total_results_pages} have been scraped and saved as {current_date}_{pdt_category}_scraped_{total_results_pages}_pages_.csv in the "finished_outputs" folder \n') # Prompt the user to if they would like to clear out processing folder function here - as delete everything to prevent clutter clear_processing_y_n = input(f'The "processing" folder has {total_results_pages} csv files of each page that was scraped. Would you like to clear the files? Enter "y", if so. Otherwise, enter anykey to exit the program. \n') if clear_processing_y_n == 'y': clean_processing_fldr() print('Thank you checking out my project, and hope you found this useful! 
\n') /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button # 20 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601286795%20601346405%20600004341%20600004343&recaptcha=pass&LeftPriceRange=1000%201500 ## 22 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601286795%20601346405%20600004341%20600004343%20600440394%20601183480%20601307583&LeftPriceRange=1000%201500 # 35 https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601286795%20601346405%20600004341%20600004343%20601183480%20601307583%20601286800%204814&LeftPriceRange=1000%201500 # 25 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601286795%20601346405%20600004341%20600004343%20601183480%20601307583%20601286800%204814%20601296065%20601296059%20601296066&LeftPriceRange=1000%201500 # 15 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601346405%20600004341%20600004343%20601183480%20601307583%20601286800%204814%20601296065%20601296059%20601296066&LeftPriceRange=1000%201500 # 26 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601346405%20600004341%20600004343%20601183480%20601307583%20601286800%204814%20601296065%20601296059%20601296066%20601286795%20600440394&LeftPriceRange=1000%201500 # 28 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601346405%20600004341%20600004343%20601183480%20601307583%20601286800%204814%20601296065%20601296059%20601296066%20601286795%20600440394%20600337010%20601107729%20601331008&LeftPriceRange=1000%201500 # 48 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20600004343%20601183480%20601307583%204814%20601296065%20601296059%20601296066%20601286795%20600440394%20600004344&LeftPriceRange=1000%201500 # 29 https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20600004343%20601183480%20601307583%204814%20601296066%20601286795%20600440394%20600004344%20601286800&LeftPriceRange=1000%201500 # 33 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20600004343%20601183480%20601307583%204814%20601296066%20601286795%20600440394%20600004344%20601286800%20600337010&LeftPriceRange=1000%201500 # 26 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601183480%20601307583%204814%20601296066%20601286795%20600440394%20600004344%20601286800%20600337010%20601107729%20601331008&LeftPriceRange=1000%201500 # 11 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601183480%204814%20601296066%20600440394%20600004344%20601286800%20600337010%20601107729%20601331008&LeftPriceRange=1000%201500 # 22 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%20601183480%204814%20601296066%20600440394%20600004344%20601286800%20600337010%20601107729%20601331008%204023%204022%204084 # 33 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%204814%20601296066%20600004344%204023%204022%204084 # 33 pages 
https://www.newegg.com/p/pl?N=100006740%20600004804%20600136700%20600165638%204814%20601296066%204023%204022%2050001186%2050010418%2050010772 # 24 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600165638%204814%20601296066%204023%204022%2050001186%2050010418%2050010772 # 15 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600165638%204814%20601296066%204022%2050001186%2050010418%2050010772 # 17 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600165638%204814%20601296066%204022%2050001186%2050010418%2050010772%2050001315%2050001312 # 18 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600165638%204814%20601296066%204022%2050001186%2050010418%2050010772%2050001315%2050001312%2050001146 # 19 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600165638%204814%20601296066%204022%2050001186%2050010418%2050010772%2050001315%2050001312%2050001146%2050001759%2050001149 # 25 pages https://www.newegg.com/p/pl?N=100006740%20600004804%20600165638%204814%20601296066%204022%2050001186%2050010418%2050010772%2050001315%2050001312%2050001146%2050001759%2050001149%2050001077%20600136700 /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[3]/div/div/div[11]/button browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[3]/div/div/div[11]/button').click() browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[3]/div/div/div[11]/button').click() /html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').click() target_page_soup.find_all("div", class_="item-container") browser.find_by_xpath('/html/body/div[5]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').click() # 32 pages # https://www.newegg.com/p/pl?N=100006740%20601346405%20601307583%20601107729%20600337010%20601313977%20601274231%20601331008%20600440394%20601183480%20600136700 # 29 pages # https://www.newegg.com/p/pl?N=100006740%20601346405%20601307583%20601107729%20600337010%20601313977%20601274231%20601331008%20600136700 # 18 pages # https://www.newegg.com/p/pl?N=100006740%20601346405%20601307583%20601107729%20601313977%20601274231%20601331008%20600136700 # 30 pages #https://www.newegg.com/p/pl?N=100006740%20601346405%20601307583%20601107729%20601274231%20601331008%20600136700%20601346404%20600337010 # 28 pages # https://www.newegg.com/p/pl?N=100006740%20601346405%20601307583%20601107729%20601274231%20601331008%20600136700%20600337010 # 21 Pages # https://www.newegg.com/p/pl?N=100006740%20601307583%20601107729%20601274231%20601331008%20600136700%20600337010 # 13 pages # https://www.newegg.com/p/pl?N=100006740%20601307583%20601107729%20601274231%20601331008%20600136700 # 23 pages # https://www.newegg.com/p/pl?N=100006740%20601307583%20601107729%20601274231%20600136700%20601313977%20600337010%20600440394 ```
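For reference, here is a small hedged usage sketch of the `Laptops` class defined above. The record is hand-made for illustration; real records come from `scraped_dict`, and `product_catalog` is the list of per-page object lists built during the scrape.

```
# Illustrative only: how the Laptops class consumes one scraped record.
# The field values below are placeholders, not real scraped data.
sample_record = {
    'item_number': 'EXAMPLE-ITEM-123',
    'brand': 'Lenovo',
    'price': '999.99',
    'page_number': 1,
}

laptop = Laptops(**sample_record)
print(laptop.brand, laptop.price)   # attributes are created from the dict keys
laptop.count()                      # running total of Laptops instantiated so far

# product_catalog is a list of per-page lists, so flatten it to inspect every object at once.
all_laptops = [lap for page in product_catalog for lap in page]
```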
``` from IPython.display import display, HTML display(HTML(data=""" <style> div#notebook-container { width: 99%; } div#menubar-container { width: 99%; } div#maintoolbar-container { width: 99%; } </style> """)) import os import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from PIL import Image import matplotlib.pyplot as plt %matplotlib inline from democlassi import initialize_model, PretrainedMT, preprocess_fer, preprocess_utk # create model objects resnet_fer, _ = initialize_model("resnet", False, 7, "fer2013", False) resnet_rag = PretrainedMT("resnet", False, False) # load models weights fer_weight = torch.load("./ml_outputs/democlassi/FER_resnet_model_109_val_accuracy_0.6227361.pth", map_location=torch.device("cpu")) rag_weight = torch.load("./ml_outputs/democlassi/RAG_resnet_model_21_val_loss_4.275671.pth", map_location=torch.device("cpu")) resnet_fer.load_state_dict(fer_weight) resnet_rag.load_state_dict(rag_weight) print(f"Number of parameters for FER model : {sum(p.numel() for p in resnet_fer.parameters()):_}") print(f"Number of parameters for RAG model : {sum(p.numel() for p in resnet_rag.parameters()):_}") class FERModel(nn.Module): def __init__(self, model): super(FERModel, self).__init__() self.model = model self.eval() def forward(self, x): with torch.no_grad(): # x = preprocess_fer(x) x = torch.argmax(self.model(x)) return x class RAGModel(nn.Module): def __init__(self, model): super(RAGModel, self).__init__() self.model = model self.eval() def forward(self, x): with torch.no_grad(): # x = preprocess_utk(x) age, gender_pred, race_pred = self.model(x) gender, race = torch.argmax(gender_pred), torch.argmax(race_pred) return age[0][0], gender, race tmp_im = Image.open("./ml_outputs/democlassi/little-girl-tampon.jpg") tmp_im fer_model = FERModel(resnet_fer) # im = torch.tensor(np.array(tmp_im)) # print(fer_model(im)) rag_model = RAGModel(resnet_rag) # print(rag_model(im)) torch.onnx.export(fer_model, # model being run torch.randn((1, 3, 48, 48)), # model input (or a tuple for multiple inputs) "../../public/static/ml_models/fer.onnx", # where to save the model (can be a file or file-like object) export_params=True, # store the trained parameter weights inside the model file opset_version=9, # the ONNX version to export the model to do_constant_folding=True, # whether to execute constant folding for optimization input_names = ['input'], # the model's input names output_names = ['output'], # the model's output names dynamic_axes={'input' : {0 : 'sH', 1: "W"}, # variable lenght axes }) torch.onnx.export(rag_model, # model being run torch.randn((1, 3, 128, 128)), # model input (or a tuple for multiple inputs) "../../public/static/ml_models/rag.onnx", # where to save the model (can be a file or file-like object) export_params=True, # store the trained parameter weights inside the model file opset_version=9, # the ONNX version to export the model to do_constant_folding=True, # whether to execute constant folding for optimization input_names = ['input'], # the model's input names output_names = ['age', "gender", "race"], # the model's output names dynamic_axes={'input' : {0 : 'H', 1: "W"}, # variable lenght axes }) ```
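As a hedged sanity check that is not part of the original notebook, the exported graphs can be validated and run with the `onnx` and `onnxruntime` packages, which are assumed to be installed; the file path reuses the one above.

```
# Illustrative check of the exported FER graph (assumes onnx and onnxruntime are installed).
import onnx
import onnxruntime as ort
import numpy as np

model = onnx.load("../../public/static/ml_models/fer.onnx")
onnx.checker.check_model(model)   # structural validation of the exported graph

session = ort.InferenceSession("../../public/static/ml_models/fer.onnx",
                               providers=["CPUExecutionProvider"])
dummy = np.random.randn(1, 3, 48, 48).astype(np.float32)
outputs = session.run(None, {"input": dummy})
print(outputs[0])                 # predicted expression class index for the dummy input
```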
``` %load_ext autoreload %autoreload 2 import sklearn from sklearn import datasets iris = datasets.load_iris() iris iris.feature_names print(iris.data.shape, iris.data.dtype) iris.target iris.target_names import numpy as np from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset # All dataset is to train for simplicity dataset = NumpyTupleDataset(iris.data.astype(np.float32), iris.target.astype(np.int32)) train = dataset from chainer.functions import relu, dropout from chainer_chemistry.models.mlp import MLP from chainer_chemistry.models.prediction.classifier import Classifier from chainer.functions import dropout def activation_relu_dropout(h): return dropout(relu(h), ratio=0.5) out_dim = len(iris.target_names) predictor = MLP(out_dim=out_dim, hidden_dim=48, n_layers=2, activation=activation_relu_dropout) classifier = Classifier(predictor) from chainer import iterators from chainer import optimizers from chainer import training from chainer.training import extensions as E def fit(model, dataset, batchsize=16, epoch=10, out='results/tmp', device=-1): train_iter = iterators.SerialIterator(train, batchsize) optimizer = optimizers.Adam() optimizer.setup(model) updater = training.StandardUpdater( train_iter, optimizer, device=device) trainer = training.Trainer(updater, (epoch, 'epoch'), out=out) #trainer.extend(E.Evaluator(val_iter, classifier, # device=device, converter=concat_mols)) trainer.extend(E.LogReport(), trigger=(10, 'epoch')) trainer.extend(E.PrintReport([ 'epoch', 'main/loss', 'main/accuracy', 'validation/main/loss', 'validation/main/accuracy', 'elapsed_time'])) trainer.run() fit(classifier, train, batchsize=16, epoch=100) ``` ## Saliency visualization ``` from chainer_chemistry.saliency.calculator.gradient_calculator import GradientCalculator from chainer_chemistry.saliency.calculator.integrated_gradients_calculator import IntegratedGradientsCalculator from chainer_chemistry.link_hooks.variable_monitor_link_hook import VariableMonitorLinkHook # 1. instantiation gradient_calculator = GradientCalculator(classifier) #gradient_calculator = IntegratedGradientsCalculator(classifier, steps=3, from chainer_chemistry.saliency.calculator.calculator_utils import GaussianNoiseSampler # --- VanillaGrad --- M = 30 # 2. compute saliency_samples_vanilla = gradient_calculator.compute( train, M=1,) saliency_samples_smooth = gradient_calculator.compute( train, M=M, noise_sampler=GaussianNoiseSampler()) saliency_samples_bayes = gradient_calculator.compute( train, M=M, train=True) # 3. 
aggregate method = 'square' saliency_vanilla = gradient_calculator.aggregate( saliency_samples_vanilla, ch_axis=None, method=method) saliency_smooth = gradient_calculator.aggregate( saliency_samples_smooth, ch_axis=None, method=method) saliency_bayes = gradient_calculator.aggregate( saliency_samples_bayes, ch_axis=None, method=method) from chainer_chemistry.saliency.visualizer.table_visualizer import TableVisualizer from chainer_chemistry.saliency.visualizer.visualizer_utils import normalize_scaler visualizer = TableVisualizer() # Visualize saliency of `i`-th data i = 0 visualizer.visualize(saliency_vanilla[i], feature_names=iris.feature_names, scaler=normalize_scaler) ``` visualize saliency of all data --> this can be considered as "feature importance" ``` saliency_mean = np.mean(saliency_vanilla, axis=0) visualizer.visualize(saliency_mean, feature_names=iris.feature_names, num_visualize=-1, scaler=normalize_scaler) visualizer.visualize(saliency_mean, feature_names=iris.feature_names, num_visualize=-1, scaler=normalize_scaler, save_filepath='results/iris_vanilla_{}.png'.format(method)) saliency_mean = np.mean(saliency_smooth, axis=0) visualizer.visualize(saliency_mean, feature_names=iris.feature_names, num_visualize=-1, scaler=normalize_scaler) visualizer.visualize(saliency_mean, feature_names=iris.feature_names, num_visualize=-1, scaler=normalize_scaler, save_filepath='results/iris_smooth_{}.png'.format(method)) saliency_mean = np.mean(saliency_bayes, axis=0) visualizer.visualize(saliency_mean, feature_names=iris.feature_names, num_visualize=-1, scaler=normalize_scaler) visualizer.visualize(saliency_mean, feature_names=iris.feature_names, num_visualize=-1, scaler=normalize_scaler, save_filepath='results/iris_bayes_{}.png'.format(method)) ``` ## sklearn random forest feature importance Ref: - https://qiita.com/TomokIshii/items/290adc16e2ca5032ca07 - https://stackoverflow.com/questions/44101458/random-forest-feature-importance-chart-using-python ``` import numpy as np from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.ensemble import RandomForestClassifier iris = load_iris() X = iris.data y = iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=0) clf_rf = RandomForestClassifier() clf_rf.fit(X_train, y_train) y_pred = clf_rf.predict(X_test) accu = accuracy_score(y_test, y_pred) print('accuracy = {:>.4f}'.format(accu)) # Feature Importance fti = clf_rf.feature_importances_ print('Feature Importances:') for i, feat in enumerate(iris['feature_names']): print('\t{0:20s} : {1:>.6f}'.format(feat, fti[i])) import matplotlib.pyplot as plt features = iris['feature_names'] importances = clf_rf.feature_importances_ indices = np.argsort(importances) plt.title('Random forest feature importance') plt.barh(range(len(indices)), importances[indices], color='b', align='center') plt.yticks(range(len(indices)), [features[i] for i in indices]) plt.xlabel('Relative Importance') plt.show() ```
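As a further baseline that is not in the original notebook, scikit-learn's permutation importance could be computed on the same held-out split and compared with both the saliency scores and the impurity-based feature importances above; this is a suggested extension, not part of the original comparison.

```
# Illustrative extra baseline: permutation importance on the held-out split.
# Reuses clf_rf, X_test, y_test and iris from the cells above.
from sklearn.inspection import permutation_importance

result = permutation_importance(clf_rf, X_test, y_test, n_repeats=30, random_state=0)
for i in result.importances_mean.argsort()[::-1]:
    print('{0:20s} : {1:.4f} +/- {2:.4f}'.format(
        iris['feature_names'][i], result.importances_mean[i], result.importances_std[i]))
```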
<a href="https://colab.research.google.com/github/aks1981/ML/blob/master/P2S10.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Twin-Delayed DDPG Complete credit goes to this [awesome Deep Reinforcement Learning 2.0 Course on Udemy](https://www.udemy.com/course/deep-reinforcement-learning/) for the code. ## Installing the packages ``` !pip install pybullet ``` ## Importing the libraries ``` import os import time import random import numpy as np import matplotlib.pyplot as plt import pybullet_envs import gym import torch import torch.nn as nn import torch.nn.functional as F from gym import wrappers from torch.autograd import Variable from collections import deque ``` ## Step 1: We initialize the Experience Replay memory ``` class ReplayBuffer(object): def __init__(self, max_size=1e6): self.storage = [] self.max_size = max_size self.ptr = 0 def add(self, transition): if len(self.storage) == self.max_size: self.storage[int(self.ptr)] = transition self.ptr = (self.ptr + 1) % self.max_size else: self.storage.append(transition) def sample(self, batch_size): ind = np.random.randint(0, len(self.storage), size=batch_size) batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = [], [], [], [], [] for i in ind: state, next_state, action, reward, done = self.storage[i] batch_states.append(np.array(state, copy=False)) batch_next_states.append(np.array(next_state, copy=False)) batch_actions.append(np.array(action, copy=False)) batch_rewards.append(np.array(reward, copy=False)) batch_dones.append(np.array(done, copy=False)) return np.array(batch_states), np.array(batch_next_states), np.array(batch_actions), np.array(batch_rewards).reshape(-1, 1), np.array(batch_dones).reshape(-1, 1) ``` ## Step 2: We build one neural network for the Actor model and one neural network for the Actor target ``` class Actor(nn.Module): def __init__(self, state_dim, action_dim, max_action): super(Actor, self).__init__() self.layer_1 = nn.Linear(state_dim, 400) self.layer_2 = nn.Linear(400, 300) self.layer_3 = nn.Linear(300, action_dim) self.max_action = max_action def forward(self, x): x = F.relu(self.layer_1(x)) x = F.relu(self.layer_2(x)) x = self.max_action * torch.tanh(self.layer_3(x)) return x ``` ## Step 3: We build two neural networks for the two Critic models and two neural networks for the two Critic targets ``` class Critic(nn.Module): def __init__(self, state_dim, action_dim): super(Critic, self).__init__() # Defining the first Critic neural network self.layer_1 = nn.Linear(state_dim + action_dim, 400) self.layer_2 = nn.Linear(400, 300) self.layer_3 = nn.Linear(300, 1) # Defining the second Critic neural network self.layer_4 = nn.Linear(state_dim + action_dim, 400) self.layer_5 = nn.Linear(400, 300) self.layer_6 = nn.Linear(300, 1) def forward(self, x, u): xu = torch.cat([x, u], 1) # Forward-Propagation on the first Critic Neural Network x1 = F.relu(self.layer_1(xu)) x1 = F.relu(self.layer_2(x1)) x1 = self.layer_3(x1) # Forward-Propagation on the second Critic Neural Network x2 = F.relu(self.layer_4(xu)) x2 = F.relu(self.layer_5(x2)) x2 = self.layer_6(x2) return x1, x2 def Q1(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.layer_1(xu)) x1 = F.relu(self.layer_2(x1)) x1 = self.layer_3(x1) return x1 ``` ## Steps 4 to 15: Training Process ``` # Selecting the device (CPU or GPU) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Building the whole Training Process into a class class TD3(object): def 
__init__(self, state_dim, action_dim, max_action): self.actor = Actor(state_dim, action_dim, max_action).to(device) self.actor_target = Actor(state_dim, action_dim, max_action).to(device) self.actor_target.load_state_dict(self.actor.state_dict()) self.actor_optimizer = torch.optim.Adam(self.actor.parameters()) self.critic = Critic(state_dim, action_dim).to(device) self.critic_target = Critic(state_dim, action_dim).to(device) self.critic_target.load_state_dict(self.critic.state_dict()) self.critic_optimizer = torch.optim.Adam(self.critic.parameters()) self.max_action = max_action def select_action(self, state): state = torch.Tensor(state.reshape(1, -1)).to(device) return self.actor(state).cpu().data.numpy().flatten() def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2): for it in range(iterations): # Step 4: We sample a batch of transitions (s, s’, a, r) from the memory batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(batch_size) state = torch.Tensor(batch_states).to(device) next_state = torch.Tensor(batch_next_states).to(device) action = torch.Tensor(batch_actions).to(device) reward = torch.Tensor(batch_rewards).to(device) done = torch.Tensor(batch_dones).to(device) # Step 5: From the next state s’, the Actor target plays the next action a’ next_action = self.actor_target(next_state) # Step 6: We add Gaussian noise to this next action a’ and we clamp it in a range of values supported by the environment noise = torch.Tensor(batch_actions).data.normal_(0, policy_noise).to(device) noise = noise.clamp(-noise_clip, noise_clip) next_action = (next_action + noise).clamp(-self.max_action, self.max_action) # Step 7: The two Critic targets take each the couple (s’, a’) as input and return two Q-values Qt1(s’,a’) and Qt2(s’,a’) as outputs target_Q1, target_Q2 = self.critic_target(next_state, next_action) # Step 8: We keep the minimum of these two Q-values: min(Qt1, Qt2) target_Q = torch.min(target_Q1, target_Q2) # Step 9: We get the final target of the two Critic models, which is: Qt = r + γ * min(Qt1, Qt2), where γ is the discount factor target_Q = reward + ((1 - done) * discount * target_Q).detach() # Step 10: The two Critic models take each the couple (s, a) as input and return two Q-values Q1(s,a) and Q2(s,a) as outputs current_Q1, current_Q2 = self.critic(state, action) # Step 11: We compute the loss coming from the two Critic models: Critic Loss = MSE_Loss(Q1(s,a), Qt) + MSE_Loss(Q2(s,a), Qt) critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q) # Step 12: We backpropagate this Critic loss and update the parameters of the two Critic models with a SGD optimizer self.critic_optimizer.zero_grad() critic_loss.backward() self.critic_optimizer.step() # Step 13: Once every two iterations, we update our Actor model by performing gradient ascent on the output of the first Critic model if it % policy_freq == 0: actor_loss = -self.critic.Q1(state, self.actor(state)).mean() self.actor_optimizer.zero_grad() actor_loss.backward() self.actor_optimizer.step() # Step 14: Still once every two iterations, we update the weights of the Actor target by polyak averaging for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) # Step 15: Still once every two iterations, we update the weights of the Critic target by polyak averaging for param, target_param in 
zip(self.critic.parameters(), self.critic_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) # Making a save method to save a trained model def save(self, filename, directory): torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename)) torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename)) # Making a load method to load a pre-trained model def load(self, filename, directory): self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, filename))) self.critic.load_state_dict(torch.load('%s/%s_critic.pth' % (directory, filename))) ``` ## We make a function that evaluates the policy by calculating its average reward over 10 episodes ``` def evaluate_policy(policy, eval_episodes=10): avg_reward = 0. for _ in range(eval_episodes): obs = env.reset() done = False while not done: action = policy.select_action(np.array(obs)) obs, reward, done, _ = env.step(action) avg_reward += reward avg_reward /= eval_episodes print ("---------------------------------------") print ("Average Reward over the Evaluation Step: %f" % (avg_reward)) print ("---------------------------------------") return avg_reward ``` ## We set the parameters ``` env_name = "AntBulletEnv-v0" # Name of a environment (set it to any Continous environment you want) seed = 0 # Random seed number start_timesteps = 1e4 # Number of iterations/timesteps before which the model randomly chooses an action, and after which it starts to use the policy network eval_freq = 5e3 # How often the evaluation step is performed (after how many timesteps) max_timesteps = 5e5 # Total number of iterations/timesteps save_models = True # Boolean checker whether or not to save the pre-trained model expl_noise = 0.1 # Exploration noise - STD value of exploration Gaussian noise batch_size = 100 # Size of the batch discount = 0.99 # Discount factor gamma, used in the calculation of the total discounted reward tau = 0.005 # Target network update rate policy_noise = 0.2 # STD of Gaussian noise added to the actions for the exploration purposes noise_clip = 0.5 # Maximum value of the Gaussian noise added to the actions (policy) policy_freq = 2 # Number of iterations to wait before the policy network (Actor model) is updated ``` ## We create a file name for the two saved models: the Actor and Critic models ``` file_name = "%s_%s_%s" % ("TD3", env_name, str(seed)) print ("---------------------------------------") print ("Settings: %s" % (file_name)) print ("---------------------------------------") ``` ## We create a folder inside which will be saved the trained models ``` if not os.path.exists("./results"): os.makedirs("./results") if save_models and not os.path.exists("./pytorch_models"): os.makedirs("./pytorch_models") ``` ## We create the PyBullet environment ``` env = gym.make(env_name) ``` ## We set seeds and we get the necessary information on the states and actions in the chosen environment ``` env.seed(seed) torch.manual_seed(seed) np.random.seed(seed) state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] max_action = float(env.action_space.high[0]) ``` ## We create the policy network (the Actor model) ``` policy = TD3(state_dim, action_dim, max_action) ``` [link text](https://)## We create the Experience Replay memory ``` replay_buffer = ReplayBuffer() ``` ## We define a list where all the evaluation results over 10 episodes are stored ``` evaluations = [evaluate_policy(policy)] ``` ## We create a new folder directory in which the 
final results (videos of the agent) will be populated ``` def mkdir(base, name): path = os.path.join(base, name) if not os.path.exists(path): os.makedirs(path) return path work_dir = mkdir('exp', 'brs') monitor_dir = mkdir(work_dir, 'monitor') max_episode_steps = env._max_episode_steps save_env_vid = False if save_env_vid: env = wrappers.Monitor(env, monitor_dir, force = True) env.reset() ``` ## We initialize the variables ``` total_timesteps = 0 timesteps_since_eval = 0 episode_num = 0 done = True t0 = time.time() ``` ## Training ``` max_timesteps = 500000 # We start the main loop over 500,000 timesteps while total_timesteps < max_timesteps: # If the episode is done if done: # If we are not at the very beginning, we start the training process of the model if total_timesteps != 0: print("Total Timesteps: {} Episode Num: {} Reward: {}".format(total_timesteps, episode_num, episode_reward)) policy.train(replay_buffer, episode_timesteps, batch_size, discount, tau, policy_noise, noise_clip, policy_freq) # We evaluate the episode and we save the policy if timesteps_since_eval >= eval_freq: timesteps_since_eval %= eval_freq evaluations.append(evaluate_policy(policy)) policy.save(file_name, directory="./pytorch_models") np.save("./results/%s" % (file_name), evaluations) # When the training step is done, we reset the state of the environment obs = env.reset() # Set the Done to False done = False # Set rewards and episode timesteps to zero episode_reward = 0 episode_timesteps = 0 episode_num += 1 # Before 10000 timesteps, we play random actions if total_timesteps < start_timesteps: action = env.action_space.sample() else: # After 10000 timesteps, we switch to the model action = policy.select_action(np.array(obs)) # If the explore_noise parameter is not 0, we add noise to the action and we clip it if expl_noise != 0: action = (action + np.random.normal(0, expl_noise, size=env.action_space.shape[0])).clip(env.action_space.low, env.action_space.high) # The agent performs the action in the environment, then reaches the next state and receives the reward new_obs, reward, done, _ = env.step(action) # We check if the episode is done done_bool = 0 if episode_timesteps + 1 == env._max_episode_steps else float(done) # We increase the total reward episode_reward += reward # We store the new transition into the Experience Replay memory (ReplayBuffer) replay_buffer.add((obs, new_obs, action, reward, done_bool)) # We update the state, the episode timestep, the total timesteps, and the timesteps since the evaluation of the policy obs = new_obs episode_timesteps += 1 total_timesteps += 1 timesteps_since_eval += 1 # We add the last policy evaluation to our list of evaluations and we save our model evaluations.append(evaluate_policy(policy)) if save_models: policy.save("%s" % (file_name), directory="./pytorch_models") np.save("./results/%s" % (file_name), evaluations) ``` ## Inference ``` class Actor(nn.Module): def __init__(self, state_dim, action_dim, max_action): super(Actor, self).__init__() self.layer_1 = nn.Linear(state_dim, 400) self.layer_2 = nn.Linear(400, 300) self.layer_3 = nn.Linear(300, action_dim) self.max_action = max_action def forward(self, x): x = F.relu(self.layer_1(x)) x = F.relu(self.layer_2(x)) x = self.max_action * torch.tanh(self.layer_3(x)) return x class Critic(nn.Module): def __init__(self, state_dim, action_dim): super(Critic, self).__init__() # Defining the first Critic neural network self.layer_1 = nn.Linear(state_dim + action_dim, 400) self.layer_2 = nn.Linear(400, 300) self.layer_3 = 
nn.Linear(300, 1) # Defining the second Critic neural network self.layer_4 = nn.Linear(state_dim + action_dim, 400) self.layer_5 = nn.Linear(400, 300) self.layer_6 = nn.Linear(300, 1) def forward(self, x, u): xu = torch.cat([x, u], 1) # Forward-Propagation on the first Critic Neural Network x1 = F.relu(self.layer_1(xu)) x1 = F.relu(self.layer_2(x1)) x1 = self.layer_3(x1) # Forward-Propagation on the second Critic Neural Network x2 = F.relu(self.layer_4(xu)) x2 = F.relu(self.layer_5(x2)) x2 = self.layer_6(x2) return x1, x2 def Q1(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.layer_1(xu)) x1 = F.relu(self.layer_2(x1)) x1 = self.layer_3(x1) return x1 # Selecting the device (CPU or GPU) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Building the whole Training Process into a class class TD3(object): def __init__(self, state_dim, action_dim, max_action): self.actor = Actor(state_dim, action_dim, max_action).to(device) self.actor_target = Actor(state_dim, action_dim, max_action).to(device) self.actor_target.load_state_dict(self.actor.state_dict()) self.actor_optimizer = torch.optim.Adam(self.actor.parameters()) self.critic = Critic(state_dim, action_dim).to(device) self.critic_target = Critic(state_dim, action_dim).to(device) self.critic_target.load_state_dict(self.critic.state_dict()) self.critic_optimizer = torch.optim.Adam(self.critic.parameters()) self.max_action = max_action def select_action(self, state): state = torch.Tensor(state.reshape(1, -1)).to(device) return self.actor(state).cpu().data.numpy().flatten() def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2): for it in range(iterations): # Step 4: We sample a batch of transitions (s, s’, a, r) from the memory batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(batch_size) state = torch.Tensor(batch_states).to(device) next_state = torch.Tensor(batch_next_states).to(device) action = torch.Tensor(batch_actions).to(device) reward = torch.Tensor(batch_rewards).to(device) done = torch.Tensor(batch_dones).to(device) # Step 5: From the next state s’, the Actor target plays the next action a’ next_action = self.actor_target(next_state) # Step 6: We add Gaussian noise to this next action a’ and we clamp it in a range of values supported by the environment noise = torch.Tensor(batch_actions).data.normal_(0, policy_noise).to(device) noise = noise.clamp(-noise_clip, noise_clip) next_action = (next_action + noise).clamp(-self.max_action, self.max_action) # Step 7: The two Critic targets take each the couple (s’, a’) as input and return two Q-values Qt1(s’,a’) and Qt2(s’,a’) as outputs target_Q1, target_Q2 = self.critic_target(next_state, next_action) # Step 8: We keep the minimum of these two Q-values: min(Qt1, Qt2) target_Q = torch.min(target_Q1, target_Q2) # Step 9: We get the final target of the two Critic models, which is: Qt = r + γ * min(Qt1, Qt2), where γ is the discount factor target_Q = reward + ((1 - done) * discount * target_Q).detach() # Step 10: The two Critic models take each the couple (s, a) as input and return two Q-values Q1(s,a) and Q2(s,a) as outputs current_Q1, current_Q2 = self.critic(state, action) # Step 11: We compute the loss coming from the two Critic models: Critic Loss = MSE_Loss(Q1(s,a), Qt) + MSE_Loss(Q2(s,a), Qt) critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q) # Step 12: We backpropagate this Critic loss and update the 
parameters of the two Critic models with a SGD optimizer self.critic_optimizer.zero_grad() critic_loss.backward() self.critic_optimizer.step() # Step 13: Once every two iterations, we update our Actor model by performing gradient ascent on the output of the first Critic model if it % policy_freq == 0: actor_loss = -self.critic.Q1(state, self.actor(state)).mean() self.actor_optimizer.zero_grad() actor_loss.backward() self.actor_optimizer.step() # Step 14: Still once every two iterations, we update the weights of the Actor target by polyak averaging for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) # Step 15: Still once every two iterations, we update the weights of the Critic target by polyak averaging for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) # Making a save method to save a trained model def save(self, filename, directory): torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename)) torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename)) # Making a load method to load a pre-trained model def load(self, filename, directory): self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, filename))) self.critic.load_state_dict(torch.load('%s/%s_critic.pth' % (directory, filename))) def evaluate_policy(policy, eval_episodes=10): avg_reward = 0. for _ in range(eval_episodes): obs = env.reset() done = False while not done: action = policy.select_action(np.array(obs)) obs, reward, done, _ = env.step(action) avg_reward += reward avg_reward /= eval_episodes print ("---------------------------------------") print ("Average Reward over the Evaluation Step: %f" % (avg_reward)) print ("---------------------------------------") return avg_reward env_name = "AntBulletEnv-v0" seed = 0 file_name = "%s_%s_%s" % ("TD3", env_name, str(seed)) print ("---------------------------------------") print ("Settings: %s" % (file_name)) print ("---------------------------------------") eval_episodes = 10 save_env_vid = True env = gym.make(env_name) max_episode_steps = env._max_episode_steps if save_env_vid: env = wrappers.Monitor(env, monitor_dir, force = True) env.reset() env.seed(seed) torch.manual_seed(seed) np.random.seed(seed) state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] max_action = float(env.action_space.high[0]) policy = TD3(state_dim, action_dim, max_action) policy.load(file_name, './pytorch_models/') _ = evaluate_policy(policy, eval_episodes=eval_episodes) ```
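The training loop earlier in this notebook stores transitions with `replay_buffer.add(...)` and draws batches with `replay_buffer.sample(batch_size)`. For completeness, here is a minimal sketch of an experience replay memory consistent with how it is called here (the storage limit of 1e6 is an assumption; if the notebook's own `ReplayBuffer` definition is available, use that instead):
```
import numpy as np

class ReplayBuffer(object):

    def __init__(self, max_size=1e6):
        self.storage = []
        self.max_size = max_size
        self.ptr = 0

    def add(self, transition):
        # transition is the tuple (obs, new_obs, action, reward, done_bool) stored by the training loop
        if len(self.storage) == int(self.max_size):
            self.storage[int(self.ptr)] = transition
            self.ptr = (self.ptr + 1) % self.max_size
        else:
            self.storage.append(transition)

    def sample(self, batch_size):
        # draw batch_size random transitions and return them as separate arrays,
        # in the order unpacked by TD3.train()
        ind = np.random.randint(0, len(self.storage), size=batch_size)
        batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = [], [], [], [], []
        for i in ind:
            state, next_state, action, reward, done = self.storage[i]
            batch_states.append(np.array(state, copy=False))
            batch_next_states.append(np.array(next_state, copy=False))
            batch_actions.append(np.array(action, copy=False))
            batch_rewards.append(np.array(reward, copy=False))
            batch_dones.append(np.array(done, copy=False))
        return (np.array(batch_states), np.array(batch_next_states), np.array(batch_actions),
                np.array(batch_rewards).reshape(-1, 1), np.array(batch_dones).reshape(-1, 1))
```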
github_jupyter
# Web predictions The purpose of this notebook is to experiment with making predictions from "raw" accumulated user values, that could for instance be user input from a web form. ``` import findspark findspark.init() findspark.find() import pyspark from pyspark import SparkContext, SparkConf from pyspark.sql import SparkSession conf = pyspark.SparkConf().setAppName('sparkify-capstone-web').setMaster('local') sc = pyspark.SparkContext(conf=conf) spark = SparkSession(sc) from pyspark.ml.classification import GBTClassifier from pyspark.ml.classification import GBTClassificationModel from pyspark.ml.feature import VectorAssembler transformedPath = "out/transformed.parquet" predictionsPath = "out/predictions.parquet" df_transformed = spark.read.parquet(transformedPath) df_predictions = spark.read.parquet(predictionsPath) model = GBTClassificationModel.load("out/model") zeros = df_predictions.filter(df_predictions["prediction"] == 0) ones = df_predictions.filter(df_predictions["prediction"] == 1) zerosCount = zeros.count() onesCount = ones.count() print("Ones: {}, Zeros: {}".format(onesCount, zerosCount)) print(onesCount / zerosCount * 100) usersPredictedToChurn = df_predictions.filter(df_predictions["prediction"] == 1).take(5) for row in usersPredictedToChurn: print(int(row["userId"])) df_transformed.show() df_predictions.show() # 1 300044 # 0 251 # Select the prediction of a user as value pred = df_predictions[df_predictions["userId"] == 78].select("prediction").collect()[0][0] pred # From a query that could be entered in a web form, create a prediction # Query from web query = "1.0,0.0,10,4,307,0,76200,10" # Split to values values = query.split(",") # Prepare dictionary for feature dataframe from web form values features_dict = [{ "level_index": float(values[0]), "gender_index": float(values[1]), "thumbs_up_sum": int(values[2]), "thumbs_down_sum": int(values[3]), "nextsong_sum": int(values[4]), "downgrade_sum": int(values[5]), "length_sum": float(values[6]), "sessionId_count": int(values[7]), }] # Create a user row to use in VectorAssembler df_user_row = spark.createDataFrame(features_dict) # Create feature dataframe with VectorAssembler df_features = VectorAssembler(inputCols = \ ["level_index", "gender_index", "thumbs_up_sum", "thumbs_down_sum", \ "nextsong_sum", "downgrade_sum", "length_sum", "sessionId_count"], \ outputCol = "features").transform(df_user_row) # Select features df_features = df_features.select("features") # Predict on model prediction = model.transform(df_features) # Show result prediction.show() prediction.select("prediction").collect()[0][0] # Output the notebook to an html file from subprocess import call call(['python', '-m', 'nbconvert', 'web_pred.ipynb']) ```
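The cell above walks through the query-to-prediction flow step by step. For reuse behind a web endpoint, the same steps can be wrapped in a helper function; this sketch uses only calls already shown above (the function name and the module-level column list are choices made here, not part of the original notebook):
```
FEATURE_COLS = ["level_index", "gender_index", "thumbs_up_sum", "thumbs_down_sum",
                "nextsong_sum", "downgrade_sum", "length_sum", "sessionId_count"]

def predict_from_query(query, spark, model):
    """Parse a comma-separated web-form query and return the model's churn prediction (0.0 or 1.0)."""
    values = query.split(",")
    features_dict = [{
        "level_index": float(values[0]),
        "gender_index": float(values[1]),
        "thumbs_up_sum": int(values[2]),
        "thumbs_down_sum": int(values[3]),
        "nextsong_sum": int(values[4]),
        "downgrade_sum": int(values[5]),
        "length_sum": float(values[6]),
        "sessionId_count": int(values[7]),
    }]
    # build a one-row dataframe, assemble the feature vector, and score it
    df_user_row = spark.createDataFrame(features_dict)
    df_features = VectorAssembler(inputCols=FEATURE_COLS, outputCol="features").transform(df_user_row)
    prediction = model.transform(df_features.select("features"))
    return prediction.select("prediction").collect()[0][0]

# Example: should reproduce the result of the step-by-step cell above
predict_from_query("1.0,0.0,10,4,307,0,76200,10", spark, model)
```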
github_jupyter
# Lecture 9 - Motor Control ### Introduction to modeling and simulation of human movement https://github.com/BMClab/bmc/blob/master/courses/ModSim2018.md * In class: ``` import numpy as np #import pandas as pd #import pylab as pl import matplotlib.pyplot as plt import math %matplotlib notebook ``` ### Muscle properties ``` Lslack = .223 Umax = .04 Lce_o = .093 #optmal l width = .63 Fmax = 3000 a = .25 b = .25*10 ``` ### Initial conditions ``` LceNorm = .087/Lce_o t0 = 0 tf = 2.99 h = 1e-3 u = 1 a = 0 t = np.arange(t0,tf,h) F = np.empty(t.shape) Fkpe = np.empty(t.shape) FiberLength = np.empty(t.shape) TendonLength = np.empty(t.shape) U = np.arange(t0,1,h) ## Funcoes def computeTendonForce(LseeNorm, Lce_o, Lslack): ''' Compute Tendon Length Input: LseeNorm - Normalized Tendon Length Lsalck - slack length of the tendon (non normalized) Lce_o - Optimal length of the fiber Output: FTendonNorm - Force on the tendon normalized ''' Umax = 0.04 if LseeNorm<(Lslack/Lce_o): FTendonNorm = 0 else: FTendonNorm = ((LseeNorm-Lslack/Lce_o)/(Umax*Lslack/Lce_o))**2 return FTendonNorm def computeParallelElementForce (LceNorm): Umax = 1 if LceNorm<1: FkpeNorm = 0 else: FkpeNorm = ((LceNorm-1)/(Umax))**2 #lce_o/Lce_o = 1 (normalizado) return FkpeNorm def computeForceLengthCurve(LceNorm): width = 0.63 FLNorm = max([0, (1-((LceNorm-1)/width)**2)]) return FLNorm def computeActivation(a, u, h): act = 0.015 deact = 0.05 if u>a: T = act*(0.4+(1.5*a)) else: T = deact/(0.5+(1.5*a)) a += h*((u-a)/T) return a def computeContractileElementDerivative(FLNorm, FCENorm, a): #calculate CE velocity from Hill's equation a1 = .25 b = .25*10 Fmlen = 1.8 Vmax = 8 if FCENorm > a*FLNorm: B = ((2+2/a1)*(FLNorm*Fmlen-FCENorm))/(Fmlen-1) LceNormdot = (0.75+0.75*a)*Vmax*((FCENorm-FLNorm)/B) else: B = FLNorm + (FCENorm/a1) LceNormdot = (0.75+0.75*a)*Vmax*((FCENorm-FLNorm)/B) return LceNormdot def computeContractileElementForce(FTendonNorm, FkpeNorm): FCENorm = FTendonNorm - FkpeNorm return FCENorm def ComputeTendonLength(Lm, Lce_o, LceNorm): LseeNorm = Lm/Lce_o - LceNorm return LseeNorm ``` ## Simulation - Parallel ``` for i in range (len(t)): #ramp if t[i]<=1: Lm = 0.31 elif t[i]>1 and t[i]<2: Lm = .31 - .04*(t[i]-1) #print(Lm) ##################################################################### LseeNorm = (Lm/Lce_o) - LceNorm FTendonNorm = computeTendonForce(LseeNorm, Lce_o, Lslack) FkpeNorm = computeParallelElementForce(LceNorm) FLNorm = computeForceLengthCurve(LceNorm) FCENorm = computeContractileElementForce(FTendonNorm, FkpeNorm) LceNormdot = computeContractileElementDerivative(FLNorm,FCENorm, a) a = computeActivation(a, u, h) LceNorm += h*LceNormdot ##################################################################### F[i] = FTendonNorm*Fmax FiberLength[i] = LceNorm*Lce_o TendonLength[i] = LseeNorm*Lce_o ``` ## Plot ``` fig, ax = plt.subplots(1, 1, figsize=(6,6), sharex=True) ax.plot(t,F,c='red') plt.grid() plt.xlabel('time (s)') plt.ylabel('Force [N]') #ax.legend() fig, ax = plt.subplots(1, 1, figsize=(6,6), sharex=True) ax.plot(t,FiberLength, label = 'fibra') ax.plot(t, TendonLength, label = 'tendao') ax.plot(t,FiberLength + TendonLength, label = 'fibra + tendao') plt.grid() plt.legend(loc = 'best') plt.xlabel('time (s)') plt.ylabel('Length [m]') plt.tight_layout() #ax.legend() ```
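For reference, the relations implemented by the helper functions above can be written out explicitly in normalized form (read directly from the code; $\hat{L}$ and $\hat{F}$ denote lengths and forces normalized by $L_{CE_o}$ and $F_{max}$):

$$ \hat{F}_{SEE} = \begin{cases} 0, & \hat{L}_{SEE} \le L_{slack}/L_{CE_o} \\ \left(\dfrac{\hat{L}_{SEE} - L_{slack}/L_{CE_o}}{U_{max}\, L_{slack}/L_{CE_o}}\right)^{2}, & \text{otherwise,} \end{cases} \qquad \hat{F}_{PE} = \begin{cases} 0, & \hat{L}_{CE} < 1 \\ \left(\hat{L}_{CE} - 1\right)^{2}, & \text{otherwise,} \end{cases} $$

$$ \hat{F}_{L} = \max\!\left(0,\; 1 - \left(\frac{\hat{L}_{CE} - 1}{width}\right)^{2}\right), \qquad \hat{F}_{CE} = \hat{F}_{SEE} - \hat{F}_{PE}, $$

where `computeParallelElementForce` uses $U_{max}=1$ internally. The contractile-element force $\hat{F}_{CE}$ is then inverted through the Hill force-velocity relation in `computeContractileElementDerivative` to obtain the fiber velocity $\dot{\hat{L}}_{CE}$, which the simulation loop integrates with the Euler step `LceNorm += h*LceNormdot`.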
github_jupyter
# Time Series analysis of O'hare taxi rides data ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.model_selection import TimeSeriesSplit, cross_validate, GridSearchCV pd.set_option('display.max_rows', 6) plt.style.use('ggplot') plt.rcParams.update({'font.size': 16, 'axes.labelweight': 'bold', 'figure.figsize': (8,6)}) from mealprep.mealprep import find_missing_ingredients # pd.set_option('display.max_colwidth', None) pd.set_option('display.max_rows', None) import pickle ORD_df = pd.read_csv('../data/ORD_train.csv').drop(columns=['Unnamed: 0', 'Unnamed: 0.1']) ORD_df ``` ## Tom's functions ``` # Custom functions def lag_df(df, lag, cols): return df.assign(**{f"{col}-{n}": df[col].shift(n) for n in range(1, lag + 1) for col in cols}) def ts_predict(input_data, model, n=20, responses=1): predictions = [] n_features = input_data.size for _ in range(n): predictions = np.append(predictions, model.predict(input_data.reshape(1, -1))) # make prediction input_data = np.append(predictions[-responses:], input_data[:n_features-responses]) # new input data return predictions.reshape((-1, responses)) def plot_ts(ax, df_train, df_test, predictions, xlim, response_cols): col_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color'] for i, col in enumerate(response_cols): ax.plot(df_train[col], '-', c=col_cycle[i], label = f'Train {col}') ax.plot(df_test[col], '--', c=col_cycle[i], label = f'Validation {col}') ax.plot(np.arange(df_train.index[-1] + 1, df_train.index[-1] + 1 + len(predictions)), predictions[:,i], c=col_cycle[-i-2], label = f'Prediction {col}') ax.set_xlim(0, xlim+1) ax.set_title(f"Train Shape = {len(df_train)}, Validation Shape = {len(df_test)}", fontsize=16) ax.set_ylabel(df_train.columns[0]) def plot_forecast(ax, df_train, predictions, xlim, response_cols): col_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color'] for i, col in enumerate(response_cols): ax.plot(df_train[col], '-', c=col_cycle[i], label = f'Train {col}') ax.plot(np.arange(df_train.index[-1] + 1, df_train.index[-1] + 1 + len(predictions)), predictions[:,i], '-', c=col_cycle[-i-2], label = f'Prediction {col}') ax.set_xlim(0, xlim+len(predictions)) ax.set_title(f"{len(predictions)}-step forecast", fontsize=16) ax.set_ylabel(response_cols) def create_rolling_features(df, columns, windows=[6, 12]): for window in windows: df["rolling_mean_" + str(window)] = df[columns].rolling(window=window).mean() df["rolling_std_" + str(window)] = df[columns].rolling(window=window).std() df["rolling_var_" + str(window)] = df[columns].rolling(window=window).var() df["rolling_min_" + str(window)] = df[columns].rolling(window=window).min() df["rolling_max_" + str(window)] = df[columns].rolling(window=window).max() df["rolling_min_max_ratio_" + str(window)] = df["rolling_min_" + str(window)] / df["rolling_max_" + str(window)] df["rolling_min_max_diff_" + str(window)] = df["rolling_max_" + str(window)] - df["rolling_min_" + str(window)] df = df.replace([np.inf, -np.inf], np.nan) df.fillna(0, inplace=True) return df lag = 3 ORD_train_lag = lag_df(ORD_df, lag=lag, cols=['seats']).dropna() ORD_train_lag find_missing_ingredients(ORD_train_lag) lag = 3 # you can vary the number of lagged features in the model n_splits = 5 # you can vary the number of train/validation splits response_col = ['rides'] # df_lag = lag_df(df, lag, response_col).dropna() tscv = TimeSeriesSplit(n_splits=n_splits) # define the 
splitter model = RandomForestRegressor() # define the model cv = cross_validate(model, X = ORD_train_lag.drop(columns=response_col), y = ORD_train_lag[response_col[0]], scoring =('r2', 'neg_mean_squared_error'), cv=tscv, return_train_score=True) # pd.DataFrame({'split': range(n_splits), # 'train_r2': cv['train_score'], # 'train_negrmse': cv['train_'] # 'validation_r2': cv['test_score']}).set_index('split') pd.DataFrame(cv) fig, ax = plt.subplots(n_splits, 1, figsize=(8,4*n_splits)) for i, (train_index, test_index) in enumerate(tscv.split(ORD_train_lag)): df_train, df_test = ORD_train_lag.iloc[train_index], ORD_train_lag.iloc[test_index] model = RandomForestRegressor().fit(df_train.drop(columns=response_col), df_train[response_col[0]]) # train model # Prediction loop predictions = model.predict(df_test.drop(columns=response_col))[:,None] # Plot plot_ts(ax[i], df_train, df_test, predictions, xlim=ORD_train_lag.index[-1], response_cols=response_col) ax[0].legend(facecolor='w') ax[i].set_xlabel('time') fig.tight_layout() lag = 3 # you can vary the number of lagged features in the model n_splits = 3 # you can vary the number of train/validation splits response_col = ['rides'] # df_lag = lag_df(df, lag, response_col).dropna() tscv = TimeSeriesSplit(n_splits=n_splits) # define the splitter model = RandomForestRegressor() # define the model param_grid = {'n_estimators': [50, 100, 150, 200], 'max_depth': [10,25,50,100, None]} X = ORD_train_lag.drop(columns=response_col) y = ORD_train_lag[response_col[0]] gcv = GridSearchCV(model, param_grid = param_grid, # X = ORD_train_lag.drop(columns=response_col), # y = ORD_train_lag[response_col[0]], scoring ='neg_mean_squared_error', cv=tscv, return_train_score=True) gcv.fit(X,y) # pd.DataFrame({'split': range(n_splits), # 'train_r2': cv['train_score'], # 'train_negrmse': cv['train_'] # 'validation_r2': cv['test_score']}).set_index('split') gcv.score(X,y) filename = 'grid_search_model_1.sav' pickle.dump(gcv, open(filename, 'wb')) A = list(ORD_train_lag.columns) A.remove('rides') pd.DataFrame({'columns' : A, 'importance' : gcv.best_estimator_.feature_importances_}).sort_values('importance', ascending=False) gcv.best_params_ pd.DataFrame(gcv.cv_results_) gcv.estimator.best_ ```
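To make the lagging step concrete, here is a small illustration of what `lag_df` produces (the toy frame is made up for demonstration and is not part of the O'Hare data):
```
toy = pd.DataFrame({'rides': [10, 12, 15, 11, 9]})
# adds rides-1 and rides-2 columns holding the values from one and two steps earlier;
# the leading rows contain NaN and are removed with .dropna() before modelling,
# exactly as done for ORD_train_lag above
lag_df(toy, lag=2, cols=['rides'])
```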
github_jupyter
# Lesson 04: Numpy - Used for working with tensors - Provides vectors, matrices, and tensors - Provides mathematical functions that operate on vectors, matrices, and tensors - Implemented in Fortran and C in the backend ``` import numpy as np ``` ## Making Arrays ``` arr = np.array([1, 2, 3]) print(arr, type(arr), arr.shape, arr.dtype, arr.ndim) matrix = np.array( [[1, 2, 3], [4, 5, 6.2]] ) print(matrix, type(matrix), matrix.shape, matrix.dtype, matrix.ndim) a = np.zeros((10, 2)) print(a) a = np.ones((4, 5)) print(a) a = np.full((2, 3, 5), 6) print(a) a = np.eye(4) print(a) a = np.random.random((5, 5)) print(a) ``` ## Indexing ``` arr = np.array([ [1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15] ]) print(arr) ``` The indexing format is: [rows , columns] You can then slice the individual dimension as follows: [start : end , start : end] ``` print(arr[1:, 2:4]) a = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) print(a[ [0, 1, 2, 3], [1, 0, 2, 0] ]) print(a[0, 1], a[1, 0], a[2, 2], a[3, 0]) print(np.array([a[0, 1], a[1, 0], a[2, 2], a[3, 0]])) b = np.array([1, 0, 2, 0]) print(a[np.arange(4), b]) a[np.arange(4), b] += 7 print(a) a = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) bool_a = (a > 5) print(bool_a) print(a[bool_a]) print(a[a>7]) ``` ## Data Types ``` b = np.array([1, 2, 3], dtype=np.float64) print(b.dtype) ``` https://numpy.org/doc/stable/reference/arrays.dtypes.html ## Operations ``` x = np.array([ [1, 2], [3, 4] ]) y = np.array([ [5, 6], [7, 8] ]) print(x, x.shape) print(y, y.shape) print(x + y) print(np.add(x, y)) print(x - y) print(np.subtract(x, y)) print(x * y) print(np.multiply(x, y)) print(x / y) print(np.divide(x, y)) ``` ### Matrix Multiplication ``` w = np.array([2, 4]) v = np.array([4, 6]) print(x) print(y) print(w) print(v) ``` #### Vector-vector multiplication ``` print(v.dot(w)) print(np.dot(v, w)) ``` #### Matrix-vector multiplication ``` print(x.dot(w)) ``` #### Matrix multiplication ``` print(x.dot(y)) print(np.dot(x, y)) ``` ### Transpose ``` print(x) print(x.T) ``` http://docs.scipy.org/doc/numpy/reference/routines.array-manipulation.html ### Other Operations ``` print(x) print(np.sum(x)) print(np.sum(x, axis=0)) print(np.sum(x, axis=1)) ``` More array operations are listed here: http://docs.scipy.org/doc/numpy/reference/routines.math.html ## Broadcasting Broadcasting allows Numpy to work with arrays of different shapes. Operations which would have required loops can now be done without them hence speeding up your program. 
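Before the worked loop/tile/broadcasting comparison below, a quick sketch of the shape rule: shapes are compared from the trailing dimension, and each pair of sizes must either be equal or one of them must be 1 (this small example is separate from the arrays used below).
```
col = np.arange(3).reshape(3, 1)   # shape (3, 1)
row = np.array([10, 20, 30, 40])   # shape (4,), treated as (1, 4)
print(col + row)                   # broadcasts to shape (3, 4)
```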
``` x = np.array([ [1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15], [16, 17, 18], ]) print(x, x.shape) y = np.array([1, 2, 3]) print(y, y.shape) ``` ### Loop Approach ``` z = np.empty_like(x) print(z, z.shape) for i in range(x.shape[0]): z[i, :] = x[i, :] + y print(z) ``` ### Tile Approach ``` yy = np.tile(y, (6, 1)) print(yy, yy.shape) print(x + y) ``` ### Broadcasting Approach ``` print(x, x.shape) print(y, y.shape) print(x + y) ``` - https://numpy.org/doc/stable/user/basics.broadcasting.html - http://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc - http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs ## Reshape ``` x = np.array([ [1, 2, 3], [4, 5, 6] ]) y = np.array([2, 2]) print(x, x.shape) print(y, y.shape) ``` ### Transpose Approach ``` xT = x.T print(xT) xTw = xT + y print(xTw) x = xTw.T print(x) ``` Transpose approach in one line ``` print( (x.T + y).T ) ``` ### Reshape Approach ``` print(y, y.shape, y.ndim) y = np.reshape(y, (2, 1)) print(y, y.shape, y.ndim) print(x + y) ``` # Resources - http://docs.scipy.org/doc/numpy/reference/ - https://numpy.org/doc/stable/user/absolute_beginners.html - https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md
github_jupyter
``` import torch import numpy as np import pandas as pd import matchzoo as mz print('matchzoo version', mz.__version__) ranking_task = mz.tasks.Ranking(losses=mz.losses.RankHingeLoss()) ranking_task.metrics = [ mz.metrics.NormalizedDiscountedCumulativeGain(k=3), mz.metrics.NormalizedDiscountedCumulativeGain(k=5), mz.metrics.MeanAveragePrecision() ] print("`ranking_task` initialized with metrics", ranking_task.metrics) print('data loading ...') train_pack_raw = mz.datasets.wiki_qa.load_data('train', task=ranking_task) dev_pack_raw = mz.datasets.wiki_qa.load_data('dev', task=ranking_task, filtered=True) test_pack_raw = mz.datasets.wiki_qa.load_data('test', task=ranking_task, filtered=True) print('data loaded as `train_pack_raw` `dev_pack_raw` `test_pack_raw`') preprocessor = mz.preprocessors.BasicPreprocessor( truncated_length_left = 10, truncated_length_right = 100, filter_low_freq = 2 ) train_pack_processed = preprocessor.fit_transform(train_pack_raw) dev_pack_processed = preprocessor.transform(dev_pack_raw) test_pack_processed = preprocessor.transform(test_pack_raw) preprocessor.context glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=100) term_index = preprocessor.context['vocab_unit'].state['term_index'] embedding_matrix = glove_embedding.build_matrix(term_index) l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1)) embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis] trainset = mz.dataloader.Dataset( data_pack=train_pack_processed, mode='pair', num_dup=2, num_neg=1, batch_size=20, resample=True, sort=False ) testset = mz.dataloader.Dataset( data_pack=test_pack_processed, batch_size=20 ) padding_callback = mz.models.DRMMTKS.get_default_padding_callback() trainloader = mz.dataloader.DataLoader( dataset=trainset, stage='train', callback=padding_callback ) testloader = mz.dataloader.DataLoader( dataset=testset, stage='dev', callback=padding_callback ) model = mz.models.DRMMTKS() model.params['task'] = ranking_task model.params['embedding'] = embedding_matrix model.params['mask_value'] = 0 model.params['top_k'] = 10 model.params['mlp_activation_func'] = 'tanh' model.build() print(model) print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad)) optimizer = torch.optim.Adadelta(model.parameters()) trainer = mz.trainers.Trainer( model=model, optimizer=optimizer, trainloader=trainloader, validloader=testloader, validate_interval=None, epochs=10 ) trainer.run() ```
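The notebook ends right after training. Since the DRMMTKS model already hands its `parameters()` to the optimizer above, it behaves like a regular `torch.nn.Module`, so plain PyTorch serialization is one simple way to persist the trained weights (the file name below is just an example, not part of the original notebook):
```
# save the trained weights after trainer.run()
torch.save(model.state_dict(), 'drmmtks_wikiqa.pt')

# to restore later: rebuild the model with the same params and build() call as above, then
model.load_state_dict(torch.load('drmmtks_wikiqa.pt'))
```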
github_jupyter
# Document embeddings in BigQuery This notebook shows how to do use a pre-trained embedding as a vector representation of a natural language text column. Given this embedding, we can use it in machine learning models. ## Embedding model for documents We're going to use a model that has been pretrained on Google News. Here's an example of how it works in Python. We will use it directly in BigQuery, however. ``` import tensorflow as tf import tensorflow_hub as tfhub model = tf.keras.Sequential() model.add(tfhub.KerasLayer("https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1", output_shape=[20], input_shape=[], dtype=tf.string)) model.summary() model.predict([""" Long years ago, we made a tryst with destiny; and now the time comes when we shall redeem our pledge, not wholly or in full measure, but very substantially. At the stroke of the midnight hour, when the world sleeps, India will awake to life and freedom. A moment comes, which comes but rarely in history, when we step out from the old to the new -- when an age ends, and when the soul of a nation, long suppressed, finds utterance. """]) ``` ## Loading model into BigQuery The Swivel model above is already available in SavedModel format. But we need it on Google Cloud Storage before we can load it into BigQuery. ``` %%bash BUCKET=ai-analytics-solutions-kfpdemo # CHANGE AS NEEDED rm -rf tmp mkdir tmp FILE=swivel.tar.gz wget --quiet -O tmp/swivel.tar.gz https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1?tf-hub-format=compressed cd tmp tar xvfz swivel.tar.gz cd .. mv tmp swivel gsutil -m cp -R swivel gs://${BUCKET}/swivel rm -rf swivel echo "Model artifacts are now at gs://${BUCKET}/swivel/*" ``` Let's load the model into a BigQuery dataset named advdata (create it if necessary) ``` %%bigquery CREATE OR REPLACE MODEL advdata.swivel_text_embed OPTIONS(model_type='tensorflow', model_path='gs://ai-analytics-solutions-kfpdemo/swivel/*') ``` From the BigQuery web console, click on "schema" tab for the newly loaded model. We see that the input is called sentences and the output is called output_0: <img src="swivel_schema.png" /> ``` %%bigquery SELECT output_0 FROM ML.PREDICT(MODEL advdata.swivel_text_embed,( SELECT "Long years ago, we made a tryst with destiny; and now the time comes when we shall redeem our pledge, not wholly or in full measure, but very substantially." AS sentences)) ``` ## Create lookup table Let's create a lookup table of embeddings. We'll use the comments field of a storm reports table from NOAA. This is an example of the Feature Store design pattern. ``` %%bigquery CREATE OR REPLACE TABLE advdata.comments_embedding AS SELECT output_0 as comments_embedding, comments FROM ML.PREDICT(MODEL advdata.swivel_text_embed,( SELECT comments, LOWER(comments) AS sentences FROM `bigquery-public-data.noaa_preliminary_severe_storms.wind_reports` )) ``` For an example of using these embeddings in text similarity or document clustering, please see the following Medium blog post: https://medium.com/@lakshmanok/how-to-do-text-similarity-search-and-document-clustering-in-bigquery-75eb8f45ab65 Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License.
github_jupyter
## Pumpkin Pricing Load up required libraries and dataset. Convert the data to a dataframe containing a subset of the data: - Only get pumpkins priced by the bushel - Convert the date to a month - Calculate the price to be an average of high and low prices - Convert the price to reflect the pricing by bushel quantity ``` import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as mdates from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, classification_report from sklearn.metrics import roc_curve, roc_auc_score from sklearn.linear_model import LogisticRegression import sklearn import numpy as np import calendar import seaborn as sns pumpkins = pd.read_csv('../data/US-pumpkins.csv') pumpkins.head() pumpkins = pumpkins[pumpkins['Package'].str.contains('bushel', case=True, regex=True)] new_columns = ['Package', 'Variety', 'City Name', 'Month', 'Low Price', 'High Price', 'Date', 'City Num', 'Variety Num'] pumpkins = pumpkins.drop([c for c in pumpkins.columns if c not in new_columns], axis=1) price = (pumpkins['Low Price'] + pumpkins['High Price']) / 2 month = pd.DatetimeIndex(pumpkins['Date']).month new_pumpkins = pd.DataFrame({'Month': month, 'Variety': pumpkins['Variety'], 'City': pumpkins['City Name'], 'Package': pumpkins['Package'], 'Low Price': pumpkins['Low Price'],'High Price': pumpkins['High Price'], 'Price': price}) new_pumpkins.loc[new_pumpkins['Package'].str.contains('1 1/9'), 'Price'] = price/1.1 new_pumpkins.loc[new_pumpkins['Package'].str.contains('1/2'), 'Price'] = price*2 new_pumpkins.head() ``` A basic scatterplot reminds us that we only have month data from August through December. We probably need more data to be able to draw conclusions in a linear fashion. 
``` new_pumpkins["Month_str"] = new_pumpkins['Month'].apply(lambda x: calendar.month_abbr[x]) plt.scatter('Month_str', 'Price', data=new_pumpkins) plt.scatter("City", "Price", data=new_pumpkins) new_pumpkins.iloc[:, 0:-1] = new_pumpkins.iloc[:, 0:-1].apply(LabelEncoder().fit_transform) new_pumpkins.head(10) print(new_pumpkins["City"].corr(new_pumpkins["Price"])) print(new_pumpkins["Package"].corr(new_pumpkins["Price"])) new_pumpkins.dropna(inplace=True) new_columns = ["Package", "Price"] lil_pumpkins = new_pumpkins.drop([c for c in new_pumpkins.columns if c not in new_columns], axis="columns") X = lil_pumpkins.values[:, :1] y = lil_pumpkins.values[:, 1:2] X from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) lil_reg = LinearRegression() lil_reg.fit(X_train, y_train) pred = lil_reg.predict(X_test) accuracy_score = lil_reg.score(X_train, y_train) print(f"Model accuracy: {accuracy_score}") plt.scatter(X_test, y_test, color="black") plt.plot(X_test, pred, color="blue", linewidth=3) plt.xlabel("Package") plt.ylabel("Price") lil_reg.predict([[2.75]]) new_columns = ['Variety', 'Package', 'City', 'Month', 'Price'] poly_pumpkins = new_pumpkins.drop([c for c in new_pumpkins.columns if c not in new_columns], axis="columns") corr = poly_pumpkins.corr() corr.style.background_gradient(cmap="coolwarm") X = poly_pumpkins.iloc[:, 3:4].values y = poly_pumpkins.iloc[:, 4:5].values from sklearn.preprocessing import PolynomialFeatures from sklearn.pipeline import make_pipeline pipeline = make_pipeline(PolynomialFeatures(4), LinearRegression()) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) pipeline.fit(X_train, y_train) y_pred = pipeline.predict(X_test) df = pd.DataFrame({"x": X_test[:,0], "y":y_pred[:,0]}) df.sort_values(by="x", inplace=True) # chiamo pd.DataFrame per creare un nuovo df points = pd.DataFrame(df).to_numpy() plt.plot(points[:, 0], points[:, 1], color="blue", linewidth=3) plt.xlabel("Package") plt.ylabel("Price") plt.scatter(X, y, color="black") accuracy_score = pipeline.score(X_train, y_train) accuracy_score pipeline.predict([[2.75]]) ``` # Lecture 1-4 - Binary classification ``` pumpkins = pd.read_csv('../data/US-pumpkins.csv') new_columns = ['Color','Origin','Item Size','Variety','City Name','Package'] new_pumpkins = pumpkins[new_columns] new_pumpkins.dropna(inplace=True) new_pumpkins = new_pumpkins.apply(LabelEncoder().fit_transform) new_pumpkins g = sns.PairGrid(new_pumpkins) g.map(sns.scatterplot) sns.swarmplot(x="Color", y="Item Size", data=new_pumpkins) sns.catplot(x="Color", y="Item Size", kind="violin", data=new_pumpkins) Selected_features = ['Origin','Item Size','Variety','City Name','Package'] X = new_pumpkins[Selected_features] y = new_pumpkins["Color"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) model = LogisticRegression() model.fit(X_train, y_train) predictions = model.predict(X_test) print(classification_report(y_test, predictions)) # print(f"Predicted labels: {predictions}") print(f"Accuracy: {sklearn.metrics.accuracy_score(y_test, predictions)}") from sklearn.metrics import confusion_matrix confusion_matrix(y_test, predictions) y_scores = model.predict_proba(X_test) fpr, tpr, thresholds = roc_curve(y_test, y_scores[:, 1]) sns.lineplot([0, 1], [0, 1]) sns.lineplot(fpr, tpr) auc 
= roc_auc_score(y_test, y_scores[:, 1]) print(auc) ```
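Since this lesson treats pumpkin color as a binary label, the confusion matrix above can be unpacked into named counts for easier reading (a small sketch that relies on `y_test` and `predictions` from the cells above):
```
tn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()
print(f"True negatives: {tn}  False positives: {fp}")
print(f"False negatives: {fn}  True positives: {tp}")
```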
github_jupyter
<table width="100%"> <tr> <td style="background-color:#ffffff;"> <a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="35%" align="left"> </a></td> <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;"> prepared by Abuzer Yakaryilmaz (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>) <br> updated by Melis Pahalı | December 5, 2019 <br> updated by Özlem Salehi | September 17, 2020 </td> </tr></table> <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table> $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ <h2> <font color="blue"> Solutions for </font>Quantum Teleportation</h2> <a id="task1"></a> <h3> Task 1 </h3> Calculate the new quantum state after this CNOT operator. <h3>Solution</h3> The state before CNOT is $ \sqrttwo \big( a\ket{000} + a \ket{011} + b\ket{100} + b \ket{111} \big) $. CNOT(first_qubit,second_qubit) is applied. If the value of the first qubit is 1, then the value of the second qubit is flipped. Thus, the new quantum state after this CNOT is $$ \sqrttwo \big( a\ket{000} + a \ket{011} + b\ket{110} + b \ket{101} \big). $$ <a id="task2"></a> <h3> Task 2 </h3> Calculate the new quantum state after this Hadamard operator. Verify that the resulting quantum state can be written as follows: $$ \frac{1}{2} \ket{00} \big( a\ket{0}+b\ket{1} \big) + \frac{1}{2} \ket{01} \big( a\ket{1}+b\ket{0} \big) + \frac{1}{2} \ket{10} \big( a\ket{0}-b\ket{1} \big) + \frac{1}{2} \ket{11} \big( a\ket{1}-b\ket{0} \big) . $$ <h3>Solution</h3> The state before Hadamard is $ \sqrttwo \big( a\ket{000} + a \ket{011} + b\ket{110} + b \ket{101} \big). 
$ The effect of Hadamard to the first qubit is given below: $ H \ket{0yz} \rightarrow \sqrttwo \ket{0yz} + \sqrttwo \ket{1yz} $ $ H \ket{1yz} \rightarrow \sqrttwo \ket{0yz} - \sqrttwo \ket{1yz} $ For each triple $ \ket{xyz} $ in the quantum state, we apply this transformation: $ \frac{1}{2} \big( a\ket{000} + a\ket{100} \big) + \frac{1}{2} \big( a\ket{011} + a\ket{111} \big) + \frac{1}{2} \big( b\ket{010} - b\ket{110} \big) + \frac{1}{2} \big( b\ket{001} - b\ket{101} \big) . $ We can rearrange the summation so that we can separate Asja's qubit from the Balvis' qubit: $ \frac{1}{2} \big( a\ket{000}+b\ket{001} \big) + \frac{1}{2} \big( a\ket{011}+b\ket{010} \big) + \frac{1}{2} \big( a\ket{100} - b\ket{101} \big) + \frac{1}{2} \big( a\ket{111}- b\ket{110} \big) $. This is equivalent to $$ \frac{1}{2} \ket{00} \big( a\ket{0}+b\ket{1} \big) + \frac{1}{2} \ket{01} \big( a\ket{1}+b\ket{0} \big) + \frac{1}{2} \ket{10} \big( a\ket{0}-b\ket{1} \big) + \frac{1}{2} \ket{11} \big( a\ket{1}-b\ket{0} \big) . $$ <a id="task3"></a> <h3> Task 3 </h3> Asja sends the measurement outcomes to Balvis by using two classical bits: $ x $ and $ y $. For each $ (x,y) $ pair, determine the quantum operator(s) that Balvis can apply to obtain $ \ket{v} = a\ket{0}+b\ket{1} $ exactly. <h3>Solution</h3> <b>Measurement outcome "00":</b> The state of Balvis' qubit is $ a\ket{0}+b\ket{1} $. Balvis does not need to apply any extra operation. <b>Measurement outcome "01":</b> The state of Balvis' qubit is $ a\ket{1}+b\ket{0} $. If Balvis applies <u>NOT operator</u>, then the state becomes: $ a\ket{0}+b\ket{1} $. <b>Measurement outcome "10":</b> The state of Balvis' qubit is $ a\ket{0}-b\ket{1} $. If Balvis applies <u>Z operator</u>, then the state becomes: $ a\ket{0}+b\ket{1} $. <b>Measurement outcome "11":</b> The state of Balvis' qubit is $ a\ket{1}-b\ket{0} $. If Balvis applies <u>NOT operator</u> and <u>Z operator</u>, then the state becomes: $ a\ket{0}+b\ket{1} $. <a id="task4"></a> <h3> Task 4 </h3> Create a quantum circuit with three qubits and two classical bits. Assume that Asja has the first two qubits and Balvis has the third qubit. Implement the protocol given above until Balvis makes the measurement. <ul> <li>Create entanglement between Asja's second qubit and Balvis' qubit.</li> <li>The state of Asja's first qubit can be initialized to a randomly picked angle.</li> <li>Asja applies CNOT and Hadamard operators to her qubits.</li> <li>Asja measures her own qubits and the results are stored in the classical registers. </li> </ul> At this point, read the state vector of the circuit by using "statevector_simulator". <i> When a circuit having measurement is simulated by "statevector_simulator", the simulator picks one of the outcomes, and so we see one of the states after the measurement.</i> Verify that the state of Balvis' qubit is in one of these: $ \ket{v_{00}}$, $ \ket{v_{01}}$, $ \ket{v_{10}}$, and $ \ket{v_{11}}$. <i> Follow the Qiskit order. That is, let qreg[2] be Asja's first qubit, qreg[1] be Asja's second qubit and let qreg[0] be Balvis' qubit.</i> <h3>Solution</h3> ``` from qiskit import QuantumCircuit,QuantumRegister,ClassicalRegister,execute,Aer from random import randrange from math import sin,cos,pi # We start with 3 quantum registers # qreg[2]: Asja's first qubit - qubit to be teleported # qreg[1]: Asja's second qubit # qreg[0]: Balvis' qubit qreg=QuantumRegister(3) creg=ClassicalRegister(2) #Classical register with 2 qubits is enough qcir=QuantumCircuit(qreg,creg) # Generation of the entangled state. 
# Asja's second qubit is entangled with Balvis' qubit. qcir.h(qreg[1]) qcir.cx(qreg[1],qreg[0]) qcir.barrier() # We create a random qubit to teleport. # We pick a random angle. d=randrange(360) r=2*pi*d/360 print("Picked angle is "+str(d)+" degrees, "+str(round(r,2))+" radians.") # The amplitudes of the angle. x=cos(r) y=sin(r) print("cos component of the angle: "+str(round(x,2))+", sin component of the angle: "+str(round(y,2))) print("So to be teleported state is "+str(round(x,2))+"|0>+"+str(round(y,2))+"|1>.") #Asja's qubit to be teleported # Generation of random qubit by rotating the quantum register at the amount of picked angle. qcir.ry(2*r,qreg[2]) qcir.barrier() #CNOT operator by Asja where first qubit is the control and second qubit is the target qcir.cx(qreg[2],qreg[1]) qcir.barrier() #Hadamard operator by Asja on her first qubit qcir.h(qreg[2]) qcir.barrier() #Measurement by Asja stored in classical registers qcir.measure(qreg[1],creg[0]) qcir.measure(qreg[2],creg[1]) print() result=execute(qcir,Aer.get_backend('statevector_simulator'),optimization_level=0).result() print("When you use statevector_simulator, one of the possible outcomes is picked randomly. Classical registers contain:") print(result.get_counts()) print() print("The final statevector.") v=result.get_statevector() for i in range(len(v)): print(v[i].real) print() qcir.draw(output='mpl') ``` <a id="task5"></a> <h3> Task 5 </h3> Implement the protocol above by including the post-processing part done by Balvis, i.e., the measurement results by Asja are sent to Balvis and then he may apply $ X $ or $ Z $ gates depending on the measurement results. We use the classically controlled quantum operators. Since we do not make measurement on $ q[2] $, we define only 2 classical bits, each of which can also be defined separated. ```python q = QuantumRegister(3) c2 = ClassicalRegister(1,'c2') c1 = ClassicalRegister(1,'c1') qc = QuantumCircuit(q,c1,c2) ... qc.measure(q[1],c1) ... qc.x(q[0]).c_if(c1,1) # x-gate is applied to q[0] if the classical bit c1 is equal to 1 ``` Read the state vector and verify that Balvis' state is $ \myvector{a \\ b} $ after the post-processing. <h3>Solution</h3> <i>Classically controlled</i> recovery operations are also added as follows. Below, the state vector is used to confirm that quantum teleportation is completed. ``` from qiskit import QuantumCircuit,QuantumRegister,ClassicalRegister,execute,Aer from random import randrange from math import sin,cos,pi # We start with 3 quantum registers # qreg[2]: Asja's first qubit - qubit to be teleported # qreg[1]: Asja's second qubit # qreg[0]: Balvis' qubit qreg=QuantumRegister(3) c1=ClassicalRegister(1) c2=ClassicalRegister(1) qcir=QuantumCircuit(qreg,c1,c2) # Generation of the entangled state. # Asja's second qubit is entangled with Balvis' qubit. qcir.h(qreg[1]) qcir.cx(qreg[1],qreg[0]) qcir.barrier() # We create a random qubit to teleport. # We pick a random angle. d=randrange(360) r=2*pi*d/360 print("Picked angle is "+str(d)+" degrees, "+str(round(r,2))+" radians.") # The amplitudes of the angle. x=cos(r) y=sin(r) print("Cos component of the angle: "+str(round(x,2))+", sin component of the angle: "+str(round(y,2))) print("So to be teleported state is "+str(round(x,2))+"|0>+"+str(round(y,2))+"|1>.") #Asja's qubit to be teleported # Generation of random qubit by rotating the quantum register at the amount of picked angle. 
qcir.ry(2*r,qreg[2]) qcir.barrier() #CNOT operator by Asja where first qubit is the control and second qubit is the target qcir.cx(qreg[2],qreg[1]) qcir.barrier() #Hadamard operator by Asja on the first qubit qcir.h(qreg[2]) qcir.barrier() #Measurement by Asja stored in classical registers qcir.measure(qreg[1],c1) qcir.measure(qreg[2],c2) print() #Post processing by Balvis qcir.x(qreg[0]).c_if(c1,1) qcir.z(qreg[0]).c_if(c2,1) result2=execute(qcir,Aer.get_backend('statevector_simulator'),optimization_level=0).result() print("When you use statevector_simulator, one of the possible outcomes is picked randomly. Classical registers contain:") print(result2.get_counts()) # print() print("The final statevector.") v=result2.get_statevector() for i in range(len(v)): print(v[i].real) print() qcir.draw(output='mpl') ```
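As a quick numerical check of Task 5 (a sketch that reuses `v`, `x` and `y` from the cell above): after the corrections, Balvis' qubit should hold $\cos(r)\ket{0} + \sin(r)\ket{1}$, so the magnitudes of the non-zero statevector amplitudes should match $|\cos r|$ and $|\sin r|$ up to a global phase.
```
# compare the surviving amplitude magnitudes with the amplitudes of the teleported qubit
amps = sorted(abs(c) for c in v if abs(c) > 1e-6)
print("non-zero amplitude magnitudes:", [round(a, 3) for a in amps])
print("expected |cos r| and |sin r|  :", sorted([round(abs(x), 3), round(abs(y), 3)]))
```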
github_jupyter
# DLISIO in a Nutshell ## Importing ``` %matplotlib inline import os import pandas as pd import dlisio import matplotlib.pyplot as plt import numpy as np import numpy.lib.recfunctions as rfn import hvplot.pandas import holoviews as hv from holoviews import opts, streams from holoviews.plotting.links import DataLink hv.extension('bokeh', logo=None) ``` ### You can work with a single file using the cell below - or by adding an additional for loop to the code below, you can work through a list of files. Another option is to use os.walk to get all .dlis files in a parent folder. Example: for (root, dirs, files) in os.walk(folderpath): for f in files: filepath = os.path.join(root, f) if filepath.endswith('.' + 'dlis'): print(filepath) ### But for this example, we will work with a single .dlis file specified in the cell below. Note that there are some .dlis file formats that are not supported by DLISIO yet - good to catch them in a try except loop if you are reading files enmasse. ### We will load a dlis file from the open source Volve dataset available here: https://data.equinor.com/dataset/Volve ``` filepath = r"" ``` ## Query for specific curve ### Very quickly you can use regex to find certain curves in a file (helpful if you are scanning a lot of files for certain curves) ``` with dlisio.dlis.load(filepath) as file: for d in file: depth_channels = d.find('CHANNEL','DEPT') for channel in depth_channels: print(channel.name) print(channel.curves()) ``` ## Examining internal files and frames ### Keep in mind that dlis files can contain multiple files and multiple frames. You can quickly get a numpy array of the curves in each frame below. ``` with dlisio.dlis.load(filepath) as file: print(file.describe()) with dlisio.dlis.load(filepath) as file: for d in file: for fram in d.frames: print(d.channels) print(fram.curves()) ``` ## Metadata including Origin information (well name and header) ``` with dlisio.dlis.load(filepath) as file: for d in file: print(d.describe()) for fram in d.frames: print(fram.describe()) for channel in d.channels: print(channel.describe()) with dlisio.dlis.load(filepath) as file: for d in file: for origin in d.origins: print(origin.describe()) ``` ## Reading a full dlis file ### But most likely we want a single data frame of every curve, no matter which frame it came from. So we write a bit more code to look through each frame, then look at each channel and get the curve name and unit information along with it. We will also save the information about which internal file and which frame each curve resides in. ``` curves_L = [] curves_name = [] longs = [] unit = [] files_L = [] files_num = [] frames = [] frames_num = [] with dlisio.dlis.load(filepath) as file: for d in file: files_L.append(d) frame_count = 0 for fram in d.frames: if frame_count == 0: frames.append(fram) frame_count = frame_count + 1 for channel in d.channels: curves_name.append(channel.name) longs.append(channel.long_name) unit.append(channel.units) files_num.append(len(files_L)) frames_num.append(len(frames)) curves = channel.curves() curves_L.append(curves) curve_index = pd.DataFrame( {'Curve': curves_name, 'Long': longs, 'Unit': unit, 'Internal_File': files_num, 'Frame_Number': frames_num }) curve_index ``` ## Creating a Pandas dataframe for the entire .dlis file ### We have to be careful creating a dataframe for the whole .dlis file as often there are some curves that represent mulitple values (numpy array of list values). 
So, you can use something like: df = pd.DataFrame(data=curves_L, index=curves_name).T ### to view the full dlis file with lists as some of the curve values. ### Or we will use the code below to process each curve's 2D numpy array, stacking it if the curve contains multiple values per sample. Then we convert each curve into its own dataframe (uniquifying the column names by adding a .1, .2, .3...etc). Then, to preserve the order with the curve index above, append each data frame together in order to build the final dlis full dataframe. ``` def df_column_uniquify(df): df_columns = df.columns new_columns = [] for item in df_columns: counter = 0 newitem = item while newitem in new_columns: counter += 1 newitem = "{}_{}".format(item, counter) new_columns.append(newitem) df.columns = new_columns return df curve_df = pd.DataFrame() name_index = 0 for c in curves_L: name = curves_name[name_index] np.vstack(c) try: num_col = c.shape[1] col_name = [name] * num_col df = pd.DataFrame(data=c, columns=col_name) name_index = name_index + 1 df = df_column_uniquify(df) curve_df = pd.concat([curve_df, df], axis=1) except: num_col = 0 df = pd.DataFrame(data=c, columns=[name]) name_index = name_index + 1 curve_df = pd.concat([curve_df, df], axis=1) continue curve_df.head() ## If we have a simpler dlis file with a single logical file and single frame and with single data values in each channel. with dlisio.dlis.load(filepath) as file: logical_count = 0 for d in file: frame_count = 0 for fram in d.frames: if frame_count == 0 & logical_count == 0: curves = fram.curves() curve_df = pd.DataFrame(curves, index=curves[fram.index]) curve_df.head() ``` ### Then we can set the index and start making some plots. ``` curve_df = df_column_uniquify(curve_df) curve_df['DEPTH_Calc_ft'] = curve_df.loc[:,'TDEP'] * 0.0083333 #0.1 inch/12 inches per foot curve_df['DEPTH_ft'] = curve_df['DEPTH_Calc_ft'] curve_df = curve_df.set_index("DEPTH_Calc_ft") curve_df.index.names = [None] curve_df = curve_df.replace(-999.25,np.nan) min_val = curve_df['DEPTH_ft'].min() max_val = curve_df['DEPTH_ft'].max() curve_list = list(curve_df.columns) curve_list.remove('DEPTH_ft') curve_df.head() def curve_plot(log, df, depthname): aplot = df.hvplot(x=depthname, y=log, invert=True, flip_yaxis=True, shared_axes=True, height=600, width=300).opts(fontsize={'labels': 16,'xticks': 14, 'yticks': 14}) return aplot; plotlist = [curve_plot(x, df=curve_df, depthname='DEPTH_ft') for x in curve_list] well_section = hv.Layout(plotlist).cols(len(curve_list)) well_section ``` # Hopefully that is enough code to get you started working with DLISIO. There is much more functionality which can be accessed with help(dlisio) or at the read the docs.
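As noted at the top of this notebook, when scanning many files en masse it helps to wrap the load in a try/except so that .dlis variants DLISIO cannot read yet do not stop the run. A minimal sketch combining that advice with the os.walk pattern mentioned earlier (the folder path is a placeholder):
```
import os
import dlisio

folderpath = r""  # parent folder to scan
for root, dirs, files in os.walk(folderpath):
    for f in files:
        filepath = os.path.join(root, f)
        if not filepath.endswith('.dlis'):
            continue
        try:
            with dlisio.dlis.load(filepath) as file:
                # count logical files and frames as a quick health check
                n_logical = 0
                n_frames = 0
                for d in file:
                    n_logical += 1
                    n_frames += len(d.frames)
                print(filepath, '->', n_logical, 'logical file(s),', n_frames, 'frame(s)')
        except Exception as e:
            print('Could not read', filepath, ':', e)
```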
github_jupyter
``` # Import modules import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt import math from sklearn.model_selection import train_test_split import sklearn.metrics as metrics #keras from tensorflow.keras.utils import to_categorical from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.models import Sequential from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D from tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping from tensorflow.keras.metrics import top_k_categorical_accuracy def reduce_mem_usage(df): """ iterate through all the columns of a dataframe and modify the data type to reduce memory usage. """ start_mem = df.memory_usage().sum() / 1024**2 print('Memory usage of dataframe is {:.2f} MB'.format(start_mem)) for col in df.columns: col_type = df[col].dtype if col_type != object: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) else: df[col] = df[col].astype('category') end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df def import_data(file): """create a dataframe and optimize its memory usage""" df = pd.read_csv(file, parse_dates=True, keep_date_col=True) df = reduce_mem_usage(df) return df train = import_data(r'../input/emnist/emnist-letters-train.csv') test = import_data(r'../input/emnist/emnist-letters-test.csv') print("Train: %s, Test: %s" %(train.shape, test.shape)) # iam_1 = import_data('../input/iam-edited/iam_1_edit.csv') # iam_2 = import_data('../input/iam-edited/iam_2_edit.csv') # iam_3 = import_data('../input/iam-edited/iam_3_edit.csv') # iam_4 = import_data('../input/iam-edited/iam_11_edit.csv') # iam = pd.concat([iam_1,iam_2,iam_3,iam_4],axis=0) # iam.columns = train.columns.values # iam_35_labels = list(iam['35'].values) # iam_labels = [] # for i in iam['35'].values: # if i < 58: # i -= 48 # iam_labels.append(i) # elif 58 < i < 91: # i -= 55 # iam_labels.append(i) # elif 91 < i: # i -= 61 # iam_labels.append(i) # iam['35'].replace(dict(zip(iam_35_labels,iam_labels)),inplace=True) # iam mapp = pd.read_csv( r'../input/emnist/emnist-letters-mapping.txt', delimiter=' ', index_col=0, header=None, squeeze=True ) # train_half = pd.DataFrame(columns=list(train.columns.values)) # for label in mapp.values: # train_label = train[train['35']==(label-48)] # train_label = train_label.iloc[::2] # train_half = pd.concat([train_half,train_label],axis=0) # train_x_half = train_half.iloc[:,1:] # Get the images # train_y_half = train_half.iloc[:,0] # Get the label # del train_half # train_x_half = np.asarray(train_x_half) # train_x_half 
= np.apply_along_axis(rotate, 1, train_x_half) # print ("train_x:",train_x_half.shape) # iam_x = iam.iloc[:,1:] # Get the images # iam_y = iam.iloc[:,0] # Get the label # del iam # iam_x = np.asarray(iam_x) # iam_x = np.apply_along_axis(rotate, 1, iam_x) # print ("iam_x:",iam_x.shape) # train_x = np.concatenate((train_x_half,iam_x),axis=0) # print(train_x.shape) # train_y = np.concatenate((train_y_half,iam_y),axis=0) # print(train_y.shape) # del train_x_half # del train_y_half # del iam_x # del iam_y # train_new = pd.concat([train_half,iam],0) # train_new.shape # Constants HEIGHT = 28 WIDTH = 28 # del train_half # del iam # Split x and y train_x = train.iloc[:,1:] # Get the images train_y = train.iloc[:,0] # Get the label del train # free up some memory test_x = test.iloc[:,1:] test_y = test.iloc[:,0] del test # Reshape and rotate EMNIST images def rotate(image): image = image.reshape(HEIGHT, WIDTH) image = np.fliplr(image) image = np.rot90(image) return image # Flip and rotate image train_x = np.asarray(train_x) train_x = np.apply_along_axis(rotate, 1, train_x) print ("train_x:",train_x.shape) test_x = np.asarray(test_x) test_x = np.apply_along_axis(rotate, 1, test_x) print ("test_x:",test_x.shape) # Normalize train_x = train_x / 255.0 test_x = test_x / 255.0 print(type(train_x[0,0,0])) print(type(test_x[0,0,0])) # Plot image for i in range(100,109): plt.subplot(330 + (i+1)) plt.subplots_adjust(hspace=0.5, top=1) plt.imshow(train_x[i], cmap=plt.get_cmap('gray')) plt.title(chr(mapp.iloc[train_y[i]-1,0])) # Number of classes num_classes = train_y.nunique() # .nunique() returns the number of unique objects print(num_classes) # One hot encoding train_y = to_categorical(train_y-1, num_classes) test_y = to_categorical(test_y-1, num_classes) print("train_y: ", train_y.shape) print("test_y: ", test_y.shape) # partition to train and val train_x, val_x, train_y, val_y = train_test_split(train_x, train_y, test_size=0.10, random_state=7) print(train_x.shape, val_x.shape, train_y.shape, val_y.shape) # Reshape train_x = train_x.reshape(-1, HEIGHT, WIDTH, 1) test_x = test_x.reshape(-1, HEIGHT, WIDTH, 1) val_x = val_x.reshape(-1, HEIGHT, WIDTH, 1) # Create more images via data augmentation datagen = ImageDataGenerator( rotation_range = 10, zoom_range = 0.10, width_shift_range=0.1, height_shift_range=0.1 ) train_gen = datagen.flow(train_x, train_y, batch_size=64) val_gen = datagen.flow(val_x, val_y, batch_size=64) # Building model # ((Si - Fi + 2P)/S) + 1 model = Sequential() model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(HEIGHT, WIDTH, 1))) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size=3,activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size=5, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(64, kernel_size=3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size=3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(128, kernel_size=4, activation='relu')) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dropout(0.4)) model.add(Dense(units=num_classes, activation='softmax')) input_shape = (None, HEIGHT, WIDTH, 1) model.build(input_shape) model.summary() my_callbacks = [ # Decrease learning rate LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x), # Training will stop 
if there is no improvement in val_accuracy for 3 epochs EarlyStopping(monitor="val_accuracy", patience=3, mode='max', restore_best_weights=True) ] # def top_3_accuracy(y_true, y_pred): # return top_k_categorical_accuracy(y_true, y_pred, k=3) # TRAIN NETWORKS model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # history = model.fit(train_x, train_y, # epochs=100, # verbose=1, validation_data=(val_x, val_y), # callbacks=my_callbacks) # With datagen history = model.fit_generator(train_gen, steps_per_epoch=train_x.shape[0]//64, epochs=100, validation_data=val_gen, validation_steps=val_x.shape[0]//64, callbacks=my_callbacks) # plot accuracy and loss def plotacc(epochs, acc, val_acc): # Plot training & validation accuracy values plt.plot(epochs, acc, 'b') plt.plot(epochs, val_acc, 'r') plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Val'], loc='upper left') plt.show() def plotloss(epochs, acc, val_acc): # Plot training & validation loss values plt.plot(epochs, acc, 'b') plt.plot(epochs, val_acc, 'r') plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Val'], loc='upper left') plt.show() #%% acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1,len(acc)+1) # Accuracy curve plotacc(epochs, acc, val_acc) # loss curve plotloss(epochs, loss, val_loss) # del train_x # del train_y score = model.evaluate(test_x, test_y, verbose=0) print("Test loss:", score[0]) print("Test accuracy:", score[1]) model.save("emnist_model_letters_aug.h5") model.save_weights("emnist_model_weights_letters_aug.h5") # Predicted class = argmax of the softmax probabilities y_pred = model.predict(test_x) cm = metrics.confusion_matrix(test_y.argmax(axis=1), y_pred.argmax(axis=1)) print(cm) ```
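As a quick sanity check after saving, one might reload the model and classify a single test image. This is only a sketch that assumes the cells above have already been run; it reuses `test_x`, `mapp`, and the `emnist_model_letters_aug.h5` file saved above.

```
from tensorflow.keras.models import load_model

# Reload the model saved above and classify one already-normalized test image.
# The predicted class index is mapped back to a letter with the same
# chr(mapp.iloc[idx, 0]) convention used when plotting the training images.
reloaded = load_model("emnist_model_letters_aug.h5")

sample = test_x[0:1]                    # shape (1, 28, 28, 1)
probs = reloaded.predict(sample)[0]     # softmax probabilities over the 26 classes
pred_idx = int(np.argmax(probs))        # 0-based class index (labels were shifted by -1)
print("Predicted letter:", chr(mapp.iloc[pred_idx, 0]))
print("Confidence: {:.2%}".format(probs[pred_idx]))
```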
github_jupyter
# Example 5: Quantum-to-quantum transfer learning. This is an example of a continuous variable (CV) quantum network for state classification, developed according to the *quantum-to-quantum transfer learning* scheme presented in [1]. ## Introduction In this proof-of-principle demonstration we consider two distinct toy datasets of Gaussian and non-Gaussian states. Such datasets can be generated according to the following simple prescriptions: **Dataset A**: - Class 0 (Gaussian): random Gaussian layer applied to the vacuum. - Class 1 (non-Gaussian): random non-Gaussian layer applied to the vacuum. **Dataset B**: - Class 0 (Gaussian): random Gaussian layer applied to a coherent state with amplitude $\alpha=1$. - Class 1 (non-Gaussian): random Gaussian layer applied to a single photon Fock state $|1\rangle$. **Variational Circuit A**: Our starting point is a single-mode variational circuit [2] (a non-Gaussian layer), pre-trained on _Dataset A_. We assume that after the circuit is applied, the output mode is measured with an _on/off_ detector. By averaging over many shots, one can estimate the vacuum probability: $$ p_0 = | \langle \psi_{\rm out} |0 \rangle|^2. $$ We use _Dataset A_ and train the circuit to rotate Gaussian states towards the vacuum while pushing non-Gaussian states far away from the vacuum. For the final classification we use the simple decision rule: $$ p_0 \ge 0.5 \longrightarrow {\rm Class=0.} \\ p_0 < 0.5 \longrightarrow {\rm Class=1.} $$ **Variational Circuit B**: Once _Circuit A_ has been optimized, we can use it as a pre-trained block applicable also to the different _Dataset B_. In other words, we implement a _quantum-to-quantum_ transfer learning model: _Circuit B_ = _Circuit A_ (pre-trained) followed by a sequence of _variational layers_ (to be trained). Also in this case, after the application of _Circuit B_, we assume that the single mode is measured with an _on/off_ detector, and we apply a similar classification rule: $$ p_0 \ge 0.5 \longrightarrow {\rm Class=1.} \\ p_0 < 0.5 \longrightarrow {\rm Class=0.} $$ The motivation for this transfer learning approach is that, even if _Circuit A_ is optimized on a different dataset, it can still act as a good pre-processing block also for _Dataset B_. Indeed, as we are going to show, the application of _Circuit A_ can significantly improve the training efficiency of _Circuit B_. ## General setup The main imported modules are: the `tensorflow` machine learning framework, the quantum CV software `strawberryfields` [3] and the Python plotting library `matplotlib`. All modules should be correctly installed in the system before running this notebook. ``` # Plotting %matplotlib inline import matplotlib.pyplot as plt # TensorFlow import tensorflow as tf # Strawberryfields (simulation of CV quantum circuits) import strawberryfields as sf from strawberryfields.ops import Dgate, Kgate, Sgate, Rgate, Vgate, Fock, Ket # Other modules import numpy as np import time # System variables import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # avoid warning messages os.environ['OMP_NUM_THREADS'] = '1' # set number of threads. os.environ['CUDA_VISIBLE_DEVICES'] = '1' # select the GPU unit. # Path with pre-trained parameters weights_path = 'results/weights/' ``` Setting of the main parameters of the network model and of the training process.<br> ``` # Hilbert space cutoff cutoff = 15 # Normalization cutoff (must be equal or smaller than cutoff dimension) target_cutoff = 15 # Normalization weight norm_weight = 0 # Batch size batch_size = 8 # Number of batches (i.e.
number training iterations) num_batches = 500 # Number of state generation layers g_depth = 1 # Number of pre-trained layers (for transfer learning) pre_depth = 1 # Number of state classification layers q_depth = 3 # Standard deviation of random state generation parameters rot_sd = np.math.pi * 2 dis_sd = 0 sq_sd = 0.5 non_lin_sd = 0.5 # this is used as fixed non-linear constant. # Standard deviation of initial trainable weights active_sd = 0.001 passive_sd = 0.001 # Magnitude limit for trainable active parameters clip = 1 # Learning rate lr = 0.01 # Random seeds tf.set_random_seed(0) rng_data = np.random.RandomState(1) # Reset TF graph tf.reset_default_graph() ``` ## Variational circuits for state generation and classificaiton ### Input states: _Dataset B_ The dataset is introduced by defining the corresponding random variational circuit that generates input Gaussian and non-Gaussian states. ``` # Placeholders for class labels batch_labels = tf.placeholder(dtype=tf.int64, shape = [batch_size]) batch_labels_fl = tf.to_float(batch_labels) # State generation parameters # Squeezing gate sq_gen = tf.placeholder(dtype = tf.float32, shape = [batch_size,g_depth]) # Rotation gates r1_gen = tf.placeholder(dtype = tf.float32, shape = [batch_size,g_depth]) r2_gen = tf.placeholder(dtype = tf.float32, shape = [batch_size,g_depth]) r3_gen = tf.placeholder(dtype = tf.float32, shape = [batch_size,g_depth]) # Explicit definitions of the ket tensors of |0> and |1> np_ket0, np_ket1 = np.zeros((2, batch_size, cutoff)) np_ket0[:,0] = 1.0 np_ket1[:,1] = 1.0 ket0 = tf.constant(np_ket0, dtype = tf.float32, shape = [batch_size, cutoff]) ket1 = tf.constant(np_ket1, dtype = tf.float32, shape = [batch_size, cutoff]) # Ket of the quantum states associated to the label: i.e. |batch_labels> ket_init = ket0 * (1.0 - tf.expand_dims(batch_labels_fl, 1)) + ket1 * tf.expand_dims(batch_labels_fl, 1) # State generation layer def layer_gen(i, qmode): # If label is 0 (Gaussian) prepare a coherent state with alpha=1 otherwise prepare fock |1> Ket(ket_init) | qmode Dgate((1.0 - batch_labels_fl) * 1.0, 0) | qmode # Random Gaussian operation (without displacement) Rgate(r1_gen[:, i]) | qmode Sgate(sq_gen[:, i], 0) | qmode Rgate(r2_gen[:, i]) | qmode return qmode ``` ### Loading of pre-trained block (_Circuit A_) We assume that _Circuit A_ has been already pre-trained (e.g. by running a dedicated Python script) and that the associated optimal weights have been saved to a NumPy file. Here we first load the such parameters and then we define _Circuit A_ as a constant pre-processing block. 
``` # Loading of pre-trained weights trained_params_npy = np.load('pre_trained/circuit_A.npy') if trained_params_npy.shape[1] < pre_depth: print("Error: circuit q_depth > trained q_depth.") raise SystemExit(0) # Convert numpy arrays to TF tensors trained_params = tf.constant(trained_params_npy) sq_pre = trained_params[0] d_pre = trained_params[1] r1_pre = trained_params[2] r2_pre = trained_params[3] r3_pre = trained_params[4] kappa_pre = trained_params[5] # Definition of the pre-trained Circuit A (single layer) def layer_pre(i, qmode): # Rotation gate Rgate(r1_pre[i]) | qmode # Squeezing gate Sgate(tf.clip_by_value(sq_pre[i], -clip, clip), 0) | qmode # Rotation gate Rgate(r2_pre[i]) | qmode # Displacement gate Dgate(tf.clip_by_value(d_pre[i], -clip, clip) , 0) | qmode # Rotation gate Rgate(r3_pre[i]) | qmode # Cubic gate Vgate(tf.clip_by_value(kappa_pre[i], -clip, clip) ) | qmode return qmode ``` ### Addition of trainable layers (_Circuit B_) As discussed in the introduction, _Circuit B_ is obtained by adding some additional layers that we are going to train on _Dataset B_. ``` # Trainable variables with tf.name_scope('variables'): # Squeeze gate sq_var = tf.Variable(tf.random_normal(shape=[q_depth], stddev=active_sd)) # Displacement gate d_var = tf.Variable(tf.random_normal(shape=[q_depth], stddev=active_sd)) # Rotation gates r1_var = tf.Variable(tf.random_normal(shape=[q_depth], stddev=passive_sd)) r2_var = tf.Variable(tf.random_normal(shape=[q_depth], stddev=passive_sd)) r3_var = tf.Variable(tf.random_normal(shape=[q_depth], stddev=passive_sd)) # Cubic phase gate kappa_var = tf.Variable(tf.random_normal(shape=[q_depth], stddev=active_sd)) # 0-depth parameter (just to generate a gradient) x_var = tf.Variable(0.0) parameters = [sq_var, d_var, r1_var, r2_var, r3_var, kappa_var] # Definition of a single trainable variational layer def layer_var(i, qmode): Rgate(r1_var[i]) | qmode Sgate(tf.clip_by_value(sq_var[i], -clip, clip), 0) | qmode Rgate(r2_var[i]) | qmode Dgate(tf.clip_by_value(d_var[i], -clip, clip) , 0) | qmode Rgate(r3_var[i]) | qmode Vgate(tf.clip_by_value(kappa_var[i], -clip, clip) ) | qmode return qmode ``` ## Symbolic evaluation of the full network We first instantiate a _StrawberryFields_ quantum simulator, tailored for simulating a single-mode quantum optical system. Then we symbolically evaluate a batch of output states. ``` prog = sf.Program(1) eng = sf.Engine('tf', backend_options={'cutoff_dim': cutoff, 'batch_size': batch_size}) # Circuit B with prog.context as q: # State generation network for k in range(g_depth): layer_gen(k, q[0]) # Pre-trained network (Circuit A) for k in range(pre_depth): layer_pre(k, q[0]) # State classification network for k in range(q_depth): layer_var(k, q[0]) # Special case q_depth==0 if q_depth == 0: Dgate(0.001, x_var ) | q[0] # almost identity operation just to generate a gradient. # Symbolic computation of the output state results = eng.run(prog, run_options={"eval": False}) out_state = results.state # Batch state norms out_norm = tf.to_float(out_state.trace()) # Batch mean energies mean_n = out_state.mean_photon(0) ``` ## Loss function, accuracy and optimizer. As usual in machine learning, we need to define a loss function that we are going to minimize during the training phase. As discussed in the introduction, we assume that only the vacuum state probability `p_0` is measured. Ideally, `p_0` should be large for non-Gaussian states (_label 1_), while it should be small for Gaussian states (_label 0_).
The circuit can be trained to this task by minimizing the _cross entropy_ loss function defined in the next cell. Moreover, if `norm_weight` is different from zero, also a regularization term is added to the full cost function in order to reduce quantum amplitudes beyond the target Hilbert space dimension `target_cutoff`. ``` # Batch vacuum probabilities p0 = out_state.fock_prob([0]) # Complementary probabilities q0 = 1.0 - p0 # Cross entropy loss function eps = 0.0000001 main_loss = tf.reduce_mean(-batch_labels_fl * tf.log(p0 + eps) - (1.0 - batch_labels_fl) * tf.log(q0 + eps)) # Decision function predictions = tf.sign(p0 - 0.5) * 0.5 + 0.5 # Accuracy between predictions and labels accuracy = tf.reduce_mean((predictions + batch_labels_fl - 1.0) ** 2) # Norm loss. This is monitored but not minimized. norm_loss = tf.reduce_mean((out_norm - 1.0) ** 2) # Cutoff loss regularization. This is monitored and minimized if norm_weight is nonzero. c_in = out_state.all_fock_probs() cut_probs = c_in[:, :target_cutoff] cut_norms = tf.reduce_sum(cut_probs, axis=1) cutoff_loss = tf.reduce_mean((cut_norms - 1.0) ** 2 ) # Full regularized loss function full_loss = main_loss + norm_weight * cutoff_loss # Optimization algorithm optim = tf.train.AdamOptimizer(learning_rate=lr) training = optim.minimize(full_loss) ``` ## Training and testing Up to now we just defined the analytic graph of the quantum network without numerically evaluating it. Now, after initializing a _TensorFlow_ session, we can finally run the actual training and testing phases. ``` # Function generating a dictionary of random parameters for a batch of states. def random_dict(): param_dict = { # Labels (0 = Gaussian, 1 = non-Gaussian) batch_labels: rng_data.randint(2, size=batch_size), # Squeezing and rotation parameters sq_gen: rng_data.uniform(low=-sq_sd, high=sq_sd, size=[batch_size, g_depth]), r1_gen: rng_data.uniform(low=-rot_sd, high=rot_sd, size=[batch_size, g_depth]), r2_gen: rng_data.uniform(low=-rot_sd, high=rot_sd, size=[batch_size, g_depth]), r3_gen: rng_data.uniform(low=-rot_sd, high=rot_sd, size=[batch_size, g_depth]), } return param_dict # TensorFlow session with tf.Session() as session: session.run(tf.global_variables_initializer()) train_loss = 0.0 train_loss_sum = 0.0 train_acc = 0.0 train_acc_sum = 0.0 test_loss = 0.0 test_loss_sum = 0.0 test_acc = 0.0 test_acc_sum = 0.0 # ========================================================= # Training Phase # ========================================================= if q_depth > 0: for k in range(num_batches): rep_time = time.time() # Training step [_training, _full_loss, _accuracy, _norm_loss] = session.run([ training, full_loss, accuracy, norm_loss], feed_dict=random_dict()) train_loss_sum += _full_loss train_acc_sum += _accuracy train_loss = train_loss_sum / (k + 1) train_acc = train_acc_sum / (k + 1) # Training log if ((k + 1) % 100) == 0: print('Train batch: {:d}, Running loss: {:.4f}, Running acc {:.4f}, Norm loss {:.4f}, Batch time {:.4f}' .format(k + 1, train_loss, train_acc, _norm_loss, time.time() - rep_time)) # ========================================================= # Testing Phase # ========================================================= num_test_batches = min(num_batches, 1000) for i in range(num_test_batches): rep_time = time.time() # Evaluation step [_full_loss, _accuracy, _norm_loss, _cutoff_loss, _mean_n, _parameters] = session.run([full_loss, accuracy, norm_loss, cutoff_loss, mean_n, parameters], feed_dict=random_dict()) test_loss_sum += _full_loss test_acc_sum 
+= _accuracy test_loss = test_loss_sum / (i + 1) test_acc = test_acc_sum / (i + 1) # Testing log if ((i + 1) % 100) == 0: print('Test batch: {:d}, Running loss: {:.4f}, Running acc {:.4f}, Norm loss {:.4f}, Batch time {:.4f}' .format(i + 1, test_loss, test_acc, _norm_loss, time.time() - rep_time)) # Compute mean photon number of the last batch of states mean_fock = np.mean(_mean_n) print('Training and testing phases completed.') print('RESULTS:') print('{:>11s}{:>11s}{:>11s}{:>11s}{:>11s}{:>11s}'.format('train_loss', 'train_acc', 'test_loss', 'test_acc', 'norm_loss', 'mean_n')) print('{:11f}{:11f}{:11f}{:11f}{:11f}{:11f}'.format(train_loss, train_acc, test_loss, test_acc, _norm_loss, mean_fock)) ``` ## References [1] Andrea Mari, Thomas R. Bromley, Josh Izaac, Maria Schuld, and Nathan Killoran. _Transfer learning in hybrid classical-quantum neural networks_. [arXiv:1912.08278](https://arxiv.org/abs/1912.08278), (2019). [2] Nathan Killoran, Thomas R. Bromley, Juan Miguel Arrazola, Maria Schuld, Nicolás Quesada, and Seth Lloyd. _Continuous-variable quantum neural networks_. [arXiv:1806.06871](https://arxiv.org/abs/1806.06871), (2018). [3] Nathan Killoran, Josh Izaac, Nicolás Quesada, Ville Bergholm, Matthew Amy, and Christian Weedbrook. _Strawberry Fields: A Software Platform for Photonic Quantum Computing_. [Quantum, 3, 129 (2019)](https://doi.org/10.22331/q-2019-03-11-129).
github_jupyter
## Borehole lithology logs viewer Interactive view of borehole data used for [exploratory lithology analysis](https://github.com/csiro-hydrogeology/pyela) Powered by [Voila](https://github.com/QuantStack/voila), [ipysheet](https://github.com/QuantStack/ipysheet) and [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) ### Data The sample borehole data around Canberra, Australia is derived from the Australian Bureau of Meteorology [National Groundwater Information System](http://www.bom.gov.au/water/groundwater/ngis/index.shtml). You can check the licensing for these data; the short version is that use for demo and learning purposes is fine. ``` import os import sys import pandas as pd import numpy as np # from bqplot import Axis, Figure, Lines, LinearScale # from bqplot.interacts import IndexSelector # from ipyleaflet import basemaps, FullScreenControl, LayerGroup, Map, MeasureControl, Polyline, Marker, MarkerCluster, CircleMarker, WidgetControl # from ipywidgets import Button, HTML, HBox, VBox, Checkbox, FileUpload, Label, Output, IntSlider, Layout, Image, link from ipywidgets import Output, HTML from ipyleaflet import Map, Marker, MarkerCluster, basemaps import ipywidgets as widgets import ipysheet example_folder = "./examples" # classified_logs_filename = os.path.join(cbr_datadir_out,'classified_logs.pkl') # with open(classified_logs_filename, 'rb') as handle: # df = pickle.load(handle) # geoloc_filename = os.path.join(cbr_datadir_out,'geoloc.pkl') # with open(geoloc_filename, 'rb') as handle: # geoloc = pickle.load(handle) df = pd.read_csv(os.path.join(example_folder,'classified_logs.csv')) geoloc = pd.read_csv(os.path.join(example_folder,'geoloc.csv')) DEPTH_FROM_COL = 'FromDepth' DEPTH_TO_COL = 'ToDepth' TOP_ELEV_COL = 'TopElev' BOTTOM_ELEV_COL = 'BottomElev' LITHO_DESC_COL = 'Description' HYDRO_CODE_COL = 'HydroCode' HYDRO_ID_COL = 'HydroID' BORE_ID_COL = 'BoreID' # if we want to keep vboreholes that have more than one row x = df[HYDRO_ID_COL].values unique, counts = np.unique(x, return_counts=True) multiple_counts = unique[counts > 1] # len(multiple_counts), len(unique) keep = set(df[HYDRO_ID_COL].values) keep = set(multiple_counts) s = geoloc[HYDRO_ID_COL] geoloc = geoloc[s.isin(keep)] class GlobalThing: def __init__(self, bore_data, displayed_colnames = None): self.marker_info = dict() self.bore_data = bore_data if displayed_colnames is None: displayed_colnames = [BORE_ID_COL, DEPTH_FROM_COL, DEPTH_TO_COL, LITHO_DESC_COL] # 'Lithology_1', 'MajorLithCode']] self.displayed_colnames = displayed_colnames def add_marker_info(self, lat, lon, code): self.marker_info[(lat, lon)] = code def get_code(self, lat, lon): return self.marker_info[(lat, lon)] def data_for_hydroid(self, ident): df_sub = self.bore_data.loc[df[HYDRO_ID_COL] == ident] return df_sub[self.displayed_colnames] def register_geolocations(self, geoloc): for index, row in geoloc.iterrows(): self.add_marker_info(row.Latitude, row.Longitude, row.HydroID) globalthing = GlobalThing(df, displayed_colnames = [BORE_ID_COL, DEPTH_FROM_COL, DEPTH_TO_COL, LITHO_DESC_COL, 'Lithology_1']) globalthing.register_geolocations(geoloc) def plot_map(geoloc, click_handler): """ Plot the markers for each borehole, and register a custom click_handler """ mean_lat = geoloc.Latitude.mean() mean_lng = geoloc.Longitude.mean() # create the map m = Map(center=(mean_lat, mean_lng), zoom=12, basemap=basemaps.Stamen.Terrain) m.layout.height = '600px' # show trace markers = [] for index, row in geoloc.iterrows(): message = HTML() message.value = 
str(row.HydroID) message.placeholder = "" message.description = "HydroID" marker = Marker(location=(row.Latitude, row.Longitude)) marker.on_click(click_handler) marker.popup = message markers.append(marker) marker_cluster = MarkerCluster( markers=markers ) # not sure whether we could register once instead of each marker: # marker_cluster.on_click(click_handler) m.add_layer(marker_cluster); # m.add_control(FullScreenControl()) return m # If printing a data frame straight to an output widget def raw_print(out, ident): bore_data = globalthing.data_for_hydroid(ident) out.clear_output() with out: print(ident) print(bore_data) def click_handler_rawprint(**kwargs): blah = dict(**kwargs) xy = blah['coordinates'] ident = globalthing.get_code(xy[0], xy[1]) raw_print(out, ident) # to display using an ipysheet def mk_sheet(d): return ipysheet.pandas_loader.from_dataframe(d) def upate_display_df(ident): bore_data = globalthing.data_for_hydroid(ident) out.clear_output() with out: display(mk_sheet(bore_data)) def click_handler_ipysheet(**kwargs): blah = dict(**kwargs) xy = blah['coordinates'] ident = globalthing.get_code(xy[0], xy[1]) upate_display_df(ident) out = widgets.Output(layout={'border': '1px solid black'}) ``` Note: it may take a minute or two for the display to first appear.... Select a marker: ``` plot_map(geoloc, click_handler_ipysheet) # plot_map(geoloc, click_handler_rawprint) ``` Descriptive lithology: ``` out ## Appendix A : qgrid, but at best ended up with "Model not available". May not work yet with Jupyter lab 1.0.x # import qgrid # d = data_for_hydroid(10062775) # d # import ipywidgets as widgets # def build_qgrid(): # qgrid.set_grid_option('maxVisibleRows', 10) # col_opts = { # 'editable': False, # } # qgrid_widget = qgrid.show_grid(d, show_toolbar=False, column_options=col_opts) # qgrid_widget.layout = widgets.Layout(width='920px') # return qgrid_widget, qgrid # qgrid_widget, qgrid = build_qgrid() # display(qgrid_widget) # pitch_app = widgets.VBox(qgrid_widget) # display(pitch_app) # def click_handler(**kwargs): # blah = dict(**kwargs) # xy = blah['coordinates'] # ident = globalthing.get_code(xy[0], xy[1]) # bore_data = data_for_hydroid(ident) # grid.df = bore_data ## Appendix B: using striplog # from striplog import Striplog, Interval, Component, Legend, Decor # import matplotlib as mpl # lithologies = ['shale', 'clay','granite','soil','sand', 'porphyry','siltstone','gravel', ''] # lithology_color_names = ['lightslategrey', 'olive', 'dimgray', 'chocolate', 'gold', 'tomato', 'teal', 'lavender', 'black'] # lithology_colors = [mpl.colors.cnames[clr] for clr in lithology_color_names] # clrs = dict(zip(lithologies, lithology_colors)) # def mk_decor(lithology, component): # dcor = {'color': clrs[lithology], # 'component': component, # 'width': 2} # return Decor(dcor) # def create_striplog_itvs(d): # itvs = [] # dcrs = [] # for index, row in d.iterrows(): # litho = row.Lithology_1 # c = Component({'description':row.Description,'lithology': litho}) # decor = mk_decor(litho, c) # itvs.append(Interval(row.FromDepth, row.ToDepth, components=[c]) ) # dcrs.append(decor) # return itvs, dcrs # def click_handler(**kwargs): # blah = dict(**kwargs) # xy = blah['coordinates'] # ident = globalthing.get_code(xy[0], xy[1]) # bore_data = data_for_hydroid(ident) # itvs, dcrs = create_striplog_itvs(bore_data) # s = Striplog(itvs) # with out: # print(ident) # print(s.plot(legend = Legend(dcrs))) # def plot_striplog(bore_data, ax=None): # itvs, dcrs = create_striplog_itvs(bore_data) # s = Striplog(itvs) # 
s.plot(legend = Legend(dcrs), ax=ax) # def plot_evaluation_metrics(bore_data): # fig, ax = plt.subplots(figsize=(12, 3)) # # actual plotting # plot_striplog(bore_data, ax=ax) # # finalize # fig.suptitle("Evaluation metrics with cutoff\n", va='bottom') # plt.show() # plt.close(fig) # %matplotlib inline # from ipywidgets import interactive # import matplotlib.pyplot as plt # import numpy as np # def f(m, b): # plt.figure(2) # x = np.linspace(-10, 10, num=1000) # plt.plot(x, m * x + b) # plt.ylim(-5, 5) # plt.show() # interactive_plot = interactive(f, m=(-2.0, 2.0), b=(-3, 3, 0.5)) # output = interactive_plot.children[-1] # output.layout.height = '350px' # interactive_plot # def update_sheet(s, d): # print("before: %s"%(s.rows)) # s.rows = len(d) # for i in range(len(d.columns)): # s.cells[i].value = d[d.columns[i]].values ```
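For reference, the raw-print variant mentioned in the commented line above can be wired up the same way; a minimal sketch, assuming the cells above have been run and using IPython's `display`:

```
from IPython.display import display

# Alternative wiring: the raw-print click handler instead of the ipysheet one.
# Clicking a marker then prints the selected borehole's rows into `out`.
m = plot_map(geoloc, click_handler_rawprint)
display(m)
display(out)
```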
github_jupyter
``` ## do_runcode ##%overwritefile ##%file:src/do_dot_runcode.py ##%noruncode def do_runcode(self,return_code,fil_ename,magics,code, silent, store_history=True, user_expressions=None, allow_stdin=True): return_code=return_code fil_ename=fil_ename bcancel_exec=False retinfo=self.mymagics.get_retinfo() retstr='' ## Before running the code ## If an interpreter is available, use it to execute the file fil_ename; otherwise execute the file fil_ename directly src = None has_error = False try: src = Source(code) png_src = src.pipe(format="svg") except subprocess.CalledProcessError as _called_error: has_error = True error = _called_error.stderr except Exception as e: self.mymagics._logln(str(e),3) if not has_error: data_string = base64.b64encode(png_src).decode("utf-8",errors='ignore') mimetype='image/svg+xml' header="<div><img alt=\"Output\" src=\"data:"+mimetype+";base64," end="\"></div>" data_string=header+data_string+end self.send_response(self.iopub_socket, 'display_data', {'data': {mimetype:data_string}, 'metadata': {mimetype:{}}}) else: self.mymagics._logln(error.decode(),3) ## if p.returncode != 0: ## self.mymagics._log("Executable exited with code {}".format(p.returncode),2) return bcancel_exec,retinfo,magics, code,fil_ename,retstr ## do_compile_code ##%overwritefile ##%file:src/do_dot_compilecode.py ##%noruncode def do_compile_code(self,return_code,fil_ename,magics,code, silent, store_history=True, user_expressions=None, allow_stdin=True): bcancel_exec=False retinfo=self.mymagics.get_retinfo() retstr='' binary_filename=fil_ename # if len(self.kernel_info['compiler']['cmd'])>0: # ## Run the compilation step # returncode,binary_filename=self._exec_sc_(fil_ename,magics) # if returncode!=0:return True,retinfo, code,fil_ename,retstr return bcancel_exec,retinfo,magics, code,binary_filename,retstr ## do_dot_create_codefile ##%overwritefile ##%file:src/do_dot_create_codefile.py ##%noruncode def do_create_codefile(self,magics,code, silent, store_history=True, user_expressions=None, allow_stdin=True): fil_ename='' bcancel_exec=False retinfo=self.mymagics.get_retinfo() retstr='' source_file=self.mymagics.create_codetemp_file(magics,code,suffix=self.kernel_info['extension']) fil_ename=source_file.name return bcancel_exec,retinfo,magics, code,fil_ename,retstr ## do_dot_preexecute ##%overwritefile ##%file:src/do_dot_preexecute.py ##%noruncode def do_preexecute(self,code, magics,silent, store_history=True, user_expressions=None, allow_stdin=False): bcancel_exec=False retinfo=self.mymagics.get_retinfo() ## If the code should be run and a main() function is required, call _add_main to handle it if (len(self.mymagics.addkey2dict(magics,'noruncode'))<1 and len(self.kernel_info['needmain'])>0 ): magics, code = self.mymagics._add_main(magics, code) return bcancel_exec,retinfo,magics, code ```
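To see the rendering step from `do_runcode` in isolation, here is a small standalone sketch; it assumes the `graphviz` Python package and the Graphviz `dot` binary are installed, and the sample DOT string is arbitrary:

```
import base64
from graphviz import Source

# Render a DOT graph to SVG and wrap it in a base64 data URI,
# mirroring the display_data payload built in do_runcode above.
dot_code = "digraph G { a -> b; b -> c; }"
svg_bytes = Source(dot_code).pipe(format="svg")

data_string = base64.b64encode(svg_bytes).decode("utf-8", errors="ignore")
mimetype = "image/svg+xml"
html = "<div><img alt=\"Output\" src=\"data:" + mimetype + ";base64," + data_string + "\"></div>"
print(html[:80] + "...")
```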
github_jupyter
# Series Inelastic Cantilever This notebook verifies the `elle.beam2dseries` element against an analysis run with the FEDEASLab `Inel2dFrm_wOneComp` element. ``` import anon import anon as ana import elle.beam2d import elle.solvers import elle.sections import anon.ops as anp ``` ### Model Definition ``` from elle.beam2dseries import no_3, no_4, no_5, no_6 from elle.sections import aisc L = 72.0 E = 29e3 fy = 60.0 Hi = 1.0e-6 Hk = 1e-9 #1.0e-9 sect = aisc.load('W14x426','A, I, Zx') Np = fy*sect['A'] Mp = fy*sect['Zx'] Hi = Hi * 6.* E*sect['I']/L * anp.ones((2,1)) Hk = Hk * 6.* E*sect['I']/L * anp.ones((2,1)) xyz = anp.array([[0.0, 0.0],[0.0, L]]) Mp_vector = anp.array([Mp,Mp])[:,None] u = anp.zeros(3) limit_surface = elle.beam2dseries.no_6 geometry = elle.beam2d.geom_no1 transform = elle.beam2d.transform(geometry) basic_response = elle.beam2d.resp_no1 BeamResponse = transform( # <u>, p, state -> u, <p>, state limit_surface( basic_response(E=E,**sect), Mp=Mp_vector,Hi=Hi,Hk=Hk,tol=1e-7 ), xyz=xyz ) BeamResponse ag = anon.autodiff.jacfwd(geometry(xyz), 1, 0) BeamModel = elle.solvers.invert_no2(BeamResponse, nr=3, maxiter=20, tol=1e-6) BeamModel ``` ## Loading ``` # Np = 0.85*Np q_ref = anp.array([[ 0.0*Np, 0.0, 0.000*Mp], [-0.4*Np, 0.0, 0.000*Mp], [-0.4*Np, 0.0, 0.400*Mp], [-0.4*Np, 0.0, 0.700*Mp], [-0.32*Np, 0.0, 0.600*Mp], [-0.2*Np, 0.0, 0.400*Mp], [-0.1*Np, 0.0, 0.200*Mp], [-0.0*Np, 0.0, 0.000*Mp]]) # steps = [5,8,15,15,15,15,10] steps = [5,5,5,5,5,5,5] load_history = elle.sections.load_hist(q_ref, steps) ``` ### Model Initialization ``` u, q = [], [] u0, p0 = anp.zeros((6,1)), anp.zeros((6,1)) # vi = U_to_V(U[0]) BeamResponse(u0,p0) pi, ui, state = BeamModel(p0, u0) u.append(ui) q.append(state[0]) # [print(s) for s in state] # print(ui) ``` ### Analysis Procedure ``` for i in range(len(load_history)): pi = ag(ui, pi).T @ load_history[i][:, None] pi, ui, state = BeamModel(pi, ui, state) u.append(ui) q.append(state[0]) ``` ## Post Processing and Validation ``` import matplotlib.pyplot as plt # plt.style.use('trois-pas'); # %config InlineBackend.figure_format = 'svg' fig, ax = plt.subplots() ax.plot([ ui[1] for ui in u ],[ qi[2] for qi in q ], '.'); # ax.plot([ ui[1] for ui in u ], # [ pi['Elem'][0]['q'][1] for pi in data['Post'] ], '.', label='FEDEASLab'); # plt.legend() fig.savefig('../img/no1-analysis.svg') plt.plot([ i for i in range(len(q)) ], [ qi[0] for qi in q ], '.') # plt.plot([ i for i in range(len(q)) ], [ pi['Elem'][0]['q'][0] for pi in post ], '.', label='FEDEASLab') # plt.legend(); plt.plot([ i for i in range(len(q)) ], [ qi[1] for qi in q ], 'x'); # plt.plot([ i for i in range(len(q)) ], # [ pi['Elem'][0]['q'][1] for pi in data['Post'] ], '.', label='FEDEASLab'); # plt.legend(); plt.plot([ i for i in range(len(q)) ], [ qi[2] for qi in q ], '.'); # plt.plot([ i for i in range(len(q)) ], # [ pi['Elem'][0]['q'][2] for pi in post ], '.', label='FEDEASLab'); # plt.legend(); plt.plot([i for i in range(len(u))],[ ui[1] for ui in u ], '.'); ```
github_jupyter
``` import folium import pandas as pd import numpy as np import seaborn as sns import matplotlib data = pd.read_csv('MDD_Dataset_PSD_and_SNR.csv') coords = pd.read_excel('AudioMothPeru_Coordinates.xlsx', engine='openpyxl') data data = data.drop(columns=['SourceFile', 'Directory', 'FileSize', 'AudioMothID', 'Encoding', 'NumChannels', 'SampleRate', 'AvgBytesPerSec', 'BitsPerSample', 'Artist', 'FileType', 'MIMEType', 'LOWER_FREQUENCY', 'UPPER_FREQUENCY', 'Error']) # Only has 8 entries, not a good sample data.drop(data[data['AudioMothCode'] == 'AM-18'].index, inplace=True) data coords devices_PSD = {} devices_SNR = {} devices_time = {} devices_list = data.drop_duplicates(subset=['AudioMothCode']).loc[:,'AudioMothCode'].values def Average(lst): return sum(lst) / len(lst) def colors_from_values(values, palette_name): # normalize the values to range [0, 1] normalized = (values - min(values)) / (max(values) - min(values)) # convert to indices indices = np.round(normalized * (len(values) - 1)).astype(np.int32) # use the indices to get the colors palette = sns.color_palette(palette_name, len(values)) return np.array(palette).take(indices, axis=0) # Add the PSD and SNR values to their corresponding audiomoths PSD_averages = [] SNR_averages = [] for name in devices_list: dictPSD = {} dictSNR = {} dicttime = {} dictPSD['AudioMothCode'] = name dictPSD['values'] = data[data['AudioMothCode'] == name].loc[:,'PSD'].values dictPSD['Average PSD'] = Average(dictPSD['values']) devices_PSD[name] = dictPSD PSD_averages.append(Average(dictPSD['values'])) dictSNR['AudioMothCode'] = name dictSNR['values'] = data[data['AudioMothCode'] == name].loc[:,'SNR'].values dictSNR['Average SNR'] = Average(dictSNR['values']) devices_SNR[name] = dictSNR SNR_averages.append(Average(dictSNR['values'])) dicttime['AudioMothCode'] = name dicttime['values'] = data[data['AudioMothCode'] == name].loc[:,'HourDecTime'].values dicttime['average'] = Average(dictPSD['values']) # dicttime['values'] = list(map(int, dicttime['values'])) devices_time[name] = dicttime PSD_BAR = pd.DataFrame(devices_PSD, columns = devices_list) PSD_BAR = PSD_BAR.drop('values') PSD_BAR = PSD_BAR.T PSD_BAR APSD = sns.barplot(x='AudioMothCode', y='Average PSD', data=PSD_BAR, palette="crest") APSD.set_xticklabels(APSD.get_xticklabels(), rotation=90, horizontalalignment='right') APSD.set_title('Average PSD Values per AudioMoth') APSD SNR_BAR = pd.DataFrame(devices_SNR, columns = devices_list) SNR_BAR = SNR_BAR.drop('values') SNR_BAR = SNR_BAR.T SNR_BAR ASNR = sns.barplot(x='AudioMothCode', y='Average SNR', data=SNR_BAR, palette="crest") ASNR.set_xticklabels(ASNR.get_xticklabels(), rotation=90, horizontalalignment='right') ASNR.set_title('Average SNR Values per AudioMoth') ASNR hour_index = 0 for name in device_names: for i in range(0, 23): PSDheat = data PSDheat.pivot("AudioMothCode", "HourDecTime", "PSD") heatmapPSD = sns.heatmap() def mapcolor(value): if value > 0.0: return 'blue' return 'red' mPSD = folium.Map(location=[-11.382,-69.951], zoom_start=10, tiles='Stamen Terrain') # mPSD = folium.Map(location=[-11.382,-69.951], zoom_start=10, # tiles='https://api.mapbox.com/styles/v1/nishantbalaji/ckoqa9o9x35hd18qe8rpnlsug/tiles/256/{z}/{x}/{y}@2x?access_token=pk.eyJ1IjoibmlzaGFudGJhbGFqaSIsImEiOiJja2xkOGl3cjcxc21yMndtdmxtZWpxeGRuIn0.isOPq2BjpvuzwjZMXW1yWA', # attr='Mapbox') # trying to style it to look nicer, but lets get the images on the map first # zoom_start=12, # 
tiles='https://api.mapbox.com/styles/v1/nishantbalaji/ckoqa9o9x35hd18qe8rpnlsug/tiles/256/{z}/{x}/{y}@2x?access_token=pk.eyJ1IjoibmlzaGFudGJhbGFqaSIsImEiOiJja2xkOGl3cjcxc21yMndtdmxtZWpxeGRuIn0.isOPq2BjpvuzwjZMXW1yWA', # attr='Mapbox' for i in range(0,len(devices_list)): folium.CircleMarker( location=[coords.iloc[i]['Lat'], coords.iloc[i]['Long']], popup=PSD_BAR.iloc[i]['AudioMothCode']+" \nAverage PSD: "+str(PSD_BAR.iloc[i]['Average PSD']), radius=float(abs(PSD_BAR.iloc[i]['Average PSD']))* 2, color=mapcolor(PSD_BAR.iloc[i]['Average PSD']), fill=True, fill_color=mapcolor(PSD_BAR.iloc[i]['Average PSD']) ).add_to(mPSD) mPSD mSNR = folium.Map(location=[-11.382,-69.951], zoom_start=10, tiles='Stamen Terrain') # mSNR = folium.Map(location=[-11.382,-69.951], zoom_start=10, # tiles='https://api.mapbox.com/styles/v1/nishantbalaji/ckoqa9o9x35hd18qe8rpnlsug/tiles/256/{z}/{x}/{y}@2x?access_token=pk.eyJ1IjoibmlzaGFudGJhbGFqaSIsImEiOiJja2xkOGl3cjcxc21yMndtdmxtZWpxeGRuIn0.isOPq2BjpvuzwjZMXW1yWA', # attr='Mapbox') for i in range(0,len(devices_list)): folium.CircleMarker( location=[coords.iloc[i]['Lat'], coords.iloc[i]['Long']], popup=SNR_BAR.iloc[i]['AudioMothCode']+" \nAverage SNR: "+str(SNR_BAR.iloc[i]['Average SNR']), radius=float(abs(SNR_BAR.iloc[i]['Average SNR'])) * 40, color=mapcolor(SNR_BAR.iloc[i]['Average SNR']), fill=True, fill_color=mapcolor(SNR_BAR.iloc[i]['Average SNR']) ).add_to(mSNR) mSNR mPSD.save("AveragePSD.html") mSNR.save("AverageSNR.html") ```
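The heatmap cell above is left unfinished (it references an undefined `device_names` and calls `sns.heatmap()` without data). One way to complete it, sketched here with an assumed mean aggregation of the PSD values per device and hour:

```
import matplotlib.pyplot as plt

# Pivot to mean PSD per AudioMoth (rows) and hour of day (columns), then draw a heatmap.
# pivot_table with an explicit mean is used because each device/hour pair has many rows.
psd_by_hour = data.pivot_table(index="AudioMothCode", columns="HourDecTime",
                               values="PSD", aggfunc="mean")

plt.figure(figsize=(12, 6))
heatmapPSD = sns.heatmap(psd_by_hour, cmap="crest")
heatmapPSD.set_title("Mean PSD per AudioMoth and hour of day")
plt.show()
```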
github_jupyter
# Classification This notebook aims at giving an overview of the classification metrics that can be used to evaluate the predictive model generalization performance. We can recall that in a classification setting, the vector `target` is categorical rather than continuous. We will load the blood transfusion dataset. ``` import pandas as pd blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv") data = blood_transfusion.drop(columns="Class") target = blood_transfusion["Class"] ``` <div class="admonition note alert alert-info"> <p class="first admonition-title" style="font-weight: bold;">Note</p> <p class="last">If you want a deeper overview regarding this dataset, you can refer to the Appendix - Datasets description section at the end of this MOOC.</p> </div> Let's start by checking the classes present in the target vector `target`. ``` import matplotlib.pyplot as plt target.value_counts().plot.barh() plt.xlabel("Number of samples") _ = plt.title("Number of samples per classes present\n in the target") ``` We can see that the vector `target` contains two classes corresponding to whether a subject gave blood. We will use a logistic regression classifier to predict this outcome. To focus on the metrics presentation, we will only use a single split instead of cross-validation. ``` from sklearn.model_selection import train_test_split data_train, data_test, target_train, target_test = train_test_split( data, target, shuffle=True, random_state=0, test_size=0.5) ``` We will use a logistic regression classifier as a base model. We will train the model on the train set, and later use the test set to compute the different classification metric. ``` from sklearn.linear_model import LogisticRegression classifier = LogisticRegression() classifier.fit(data_train, target_train) ``` ## Classifier predictions Before we go into details regarding the metrics, we will recall what type of predictions a classifier can provide. For this reason, we will create a synthetic sample for a new potential donor: he/she donated blood twice in the past (1000 c.c. each time). The last time was 6 months ago, and the first time goes back to 20 months ago. ``` new_donor = [[6, 2, 1000, 20]] ``` We can get the class predicted by the classifier by calling the method `predict`. ``` classifier.predict(new_donor) ``` With this information, our classifier predicts that this synthetic subject is more likely to not donate blood again. However, we cannot check whether the prediction is correct (we do not know the true target value). That's the purpose of the testing set. First, we predict whether a subject will give blood with the help of the trained classifier. ``` target_predicted = classifier.predict(data_test) target_predicted[:5] ``` ## Accuracy as a baseline Now that we have these predictions, we can compare them with the true predictions (sometimes called ground-truth) which we did not use until now. ``` target_test == target_predicted ``` In the comparison above, a `True` value means that the value predicted by our classifier is identical to the real value, while a `False` means that our classifier made a mistake. One way of getting an overall rate representing the generalization performance of our classifier would be to compute how many times our classifier was right and divide it by the number of samples in our set. ``` import numpy as np np.mean(target_test == target_predicted) ``` This measure is called the accuracy. Here, our classifier is 78% accurate at classifying if a subject will give blood. 
`scikit-learn` provides a function that computes this metric in the module `sklearn.metrics`. ``` from sklearn.metrics import accuracy_score accuracy = accuracy_score(target_test, target_predicted) print(f"Accuracy: {accuracy:.3f}") ``` `LogisticRegression` also has a method named `score` (part of the standard scikit-learn API), which computes the accuracy score. ``` classifier.score(data_test, target_test) ``` ## Confusion matrix and derived metrics The comparison that we did above and the accuracy that we calculated did not take into account the type of error our classifier was making. Accuracy is an aggregate of the errors made by the classifier. We may be interested in finer granularity - to know independently what the error is for each of the two following cases: - we predicted that a person will give blood but she/he did not; - we predicted that a person will not give blood but she/he did. ``` from sklearn.metrics import ConfusionMatrixDisplay _ = ConfusionMatrixDisplay.from_estimator(classifier, data_test, target_test) ``` The in-diagonal numbers are related to predictions that were correct while off-diagonal numbers are related to incorrect predictions (misclassifications). We now know the four types of correct and erroneous predictions: * the top left corner are true positives (TP) and corresponds to people who gave blood and were predicted as such by the classifier; * the bottom right corner are true negatives (TN) and correspond to people who did not give blood and were predicted as such by the classifier; * the top right corner are false negatives (FN) and correspond to people who gave blood but were predicted to not have given blood; * the bottom left corner are false positives (FP) and correspond to people who did not give blood but were predicted to have given blood. Once we have split this information, we can compute metrics to highlight the generalization performance of our classifier in a particular setting. For instance, we could be interested in the fraction of people who really gave blood when the classifier predicted so or the fraction of people predicted to have given blood out of the total population that actually did so. The former metric, known as the precision, is defined as TP / (TP + FP) and represents how likely the person actually gave blood when the classifier predicted that they did. The latter, known as the recall, defined as TP / (TP + FN) and assesses how well the classifier is able to correctly identify people who did give blood. We could, similarly to accuracy, manually compute these values, however scikit-learn provides functions to compute these statistics. ``` from sklearn.metrics import precision_score, recall_score precision = precision_score(target_test, target_predicted, pos_label="donated") recall = recall_score(target_test, target_predicted, pos_label="donated") print(f"Precision score: {precision:.3f}") print(f"Recall score: {recall:.3f}") ``` These results are in line with what was seen in the confusion matrix. Looking at the left column, more than half of the "donated" predictions were correct, leading to a precision above 0.5. However, our classifier mislabeled a lot of people who gave blood as "not donated", leading to a very low recall of around 0.1. ## The issue of class imbalance At this stage, we could ask ourself a reasonable question. While the accuracy did not look bad (i.e. 77%), the recall score is relatively low (i.e. 12%). 
As we mentioned, precision and recall only focuses on samples predicted to be positive, while accuracy takes both into account. In addition, we did not look at the ratio of classes (labels). We could check this ratio in the training set. ``` target_train.value_counts(normalize=True).plot.barh() plt.xlabel("Class frequency") _ = plt.title("Class frequency in the training set") ``` We observe that the positive class, `'donated'`, comprises only 24% of the samples. The good accuracy of our classifier is then linked to its ability to correctly predict the negative class `'not donated'` which may or may not be relevant, depending on the application. We can illustrate the issue using a dummy classifier as a baseline. ``` from sklearn.dummy import DummyClassifier dummy_classifier = DummyClassifier(strategy="most_frequent") dummy_classifier.fit(data_train, target_train) print(f"Accuracy of the dummy classifier: " f"{dummy_classifier.score(data_test, target_test):.3f}") ``` With the dummy classifier, which always predicts the negative class `'not donated'`, we obtain an accuracy score of 76%. Therefore, it means that this classifier, without learning anything from the data `data`, is capable of predicting as accurately as our logistic regression model. The problem illustrated above is also known as the class imbalance problem. When the classes are imbalanced, accuracy should not be used. In this case, one should either use the precision and recall as presented above or the balanced accuracy score instead of accuracy. ``` from sklearn.metrics import balanced_accuracy_score balanced_accuracy = balanced_accuracy_score(target_test, target_predicted) print(f"Balanced accuracy: {balanced_accuracy:.3f}") ``` The balanced accuracy is equivalent to accuracy in the context of balanced classes. It is defined as the average recall obtained on each class. ## Evaluation and different probability thresholds All statistics that we presented up to now rely on `classifier.predict` which outputs the most likely label. We haven't made use of the probability associated with this prediction, which gives the confidence of the classifier in this prediction. By default, the prediction of a classifier corresponds to a threshold of 0.5 probability in a binary classification problem. We can quickly check this relationship with the classifier that we trained. ``` target_proba_predicted = pd.DataFrame(classifier.predict_proba(data_test), columns=classifier.classes_) target_proba_predicted[:5] target_predicted = classifier.predict(data_test) target_predicted[:5] ``` Since probabilities sum to 1 we can get the class with the highest probability without using the threshold 0.5. ``` equivalence_pred_proba = ( target_proba_predicted.idxmax(axis=1).to_numpy() == target_predicted) np.all(equivalence_pred_proba) ``` The default decision threshold (0.5) might not be the best threshold that leads to optimal generalization performance of our classifier. In this case, one can vary the decision threshold, and therefore the underlying prediction, and compute the same statistics presented earlier. Usually, the two metrics recall and precision are computed and plotted on a graph. Each metric plotted on a graph axis and each point on the graph corresponds to a specific decision threshold. Let's start by computing the precision-recall curve. 
``` from sklearn.metrics import PrecisionRecallDisplay disp = PrecisionRecallDisplay.from_estimator( classifier, data_test, target_test, pos_label='donated', marker="+" ) _ = disp.ax_.set_title("Precision-recall curve") ``` <div class="admonition tip alert alert-warning"> <p class="first admonition-title" style="font-weight: bold;">Tip</p> <p class="last">Scikit-learn will return a display containing all plotting element. Notably, displays will expose a matplotlib axis, named <tt class="docutils literal">ax_</tt>, that can be used to add new element on the axis. You can refer to the documentation to have more information regarding the <a class="reference external" href="https://scikit-learn.org/stable/visualizations.html#visualizations">visualizations in scikit-learn</a></p> </div> On this curve, each blue cross corresponds to a level of probability which we used as a decision threshold. We can see that, by varying this decision threshold, we get different precision vs. recall values. A perfect classifier would have a precision of 1 for all recall values. A metric characterizing the curve is linked to the area under the curve (AUC) and is named average precision (AP). With an ideal classifier, the average precision would be 1. The precision and recall metric focuses on the positive class, however, one might be interested in the compromise between accurately discriminating the positive class and accurately discriminating the negative classes. The statistics used for this are sensitivity and specificity. Sensitivity is just another name for recall. However, specificity measures the proportion of correctly classified samples in the negative class defined as: TN / (TN + FP). Similar to the precision-recall curve, sensitivity and specificity are generally plotted as a curve called the receiver operating characteristic (ROC) curve. Below is such a curve: ``` from sklearn.metrics import RocCurveDisplay disp = RocCurveDisplay.from_estimator( classifier, data_test, target_test, pos_label='donated', marker="+") disp = RocCurveDisplay.from_estimator( dummy_classifier, data_test, target_test, pos_label='donated', color="tab:orange", linestyle="--", ax=disp.ax_) _ = disp.ax_.set_title("ROC AUC curve") ``` This curve was built using the same principle as the precision-recall curve: we vary the probability threshold for determining "hard" prediction and compute the metrics. As with the precision-recall curve, we can compute the area under the ROC (ROC-AUC) to characterize the generalization performance of our classifier. However, it is important to observe that the lower bound of the ROC-AUC is 0.5. Indeed, we show the generalization performance of a dummy classifier (the orange dashed line) to show that even the worst generalization performance obtained will be above this line.
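To make the threshold discussion concrete, here is a short sketch that recomputes precision and recall for a few alternative decision thresholds; the threshold values chosen are arbitrary and only for illustration:

```
from sklearn.metrics import precision_score, recall_score

# Locate the probability column for the positive class "donated".
donated_idx = list(classifier.classes_).index("donated")
proba_donated = classifier.predict_proba(data_test)[:, donated_idx]

# Recompute precision and recall for a few hand-picked thresholds.
for threshold in [0.3, 0.5, 0.7]:
    predicted = np.where(proba_donated >= threshold, "donated", "not donated")
    precision = precision_score(target_test, predicted, pos_label="donated")
    recall = recall_score(target_test, predicted, pos_label="donated")
    print(f"threshold={threshold:.2f}  precision={precision:.3f}  recall={recall:.3f}")
```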
github_jupyter
To enter presentation mode, run the following cell and press `-` ``` %reload_ext slide ``` <span class="notebook-slide-start"/> # Proxy This notebook covers the following topics: - [Introduction](#Introduction) - [Proxy server](#Proxy-server) ## Introduction There is a lot of information available in software repositories. Below is a *screenshot* of the `gems-uff/sapos` repository. <img src="images/githubexample.png" alt="GitHub repository home page" width="auto"/> In this image, we see the organization and the name of the repository <img src="images/githubexample1.png" alt="GitHub repository home page with the repository name selected" width="auto"/> Stars, forks, watchers <img src="images/githubexample2.png" alt="GitHub repository home page with watchers, star and fork selected" width="auto"/> Number of issues and pull requests <img src="images/githubexample3.png" alt="GitHub repository home page with the number of issues and pull requests selected" width="auto"/> Number of commits, branches, releases, contributors and license <span class="notebook-slide-extra" data-count="1"/> <img src="images/githubexample4.png" alt="GitHub repository home page with the number of commits, branches, releases, contributors and license selected" width="auto"/> Files <img src="images/githubexample5.png" alt="GitHub repository home page with files selected" width="auto"/> Message and date of the commits that last changed those files <img src="images/githubexample6.png" alt="GitHub repository home page with files selected" width="auto"/> We can extract information from software repositories in 3 ways: - Crawling the repository website - APIs that provide data - Directly from the version control system In this short course we will cover all 3 approaches, but we will pay more attention to the GitHub APIs and to direct extraction from Git. ## Proxy server Repository servers usually limit the number of requests we can make. In general, this limitation does not really affect sporadic use of the services for mining. However, while we are developing something, we may exceed the limit with repeated requests. To avoid this problem, we will set up a simple proxy server in Flask. When we use a proxy server, instead of making requests directly to the destination site, we make requests to the proxy server, which then forwards the requests to the destination site. Upon receiving the result of the request, the proxy caches the result and returns it to us. If a request has already been made through the proxy server, it simply returns the cached result. ### Proxy implementation The implementation of the proxy server is in the file `proxy.py`. Since we want to run the proxy in parallel with the notebook, the server needs to be run externally. Nevertheless, the proxy code is explained here. We start the file with the necessary imports. ```python import hashlib import requests import simplejson import os import sys from flask import Flask, request, Response ``` The `hashlib` library is used to hash the requests. The `requests` library is used to make requests to GitHub. The `simplejson` library is used to convert requests and responses to JSON. The `os` library is used to manipulate directory paths and check for the existence of files.
The `sys` library is used to read the command-line arguments. Finally, `flask` is used as the server. Next, we define the site we will proxy, the headers excluded from the received response, and create a `Flask` `app`. Note that `SITE` is set to the first command-line argument of the program, or to https://github.com/ when no argument is given. ```python if len(sys.argv) > 1: SITE = sys.argv[1] else: SITE = "https://github.com/" EXCLUDED_HEADERS = ['content-encoding', 'content-length', 'transfer-encoding', 'connection'] app = Flask(__name__) ``` Then we define a function to handle every possible route and method the server can receive. ```python METHODS = ['GET', 'POST', 'PATCH', 'PUT', 'DELETE'] @app.route('/', defaults={'path': ''}, methods=METHODS) @app.route('/<path:path>', methods=METHODS) def catch_all(path): ``` Inside this function, we build a request dictionary based on the request received by `flask`. ```python request_dict = { "method": request.method, "url": request.url.replace(request.host_url, SITE), "headers": {key: value for (key, value) in request.headers if key != 'Host'}, "data": request.get_data(), "cookies": request.cookies, "allow_redirects": False } ``` In this request, we replace the host with the target site. Next, we convert the dictionary to JSON and compute the SHA1 hash of the result. ```python request_json = simplejson.dumps(request_dict, sort_keys=True) sha1 = hashlib.sha1(request_json.encode("utf-8")).hexdigest() path_req = os.path.join("cache", sha1 + ".req") path_resp = os.path.join("cache", sha1 + ".resp") ``` In the `cache` directory we store `{sha1}.req` and `{sha1}.resp` files with the request and response of the cached results. With this, when a request arrives, we can check whether `{sha1}.req` exists. If it exists, we can compare it with our request (to avoid collisions). Finally, if they are equal, we can return the cached response. ```python if os.path.exists(path_req): with open(path_req, "r") as req: req_read = req.read() if req_read == request_json: with open(path_resp, "r") as dump: response = simplejson.load(dump) return Response( response["content"], response["status_code"], response["headers"] ) ``` If the request is not in the cache, we turn the request dictionary into a `requests` request to GitHub, drop the headers populated by `flask`, and build a JSON document for the response. ```python resp = requests.request(**request_dict) headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in EXCLUDED_HEADERS] response = { "content": resp.content, "status_code": resp.status_code, "headers": headers } response_json = simplejson.dumps(response, sort_keys=True) ``` After that, we save the response to the cache and return it to the original client. ```python with open(path_resp, "w") as dump: dump.write(response_json) with open(path_req, "w") as req: req.write(request_json) return Response( response["content"], response["status_code"], response["headers"] ) ``` At the end of the script, we start the server. ```python if __name__ == '__main__': app.run(debug=True) ``` ### Using the proxy Run the following line in a terminal: ```bash python proxy.py ``` Now, every request that we would make to github.com we make to localhost:5000 instead.
For example, instead of accessing https://github.com/gems-uff/sapos, we will access http://localhost:5000/gems-uff/sapos ### Request with requests Below we make a request with requests to the proxy. <span class="notebook-slide-extra" data-count="2"/> ``` SITE = "http://localhost:5000/" # If not using the proxy, change to https://github.com/ import requests response = requests.get(SITE + "gems-uff/sapos") response.headers['server'], response.status_code ``` <span class="notebook-slide-scroll" data-position="-1"/> We can see that the result came from GitHub and that the request worked, given that the status code was 200. Continues in: [5.Crawling.ipynb](5.Crawling.ipynb)
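As an optional aside (not part of the original notebook), one way to confirm that the cache is working is to time the same request twice against the proxy started above; the second call should return noticeably faster because it is served from the `cache` directory rather than from github.com. This assumes `proxy.py` is running locally on port 5000:

```python
import time
import requests

PROXY = "http://localhost:5000/"  # assumes proxy.py is running locally

for attempt in (1, 2):
    start = time.time()
    r = requests.get(PROXY + "gems-uff/sapos")
    print("attempt", attempt, "status", r.status_code,
          "took %.2f s" % (time.time() - start))
# The second attempt should be answered from the {sha1}.resp cache file.
```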
github_jupyter
``` import pandas as pd import numpy as np import time import seaborn as sns import matplotlib.pyplot as plt from sklearn import preprocessing as pp from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score from sklearn import preprocessing import xgboost as xgb from sklearn.ensemble import BaggingClassifier import lightgbm as lgb from sklearn.naive_bayes import GaussianNB from sklearn import preprocessing as pp from sklearn.neighbors import KNeighborsClassifier from sklearn import tree from sklearn.model_selection import cross_val_score from sklearn.model_selection import cross_val_predict from statistics import mode from sklearn.model_selection import cross_val_score, cross_validate, train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier import xgboost as xgb import lightgbm as lgb #Todas las librerías para los distintos algoritmos from sklearn.naive_bayes import GaussianNB from sklearn.naive_bayes import ComplementNB from sklearn.naive_bayes import BernoulliNB from sklearn.naive_bayes import MultinomialNB from sklearn.calibration import CalibratedClassifierCV from sklearn.svm import LinearSVC from sklearn.svm import OneClassSVM from sklearn.svm import SVC from sklearn.svm import NuSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier import matplotlib.pyplot as plt import sklearn.metrics as metrics from sklearn.neural_network import MLPClassifier from sklearn.ensemble import BaggingClassifier import statistics from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.decomposition import PCA from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler import warnings from mlxtend.classifier import StackingClassifier from mlxtend.classifier import StackingCVClassifier from sklearn.neural_network import MLPClassifier from sklearn.ensemble import GradientBoostingClassifier from pylab import rcParams from collections import Counter warnings.simplefilter('ignore') data_train= pd.read_csv("./datos/train.csv",na_values=["?"]) data_test= pd.read_csv("./datos/test.csv",na_values=["?"]) data_trainCopia = data_train.copy() data_testCopia = data_test.copy() Nombre = LabelEncoder().fit(pd.read_csv("./datos/nombre.csv").Nombre) Año = LabelEncoder().fit(pd.read_csv("./datos/ao.csv").Año) Ciudad = LabelEncoder().fit(pd.read_csv("./datos/ciudad.csv").Ciudad) Combustible = LabelEncoder().fit(pd.read_csv("./datos/combustible.csv").Combustible) Consumo = LabelEncoder().fit(pd.read_csv("./datos/consumo.csv").Consumo) Descuento = LabelEncoder().fit(pd.read_csv("./datos/descuento.csv").Descuento) Kilometros = LabelEncoder().fit(pd.read_csv("./datos/kilometros.csv").Kilometros) Mano = LabelEncoder().fit(pd.read_csv("./datos/mano.csv").Mano) Potencia = LabelEncoder().fit(pd.read_csv("./datos/potencia.csv").Potencia) Asientos = LabelEncoder().fit(pd.read_csv("./datos/asientos.csv").Asientos) Motor_CC=LabelEncoder().fit(pd.read_csv("./datos/motor_cc.csv").Motor_CC) Tipo_marchas=LabelEncoder().fit(pd.read_csv("./datos/Tipo_marchas.csv").Tipo_marchas) 
data_trainCopia['Nombre']=data_trainCopia['Nombre'].fillna(mode(data_trainCopia['Nombre'])) data_trainCopia['Año']=data_trainCopia['Año'].fillna(mode(data_trainCopia['Año'])) data_trainCopia['Ciudad']=data_trainCopia['Ciudad'].fillna(mode(data_trainCopia['Ciudad'])) #data_trainCopia['Kilometros']=data_trainCopia['Kilometros'].fillna(mode(data_trainCopia['Kilometros'])) data_trainCopia['Combustible']=data_trainCopia['Combustible'].fillna(mode(data_trainCopia['Combustible'])) data_trainCopia['Tipo_marchas']=data_trainCopia['Tipo_marchas'].fillna(mode(data_trainCopia['Tipo_marchas'])) #data_trainCopia['Mano']=data_trainCopia['Mano'].fillna(mode(data_trainCopia['Mano'])) data_trainCopia['Consumo']=data_trainCopia['Consumo'].fillna(mode(data_trainCopia['Consumo'])) data_trainCopia['Motor_CC']=data_trainCopia['Motor_CC'].fillna(mode(data_trainCopia['Motor_CC'])) data_trainCopia['Potencia']=data_trainCopia['Potencia'].fillna(mode(data_trainCopia['Potencia'])) data_trainCopia['Asientos']=data_trainCopia['Asientos'].fillna(mode(data_trainCopia['Asientos'])) data_trainCopia['Descuento']=data_trainCopia['Descuento'].fillna(mode(data_trainCopia['Descuento'])) #Eliminamos las columnas que no necesitamos data_trainCopia=data_trainCopia.drop(['Descuento'], axis=1) data_trainCopia=data_trainCopia.drop(['id'], axis=1) data_trainCopia=data_trainCopia.drop(['Kilometros'], axis=1) data_testCopia=data_testCopia.drop(['Descuento'], axis=1) data_testCopia=data_testCopia.drop(['id'], axis=1) data_testCopia=data_testCopia.drop(['Kilometros'], axis=1) #Eliminamos los nan de los ids data_trainCopia=data_trainCopia.dropna() data_testCopia=data_testCopia.dropna() #Codificación de las filas data_trainCopia.Nombre = Nombre.transform(data_trainCopia.Nombre) data_trainCopia.Año = Año.transform(data_trainCopia.Año) data_trainCopia.Ciudad = Ciudad.transform(data_trainCopia.Ciudad) data_trainCopia.Combustible = Combustible.transform(data_trainCopia.Combustible) data_trainCopia.Potencia = Potencia.transform(data_trainCopia.Potencia) data_trainCopia.Consumo = Consumo.transform(data_trainCopia.Consumo) #data_trainCopia.Kilometros = Kilometros.transform(data_trainCopia.Kilometros) data_trainCopia.Mano = Mano.transform(data_trainCopia.Mano) data_trainCopia.Motor_CC = Motor_CC.transform(data_trainCopia.Motor_CC) data_trainCopia.Tipo_marchas = Tipo_marchas.transform(data_trainCopia.Tipo_marchas) data_trainCopia.Asientos = Asientos.transform(data_trainCopia.Asientos) #------------------------------------------------------------------------------------------- data_testCopia.Nombre = Nombre.transform(data_testCopia.Nombre) data_testCopia.Año = Año.transform(data_testCopia.Año) data_testCopia.Ciudad = Ciudad.transform(data_testCopia.Ciudad) data_testCopia.Combustible = Combustible.transform(data_testCopia.Combustible) data_testCopia.Potencia = Potencia.transform(data_testCopia.Potencia) data_testCopia.Consumo = Consumo.transform(data_testCopia.Consumo) #data_testCopia.Kilometros = Kilometros.transform(data_testCopia.Kilometros) data_testCopia.Mano = Mano.transform(data_testCopia.Mano) data_testCopia.Tipo_marchas = Tipo_marchas.transform(data_testCopia.Tipo_marchas) data_testCopia.Asientos = Asientos.transform(data_testCopia.Asientos) data_testCopia.Motor_CC = Motor_CC.transform(data_testCopia.Motor_CC) target = pd.read_csv('./datos/precio_cat.csv') target_train=data_trainCopia['Precio_cat'] data_trainCopia=data_trainCopia.drop(['Precio_cat'], axis=1) GradientBoostingClassifier(criterion='friedman_mse', init=None, learning_rate=0.1, 
loss='deviance', max_depth=3, max_features=None, max_leaf_nodes=None, min_impurity_split=1e-07, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=100, presort='auto', random_state=None, subsample=1.0, verbose=0, warm_start=False) from imblearn.over_sampling import SMOTE Xo, yo = SMOTE(random_state=42).fit_resample(data_trainCopia, target_train) clf = GradientBoostingClassifier(learning_rate=0.07, n_estimators=700, max_depth=2) scores = cross_val_score(clf, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(Xo, yo) preclf = clfEntrenado.predict(data_testCopia) clf = GradientBoostingClassifier(learning_rate=0.09, n_estimators=700, max_depth=2) scores = cross_val_score(clf, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(Xo, yo) preclf = clfEntrenado.predict(data_testCopia) clf = GradientBoostingClassifier(learning_rate=0.9, n_estimators=750, max_depth=2) scores = cross_val_score(clf, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(Xo, yo) preclfOverGradient = clfEntrenado.predict(data_testCopia) dfAux = pd.DataFrame({'id':data_test['id']}) dfAux.set_index('id', inplace=True) dfFinal = pd.DataFrame({'id': data_test['id'], 'Precio_cat': preclfOverGradient}, columns=['id', 'Precio_cat']) dfFinal.set_index('id', inplace=True) dfFinal.to_csv("./soluciones/GradientOverSamplingConRandomStateScoreLocal895628.csv") clf = GradientBoostingClassifier(learning_rate=0.055, n_estimators=2500, max_depth=2) scores = cross_val_score(clf, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(Xo, yo) preclfOverGradient = clfEntrenado.predict(data_testCopia) clf = GradientBoostingClassifier(learning_rate=0.5, n_estimators=400) scores = cross_val_score(clf, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(Xo, yo) preclfOverGradient = clfEntrenado.predict(data_testCopia) clf = GradientBoostingClassifier(learning_rate=0.5, n_estimators=100, max_depth=6) scores = cross_val_score(clf, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(Xo, yo) preclfOverGradient = clfEntrenado.predict(data_testCopia) clf = GradientBoostingClassifier(learning_rate=0.5, n_estimators=100, max_depth=6) scores = cross_val_score(clf, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(Xo, yo) preclfOverGradient = clfEntrenado.predict(data_testCopia) clf = GradientBoostingClassifier(learning_rate=0.5, n_estimators=100, max_depth=6) scores = cross_val_score(clf, data_trainCopia, target_train, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(data_trainCopia, target_train) preclfOverGradient = clfEntrenado.predict(data_testCopia) lgbm1 = lgb.LGBMClassifier(learning_rate=0.5, objective='binary', n_estimators=550, n_jobs=2, num_leaves=11, max_depth=-1, reg_alpha=0.1) scores = cross_val_score(lgbm1, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada CON MODELO", np.mean(scores)*100) lgbmEntrenado = lgbm1.fit(Xo, yo) preMamaJuanca = lgbmEntrenado.predict(data_testCopia) lgbm1 = lgb.LGBMClassifier(learning_rate=0.5, objective='binary', n_estimators=550, n_jobs=2, num_leaves=11, max_depth=-1, reg_alpha=0.1) scores = 
cross_val_score(lgbm1, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada CON MODELO", np.mean(scores)*100) lgbmEntrenado = lgbm1.fit(Xo, yo) preMamaJuanca = lgbmEntrenado.predict(data_testCopia) lgbm1 = lgb.LGBMClassifier(learning_rate=0.2, objective='binary', n_estimators=550, n_jobs=2, num_leaves=11, max_depth=-1) scores = cross_val_score(lgbm1, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada CON MODELO", np.mean(scores)*100) lgbmEntrenado = lgbm1.fit(Xo, yo) preMamaJuanca = lgbmEntrenado.predict(data_testCopia) lgbm1 = lgb.LGBMClassifier(learning_rate=0.2, objective='multiclassova', n_estimators=550, n_jobs=2, num_leaves=11, max_depth=-1) scores = cross_val_score(lgbm1, data_trainCopia, target_train, cv=5, scoring='accuracy') print("Score Validacion Cruzada CON MODELO", np.mean(scores)*100) lgbmEntrenado = lgbm1.fit(data_trainCopia, target_train) preMamaJuanca = lgbmEntrenado.predict(data_testCopia) lgbm1 = lgb.LGBMClassifier(learning_rate=0.3, objective='binary', n_estimators=500, n_jobs=2, num_leaves=11, max_depth=-1) scores = cross_val_score(lgbm1, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada CON MODELO", np.mean(scores)*100) lgbmEntrenado = lgbm1.fit(Xo, yo) preMamaJuanca = lgbmEntrenado.predict(data_testCopia) lgbm1 = lgb.LGBMClassifier(learning_rate=0.3, objective='binary', n_estimators=60, n_jobs=2, num_leaves=8, max_depth=8) scores = cross_val_score(lgbm1, data_trainCopia, target_train, cv=5, scoring='accuracy') print("Score Validacion Cruzada CON MODELO", np.mean(scores)*100) lgbmEntrenado = lgbm1.fit(data_trainCopia, target_train) preMamaJuanca = lgbmEntrenado.predict(data_testCopia) #GRADIENT BOOSTING CON PARAMETROS DE GRIDSHARE Y DATOS SIN NORMALIZAR clf = GradientBoostingClassifier(learning_rate=0.3, n_estimators=70, max_depth=4) scores = cross_val_score(clf, data_trainCopia, target_train, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(data_trainCopia, target_train) preclfOverGradient = clfEntrenado.predict(data_testCopia) #ESTO ME HA DICHO QUE ES LO QUE ME VA A SUBIR #GRADIENT BOOSTING CON PARAMETROS DE GRIDSHARE Y DATOS NORMALIZADOS clf = GradientBoostingClassifier(learning_rate=0.5, n_estimators=70, max_depth=5, random_state=42) scores = cross_val_score(clf, Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada", np.mean(scores)*100) clfEntrenado = clf.fit(Xo, yo) preclfOverGradient = clfEntrenado.predict(data_testCopia) lgbm1 = lgb.LGBMClassifier(learning_rate=0.7, objective='binary', n_estimators=70, n_jobs=2, num_leaves=10, max_depth=4, random_state=42) scores = cross_val_score(lgbm1,Xo, yo, cv=5, scoring='accuracy') print("Score Validacion Cruzada CON MODELO", np.mean(scores)*100) lgbmEntrenado = lgbm1.fit(Xo, yo) preMamaJuanca = lgbmEntrenado.predict(data_testCopia) ```
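The comments above mention hyperparameters obtained from a grid search ("PARAMETROS DE GRIDSHARE"). As a hedged illustration of how such a search could be set up with scikit-learn (assuming the oversampled `Xo`, `yo` from the SMOTE cell above are available; the grid values below are examples, not the ones actually used by the author), one could write:

```python
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV

# Example grid; values are illustrative assumptions.
param_grid = {
    "learning_rate": [0.05, 0.1, 0.3, 0.5],
    "n_estimators": [70, 100, 400, 700],
    "max_depth": [2, 4, 6],
}

search = GridSearchCV(GradientBoostingClassifier(random_state=42),
                      param_grid, cv=5, scoring="accuracy", n_jobs=-1)
search.fit(Xo, yo)  # Xo, yo come from the SMOTE oversampling cell above

print("Best params:", search.best_params_)
print("Best CV accuracy:", search.best_score_ * 100)
```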
github_jupyter
<p></p> <p style="text-align:center"><font size="20">BRAIN IMAGING</font></p> <p style="text-align:center"><font size="20">DATA STRUCTURE</font></p> The dataset for this tutorial is structured according to the [Brain Imaging Data Structure (BIDS)](http://bids.neuroimaging.io/). BIDS is a simple and intuitive way to organize and describe your neuroimaging and behavioral data. Neuroimaging experiments result in complicated data that can be arranged in many different ways. So far there is no consensus on how to organize and share data obtained in neuroimaging experiments. BIDS tackles this problem by suggesting a new standard for the arrangement of neuroimaging datasets. The idea of BIDS is that the file and folder names follow a strict set of rules: ![](../static/images/bids.png) Using the same structure for all of your studies will allow you to easily reuse all of your scripts between studies. Additionally, it has the advantage that sharing code with, and using scripts from, other researchers will be much easier. # Tutorial Dataset For this tutorial, we will be using a subset of the [fMRI dataset (ds000114)](https://openfmri.org/dataset/ds000114/) publicly available on [openfmri.org](https://openfmri.org). **If you're using the suggested Docker image you probably have all data needed to run the tutorial within the Docker container.** If you want to have the data locally you can use [Datalad](http://datalad.org/) to download a subset of the dataset, via the [datalad repository](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds000114). In order to install the dataset with all subrepositories you can run: ``` %%bash cd /data datalad install -r ///workshops/nih-2017/ds000114 ``` In order to download data, you can use the ``datalad get foldername`` command to download all files in the folder ``foldername``. For this tutorial we only want to download part of the dataset, i.e. the anatomical and the functional `fingerfootlips` images: ``` %%bash cd /data/ds000114 datalad get -J 4 derivatives/fmriprep/sub-*/anat/*preproc.nii.gz \ sub-01/ses-test/anat \ sub-*/ses-test/func/*fingerfootlips* ``` So let's have a look at the tutorial dataset. ``` !tree -L 4 /data/ds000114/ ``` As you can see, for every subject we have one anatomical T1w image, five functional images, and one diffusion weighted image. **Note**: If you used `datalad` or `git annex` to get the dataset, you can see symlinks for the image files. # Behavioral Task Subjects in the ds000114 dataset performed five behavioral tasks. In our dataset two of them are included. The **motor task** consisted of ***finger tapping***, ***foot twitching*** and ***lip pouching*** interleaved with fixation at a cross. The **landmark task** was designed to mimic the ***line bisection task*** used in neurological practice to diagnose spatial hemineglect. Two conditions were contrasted, specifically judging if a horizontal line had been bisected exactly in the middle, versus judging if a horizontal line was bisected at all. You can find more about the dataset and the studies [here](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3641991/). For each of the functional images above, we therefore also have a tab-separated values file (``tsv``), containing information such as stimulus onset, duration, type, etc. So let's have a look at one of them: ``` %%bash cd /data/ds000114 datalad get sub-01/ses-test/func/sub-01_ses-test_task-linebisection_events.tsv !cat /data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-linebisection_events.tsv ```
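To work with those events programmatically rather than only `cat`-ing the file, a small follow-up sketch (not part of the original tutorial) could load the TSV into pandas; the `trial_type` column name is an assumption based on the BIDS convention and may differ in this particular file:

```python
import pandas as pd

events_file = ("/data/ds000114/sub-01/ses-test/func/"
               "sub-01_ses-test_task-linebisection_events.tsv")

# BIDS events files are tab-separated.
events = pd.read_csv(events_file, sep="\t")
print(events.head())

# Assumed column name; adjust to whatever the header printed above shows.
if "trial_type" in events.columns:
    print(events["trial_type"].value_counts())
```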
github_jupyter
STAT 453: Deep Learning (Spring 2021) Instructor: Sebastian Raschka ([email protected]) Course website: http://pages.stat.wisc.edu/~sraschka/teaching/stat453-ss2021/ GitHub repository: https://github.com/rasbt/stat453-deep-learning-ss21 --- ``` %load_ext watermark %watermark -a 'Sebastian Raschka' -v -p torch ``` - Runs on CPU or GPU (if available) # A Convolutional ResNet and Residual Blocks Please note that this example does not implement a really deep ResNet as described in literature but rather illustrates how the residual blocks described in He et al. [1] can be implemented in PyTorch. - [1] He, Kaiming, et al. "Deep residual learning for image recognition." *Proceedings of the IEEE conference on computer vision and pattern recognition*. 2016. ## Imports ``` import time import numpy as np import torch from torch.utils.data import DataLoader from torchvision import datasets from torchvision import transforms ``` ## Settings and Dataset ``` ########################## ### SETTINGS ########################## # Device device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu") # Hyperparameters random_seed = 123 learning_rate = 0.01 num_epochs = 10 batch_size = 128 # Architecture num_classes = 10 ########################## ### MNIST DATASET ########################## # Note transforms.ToTensor() scales input images # to 0-1 range train_dataset = datasets.MNIST(root='data', train=True, transform=transforms.ToTensor(), download=True) test_dataset = datasets.MNIST(root='data', train=False, transform=transforms.ToTensor()) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) # Checking the dataset for images, labels in train_loader: print('Image batch dimensions:', images.shape) print('Image label dimensions:', labels.shape) break ``` ## ResNet with identity blocks The following code implements the residual blocks with skip connections such that the input passed via the shortcut matches the dimensions of the main path's output, which allows the network to learn identity functions. 
Such a residual block is illustrated below: ![](./2-resnet-ex/resnet-ex-1-1.png) ``` ########################## ### MODEL ########################## class ConvNet(torch.nn.Module): def __init__(self, num_classes): super(ConvNet, self).__init__() ######################### ### 1st residual block ######################### self.block_1 = torch.nn.Sequential( torch.nn.Conv2d(in_channels=1, out_channels=4, kernel_size=(1, 1), stride=(1, 1), padding=0), torch.nn.BatchNorm2d(4), torch.nn.ReLU(inplace=True), torch.nn.Conv2d(in_channels=4, out_channels=1, kernel_size=(3, 3), stride=(1, 1), padding=1), torch.nn.BatchNorm2d(1) ) self.block_2 = torch.nn.Sequential( torch.nn.Conv2d(in_channels=1, out_channels=4, kernel_size=(1, 1), stride=(1, 1), padding=0), torch.nn.BatchNorm2d(4), torch.nn.ReLU(inplace=True), torch.nn.Conv2d(in_channels=4, out_channels=1, kernel_size=(3, 3), stride=(1, 1), padding=1), torch.nn.BatchNorm2d(1) ) ######################### ### Fully connected ######################### self.linear_1 = torch.nn.Linear(1*28*28, num_classes) def forward(self, x): ######################### ### 1st residual block ######################### shortcut = x x = self.block_1(x) x = torch.nn.functional.relu(x + shortcut) ######################### ### 2nd residual block ######################### shortcut = x x = self.block_2(x) x = torch.nn.functional.relu(x + shortcut) ######################### ### Fully connected ######################### logits = self.linear_1(x.view(-1, 1*28*28)) return logits torch.manual_seed(random_seed) model = ConvNet(num_classes=num_classes) model = model.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) ``` ### Training ``` def compute_accuracy(model, data_loader): correct_pred, num_examples = 0, 0 for i, (features, targets) in enumerate(data_loader): features = features.to(device) targets = targets.to(device) logits = model(features) _, predicted_labels = torch.max(logits, 1) num_examples += targets.size(0) correct_pred += (predicted_labels == targets).sum() return correct_pred.float()/num_examples * 100 start_time = time.time() for epoch in range(num_epochs): model = model.train() for batch_idx, (features, targets) in enumerate(train_loader): features = features.to(device) targets = targets.to(device) ### FORWARD AND BACK PROP logits = model(features) cost = torch.nn.functional.cross_entropy(logits, targets) optimizer.zero_grad() cost.backward() ### UPDATE MODEL PARAMETERS optimizer.step() ### LOGGING if not batch_idx % 250: print ('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f' %(epoch+1, num_epochs, batch_idx, len(train_loader), cost)) model = model.eval() # eval mode to prevent upd. batchnorm params during inference with torch.set_grad_enabled(False): # save memory during inference print('Epoch: %03d/%03d training accuracy: %.2f%%' % ( epoch+1, num_epochs, compute_accuracy(model, train_loader))) print('Time elapsed: %.2f min' % ((time.time() - start_time)/60)) print('Total Training Time: %.2f min' % ((time.time() - start_time)/60)) print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader))) ``` ## ResNet with convolutional blocks for resizing The following code implements the residual blocks with skip connections such that the input passed via the shortcut matches is resized to dimensions of the main path's output. 
Such a residual block is illustrated below: ![](./2-resnet-ex/resnet-ex-1-2.png) ``` class ResidualBlock(torch.nn.Module): """ Helper Class""" def __init__(self, channels): super(ResidualBlock, self).__init__() self.block = torch.nn.Sequential( torch.nn.Conv2d(in_channels=channels[0], out_channels=channels[1], kernel_size=(3, 3), stride=(2, 2), padding=1), torch.nn.BatchNorm2d(channels[1]), torch.nn.ReLU(inplace=True), torch.nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=(1, 1), stride=(1, 1), padding=0), torch.nn.BatchNorm2d(channels[2]) ) self.shortcut = torch.nn.Sequential( torch.nn.Conv2d(in_channels=channels[0], out_channels=channels[2], kernel_size=(1, 1), stride=(2, 2), padding=0), torch.nn.BatchNorm2d(channels[2]) ) def forward(self, x): shortcut = x block = self.block(x) shortcut = self.shortcut(x) x = torch.nn.functional.relu(block+shortcut) return x ########################## ### MODEL ########################## class ConvNet(torch.nn.Module): def __init__(self, num_classes): super(ConvNet, self).__init__() self.residual_block_1 = ResidualBlock(channels=[1, 4, 8]) self.residual_block_2 = ResidualBlock(channels=[8, 16, 32]) self.linear_1 = torch.nn.Linear(7*7*32, num_classes) def forward(self, x): out = self.residual_block_1(x) out = self.residual_block_2(out) logits = self.linear_1(out.view(-1, 7*7*32)) return logits torch.manual_seed(random_seed) model = ConvNet(num_classes=num_classes) model.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) ``` ### Training ``` for epoch in range(num_epochs): model = model.train() for batch_idx, (features, targets) in enumerate(train_loader): features = features.to(device) targets = targets.to(device) ### FORWARD AND BACK PROP logits = model(features) cost = torch.nn.functional.cross_entropy(logits, targets) optimizer.zero_grad() cost.backward() ### UPDATE MODEL PARAMETERS optimizer.step() ### LOGGING if not batch_idx % 50: print ('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f' %(epoch+1, num_epochs, batch_idx, len(train_dataset)//batch_size, cost)) model = model.eval() # eval mode to prevent upd. batchnorm params during inference with torch.set_grad_enabled(False): # save memory during inference print('Epoch: %03d/%03d training accuracy: %.2f%%' % ( epoch+1, num_epochs, compute_accuracy(model, train_loader))) print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader))) ```
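As a small sanity check (not part of the original notebook), one can pass a dummy MNIST-sized tensor through a single `ResidualBlock` defined above to confirm that the strided convolution halves the spatial resolution, which is why the fully connected layer expects `7*7*32` features after two blocks:

```python
# Hedged sketch: verify the output shape of one convolutional residual block.
dummy = torch.randn(1, 1, 28, 28).to(device)
block = ResidualBlock(channels=[1, 4, 8]).to(device)
with torch.no_grad():
    out = block(dummy)
print(out.shape)  # expected: torch.Size([1, 8, 14, 14]) because of stride=(2, 2)
```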
github_jupyter
# Hidden Markov Model ## What is a Hidden Markov Model? A Hidden Markov Model (HMM) is a statistical Markov model in with the system being modeled is assumed to be a Markov process with **hidden** states. An HMM allows us to talk about both observed events (like words that we see in the input) and hidden events (like Part-Of-Speech tags). An HMM is specified by the following components: ![image.png](attachment:image.png) **State Transition Probabilities** are the probabilities of moving from state i to state j. ![image-2.png](attachment:image-2.png) **Observation Probability Matrix** also called emission probabilities, express the probability of an observation Ot being generated from a state i. ![image-4.png](attachment:image-4.png) **Initial State Distribution** $\pi$<sub>i</sub> is the probability that the Markov chain will start in state i. Some state j with $\pi$<sub>j</sub>=0 means that they cannot be initial states. Hence, the entire Hidden Markov Model can be described as, ![image-3.png](attachment:image-3.png) ``` # Inorder to get the notebooks running in current directory import os, sys, inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0, parentdir) import hmm ``` Let us take a simple example with two hidden states and two observable states. The **Hidden states** will be **Rainy** and **Sunny**. The **Observable states** will be **Sad** and **Happy**. The transition and emission matrices are given below. The initial probabilities are obtained by computing the stationary distribution of the transition matrix. This means that for a given matrix A, the stationary distribution would be given as, $\pi$A = $\pi$ ``` # Hidden hidden_states = ["Rainy", "Sunny"] transition_matrix = [[0.5, 0.5], [0.3, 0.7]] # Observable observable_states = ["Sad", "Happy"] emission_matrix = [[0.8, 0.2], [0.4, 0.6]] # Inputs input_seq = [0, 0, 1] model = hmm.HiddenMarkovModel( observable_states, hidden_states, transition_matrix, emission_matrix ) model.print_model_info() model.visualize_model(output_dir="simple_demo", notebook=True) ``` Here the <span style="color: blue;">blue</span> lines indicate the hidden transitions. Here the <span style="color: red;">red</span> lines indicate the emission transitions. # Problem 1: Computing Likelihood: Given an HMM $\lambda$ = (A, B) and an observation sequence O, determine the likelihood P(O | $\lambda$) ## How It Is Calculated? For our example, for the given **observed** sequence - (Sad, Sad, Happy) the probabilities will be calculated as, <em> P(Sad, Sad, Happy) = P(Rainy) * P(Sad | Rainy) * P(Rainy | Rainy) * P(Sad | Rainy) * P(Rainy | Rainy) * P(Happy | Rainy) + P(Rainy) * P(Sad | Rainy) * P(Rainy | Rainy) * P(Sad | Rainy) * P(Sunny | Rainy) * P(Happy | Sunny) + P(Rainy) * P(Sad | Rainy) * P(Sunny | Rainy) * P(Sad | Sunny) * P(Rainy | Sunny) * P(Happy | Rainy) + P(Rainy) * P(Sad | Rainy) * P(Sunny | Rainy) * P(Sad | Sunny) * P(Sunny | Sunny) * P(Happy | Sunny) + P(Sunny) * P(Sad | Sunny) * P(Rainy | Sunny) * P(Sad | Rainy) * P(Rainy | Rainy) * P(Happy | Rainy) + P(Sunny) * P(Sad | Sunny) * P(Rainy | Sunny) * P(Sad | Rainy) * P(Sunny | Rainy) * P(Happy | Sunny) + P(Sunny) * P(Sad | Sunny) * P(Sunny | Sunny) * P(Sad | Sunny) * P(Rainy | Sunny) * P(Happy | Rainy) + P(Sunny) * P(Sad | Sunny) * P(Sunny | Sunny) * P(Sad | Sunny) * P(Sunny | Sunny) * P(Happy | Sunny) </em> ## The Problems With This Method This however, is a naive way of computation. 
The number of multiplications this way is of the order of 2TN<sup>T</sup>. where T is the length of the observed sequence and N is the number of hidden states. This means that the time complexity increases exponentially as the number of hidden states increases. # Forward Algorithm We are computing *P(Rainy) * P(Sad | Rainy)* and *P(Sunny) * P(Sad | Sunny)* a total of 4 times. Even parts like *P(Rainy) * P(Sad | Rainy) * P(Rainy | Rainy) * P(Sad | Rainy)*, *P(Rainy) * P(Sad | Rainy) * P(Sunny | Rainy) * P(Sad | Sunny)*, *P(Sunny) * P(Sad | Sunny) * P(Rainy | Sunny) * P(Sad | Rainy)* and *P(Sunny) * P(Sad | Sunny) * P(Sunny | Sunny) * P(Sad | Sunny)* are repeated. We can avoid so many computation by using recurrance relations with the help of **Dynamic Programming**. ![ForwardHMM](../assets/ForwardHMM.png) In code, it can be written as: ``` alpha[:, 0] = self.pi * emission_matrix[:, input_seq[0]] # Initialize for t in range(1, T): for s in range(n_states): alpha[s, t] = emission_matrix[s, input_seq[t]] * np.sum( alpha[:, t - 1] * transition_matrix[:, s] ) ``` This will lead to the following computations: ![Computation](../assets/Computation.png) ``` alpha, a_probs = model.forward(input_seq) hmm.print_forward_result(alpha, a_probs) ``` # Backward Algorithm The Backward Algorithm is the time-reversed version of the Forward Algorithm. ``` beta, b_probs = model.backward(input_seq) hmm.print_backward_result(beta, b_probs) ``` # Problem 2: Given an observation sequence O and an HMM λ = (A,B), discover the best hidden state sequence Q. ## Viterbi Algorithm The Viterbi Algorithm increments over each time step, finding the maximum probability of any path that gets to state i at time t, that also has the correct observations for the sequence up to time t. The algorithm also keeps track of the state with the highest probability at each stage. At the end of the sequence, the algorith will iterate backwards selecting the state that won which creates the most likely path or sequence of hidden states that led to the sequence of observations. In code, it is written as: ``` delta[:, 0] = self.pi * emission_matrix[:, input_seq[0]] # Initialize for t in range(1, T): for s in range(n_states): delta[s, t] = ( np.max(delta[:, t - 1] * transition_matrix[:, s]) * emission_matrix[s, input_seq[t]] ) phi[s, t] = np.argmax(delta[:, t - 1] * transition_matrix[:, s]) ``` The Viterbi Algorithm is identical to the forward algorithm except that it takes the **max** over the previous path probabilities whereas the forward algorithm takes the **sum**. The code for the Backtrace is written as: ``` path[T - 1] = np.argmax(delta[:, T - 1]) # Initialize for t in range(T - 2, -1, -1): path[t] = phi[path[t + 1], [t + 1]] ``` ``` path, delta, phi = model.viterbi(input_seq) hmm.print_viterbi_result(input_seq, observable_states, hidden_states, path, delta, phi) ```
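Since the notebook relies on a local `hmm` module, here is a hedged, self-contained NumPy version of the forward pass for the Rainy/Sunny example above (using the stationary initial distribution π ≈ [0.375, 0.625] described earlier), so the likelihood of the observed sequence can be checked without that module:

```python
import numpy as np

A = np.array([[0.5, 0.5],    # transitions: Rainy -> (Rainy, Sunny)
              [0.3, 0.7]])   #              Sunny -> (Rainy, Sunny)
B = np.array([[0.8, 0.2],    # emissions:   Rainy -> (Sad, Happy)
              [0.4, 0.6]])   #              Sunny -> (Sad, Happy)
pi = np.array([0.375, 0.625])  # stationary distribution of A
obs = [0, 0, 1]                # Sad, Sad, Happy

alpha = np.zeros((2, len(obs)))
alpha[:, 0] = pi * B[:, obs[0]]
for t in range(1, len(obs)):
    for s in range(2):
        alpha[s, t] = B[s, obs[t]] * np.sum(alpha[:, t - 1] * A[:, s])

print(alpha)
print("P(Sad, Sad, Happy) =", alpha[:, -1].sum())  # ~0.1344 for these matrices
```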
github_jupyter
# Deep Reinforcement Learning in Action ### by Alex Zai and Brandon Brown #### Chapter 3 ##### Listing 3.1 ``` from Gridworld import Gridworld game = Gridworld(size=4, mode='static') import sys game.display() game.makeMove('d') game.makeMove('d') game.makeMove('d') game.display() game.reward() game.board.render_np() game.board.render_np().shape ``` ##### Listing 3.2 ``` import numpy as np import torch from Gridworld import Gridworld import random from matplotlib import pylab as plt l1 = 64 l2 = 150 l3 = 100 l4 = 4 model = torch.nn.Sequential( torch.nn.Linear(l1, l2), torch.nn.ReLU(), torch.nn.Linear(l2, l3), torch.nn.ReLU(), torch.nn.Linear(l3,l4) ) loss_fn = torch.nn.MSELoss() learning_rate = 1e-3 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) gamma = 0.9 epsilon = 1.0 action_set = { 0: 'u', 1: 'd', 2: 'l', 3: 'r', } ``` ##### Listing 3.3 ``` epochs = 1000 losses = [] for i in range(epochs): game = Gridworld(size=4, mode='static') state_ = game.board.render_np().reshape(1,64) + np.random.rand(1,64)/10.0 state1 = torch.from_numpy(state_).float() status = 1 while(status == 1): qval = model(state1) qval_ = qval.data.numpy() if (random.random() < epsilon): action_ = np.random.randint(0,4) else: action_ = np.argmax(qval_) action = action_set[action_] game.makeMove(action) state2_ = game.board.render_np().reshape(1,64) + np.random.rand(1,64)/10.0 state2 = torch.from_numpy(state2_).float() reward = game.reward() #-1 for lose, +1 for win, 0 otherwise with torch.no_grad(): newQ = model(state2.reshape(1,64)) maxQ = torch.max(newQ) if reward == -1: # if game still in play Y = reward + (gamma * maxQ) else: Y = reward Y = torch.Tensor([Y]).detach().squeeze() X = qval.squeeze()[action_] loss = loss_fn(X, Y) optimizer.zero_grad() loss.backward() losses.append(loss.item()) optimizer.step() state1 = state2 if reward != -1: #game lost status = 0 if epsilon > 0.1: epsilon -= (1/epochs) plt.plot(losses) m = torch.Tensor([2.0]) m.requires_grad=True b = torch.Tensor([1.0]) b.requires_grad=True def linear_model(x,m,b): y = m @ x + b return y y = linear_model(torch.Tensor([4.]), m,b) y y.grad_fn with torch.no_grad(): y = linear_model(torch.Tensor([4]),m,b) y y.grad_fn y = linear_model(torch.Tensor([4.]), m,b) y.backward() m.grad b.grad ``` ##### Listing 3.4 ``` def test_model(model, mode='static', display=True): i = 0 test_game = Gridworld(mode=mode) state_ = test_game.board.render_np().reshape(1,64) + np.random.rand(1,64)/10.0 state = torch.from_numpy(state_).float() if display: print("Initial State:") print(test_game.display()) status = 1 while(status == 1): qval = model(state) qval_ = qval.data.numpy() action_ = np.argmax(qval_) action = action_set[action_] if display: print('Move #: %s; Taking action: %s' % (i, action)) test_game.makeMove(action) state_ = test_game.board.render_np().reshape(1,64) + np.random.rand(1,64)/10.0 state = torch.from_numpy(state_).float() if display: print(test_game.display()) reward = test_game.reward() if reward != -1: #if game is over if reward > 0: #if game won status = 2 if display: print("Game won! Reward: %s" % (reward,)) else: #game is lost status = 0 if display: print("Game LOST. 
Reward: %s" % (reward,)) i += 1 if (i > 15): if display: print("Game lost; too many moves.") break win = True if status == 2 else False return win test_model(model, 'static') ``` ##### Listing 3.5 ``` from collections import deque epochs = 5000 losses = [] mem_size = 1000 batch_size = 200 replay = deque(maxlen=mem_size) max_moves = 50 h = 0 for i in range(epochs): game = Gridworld(size=4, mode='random') state1_ = game.board.render_np().reshape(1,64) + np.random.rand(1,64)/100.0 state1 = torch.from_numpy(state1_).float() status = 1 mov = 0 while(status == 1): mov += 1 qval = model(state1) qval_ = qval.data.numpy() if (random.random() < epsilon): action_ = np.random.randint(0,4) else: action_ = np.argmax(qval_) action = action_set[action_] game.makeMove(action) state2_ = game.board.render_np().reshape(1,64) + np.random.rand(1,64)/100.0 state2 = torch.from_numpy(state2_).float() reward = game.reward() done = True if reward > 0 else False exp = (state1, action_, reward, state2, done) replay.append(exp) state1 = state2 if len(replay) > batch_size: minibatch = random.sample(replay, batch_size) state1_batch = torch.cat([s1 for (s1,a,r,s2,d) in minibatch]) action_batch = torch.Tensor([a for (s1,a,r,s2,d) in minibatch]) reward_batch = torch.Tensor([r for (s1,a,r,s2,d) in minibatch]) state2_batch = torch.cat([s2 for (s1,a,r,s2,d) in minibatch]) done_batch = torch.Tensor([d for (s1,a,r,s2,d) in minibatch]) Q1 = model(state1_batch) with torch.no_grad(): Q2 = model(state2_batch) Y = reward_batch + gamma * ((1 - done_batch) * torch.max(Q2,dim=1)[0]) X = \ Q1.gather(dim=1,index=action_batch.long().unsqueeze(dim=1)).squeeze() loss = loss_fn(X, Y.detach()) optimizer.zero_grad() loss.backward() losses.append(loss.item()) optimizer.step() if reward != -1 or mov > max_moves: status = 0 mov = 0 losses = np.array(losses) plt.plot(losses) test_model(model,mode='random') ``` ##### Listing 3.6 ``` max_games = 1000 wins = 0 for i in range(max_games): win = test_model(model, mode='random', display=False) if win: wins += 1 win_perc = float(wins) / float(max_games) print("Games played: {0}, # of wins: {1}".format(max_games,wins)) print("Win percentage: {}".format(100.0*win_perc)) ``` ##### Listing 3.7 ``` import copy model = torch.nn.Sequential( torch.nn.Linear(l1, l2), torch.nn.ReLU(), torch.nn.Linear(l2, l3), torch.nn.ReLU(), torch.nn.Linear(l3,l4) ) model2 = model2 = copy.deepcopy(model) model2.load_state_dict(model.state_dict()) sync_freq = 50 loss_fn = torch.nn.MSELoss() learning_rate = 1e-3 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) ``` ##### Listing 3.8 ``` from IPython.display import clear_output from collections import deque epochs = 5000 losses = [] mem_size = 1000 batch_size = 200 replay = deque(maxlen=mem_size) max_moves = 50 h = 0 sync_freq = 500 j=0 for i in range(epochs): game = Gridworld(size=4, mode='random') state1_ = game.board.render_np().reshape(1,64) + np.random.rand(1,64)/100.0 state1 = torch.from_numpy(state1_).float() status = 1 mov = 0 while(status == 1): j+=1 mov += 1 qval = model(state1) qval_ = qval.data.numpy() if (random.random() < epsilon): action_ = np.random.randint(0,4) else: action_ = np.argmax(qval_) action = action_set[action_] game.makeMove(action) state2_ = game.board.render_np().reshape(1,64) + np.random.rand(1,64)/100.0 state2 = torch.from_numpy(state2_).float() reward = game.reward() done = True if reward > 0 else False exp = (state1, action_, reward, state2, done) replay.append(exp) state1 = state2 if len(replay) > batch_size: minibatch = 
random.sample(replay, batch_size) state1_batch = torch.cat([s1 for (s1,a,r,s2,d) in minibatch]) action_batch = torch.Tensor([a for (s1,a,r,s2,d) in minibatch]) reward_batch = torch.Tensor([r for (s1,a,r,s2,d) in minibatch]) state2_batch = torch.cat([s2 for (s1,a,r,s2,d) in minibatch]) done_batch = torch.Tensor([d for (s1,a,r,s2,d) in minibatch]) Q1 = model(state1_batch) with torch.no_grad(): Q2 = model2(state2_batch) Y = reward_batch + gamma * ((1-done_batch) * \ torch.max(Q2,dim=1)[0]) X = Q1.gather(dim=1,index=action_batch.long() \ .unsqueeze(dim=1)).squeeze() loss = loss_fn(X, Y.detach()) print(i, loss.item()) clear_output(wait=True) optimizer.zero_grad() loss.backward() losses.append(loss.item()) optimizer.step() if j % sync_freq == 0: model2.load_state_dict(model.state_dict()) if reward != -1 or mov > max_moves: status = 0 mov = 0 losses = np.array(losses) plt.plot(losses) test_model(model,mode='random') ```
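The raw loss curve from Listing 3.8 is quite noisy; a small optional helper (not from the book) that plots a running mean alongside the raw losses can make the trend easier to read. It assumes the `losses` array and the numpy/pylab imports from the cells above:

```python
def running_mean(x, window=50):
    """Simple moving average for smoothing a 1-D loss array."""
    kernel = np.ones(window) / window
    return np.convolve(x, kernel, mode='valid')

plt.plot(losses, alpha=0.3, label='raw loss')
plt.plot(running_mean(losses), label='running mean (window=50)')
plt.xlabel('training step')
plt.ylabel('loss')
plt.legend()
plt.show()
```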
github_jupyter
``` %matplotlib inline %config InlineBackend.figure_format = 'retina' ``` # 0. General note * This notebook produces figures and calculations presented in [Ye et al. 2017, JGR](https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1002/2016JB013811). * This notebook demonstrates how to correct pressure scales for the existing phase boundary data. # 1. Global setup ``` import matplotlib.pyplot as plt import numpy as np from uncertainties import unumpy as unp import pytheos as eos ``` # 2. Pressure calculations for PPv * Data from Tateno2009 T (K) | Au-Tsuchiya | Pt-Holmes | MgO-Speziale ------|-------------|-----------|-------------- 3500 | 120.4 | 137.7 | 135.6 2000 | 110.5 | 126.8 | 115.8 * Dorogokupets2007 T (K) | Au | Pt | MgO ------|-------------|-----------|-------------- 3500 | 119.7 | 135.2 | 129.6 2000 | 108.9 | 123.2 | 113.2 <b> * In conclusion, PPV boundary discrepancy is not likely due to pressure scale problem. </b> ``` t_ppv = np.asarray([3500., 2000.]) Au_T = eos.gold.Tsuchiya2003() Au_D = eos.gold.Dorogokupets2007() v = np.asarray([51.58,51.7]) p_Au_T_ppv = Au_T.cal_p(v, t_ppv) p_Au_D_ppv = Au_D.cal_p(v, t_ppv) print(p_Au_T_ppv, p_Au_D_ppv) print('slopes: ', (p_Au_T_ppv[0]-p_Au_T_ppv[1])/(t_ppv[0]-t_ppv[1]),\ (p_Au_D_ppv[0]-p_Au_D_ppv[1])/(t_ppv[0]-t_ppv[1]) ) Pt_H = eos.platinum.Holmes1989() Pt_D = eos.platinum.Dorogokupets2007() v = np.asarray([48.06, 48.09]) p_Pt_H_ppv = Pt_H.cal_p(v, t_ppv) p_Pt_D_ppv = Pt_D.cal_p(v, t_ppv) print(p_Pt_H_ppv, p_Pt_D_ppv) print('slopes: ', (p_Pt_H_ppv[0]-p_Pt_H_ppv[1])/(t_ppv[0]-t_ppv[1]),\ (p_Pt_D_ppv[0]-p_Pt_D_ppv[1])/(t_ppv[0]-t_ppv[1]) ) MgO_S = eos.periclase.Speziale2001() MgO_D = eos.periclase.Dorogokupets2007() v = np.asarray([52.87, 53.6]) p_MgO_S_ppv = MgO_S.cal_p(v, t_ppv) p_MgO_D_ppv = MgO_D.cal_p(v, t_ppv) print(p_MgO_S_ppv, p_MgO_D_ppv) print('slopes: ', (p_MgO_S_ppv[0]-p_MgO_S_ppv[1])/(t_ppv[0]-t_ppv[1]), \ (p_MgO_D_ppv[0]-p_MgO_D_ppv[1])/(t_ppv[0]-t_ppv[1]) ) ``` # 3. 
Post-spinel Fei2004 Scales| PT | PT ------|------------|------------ MgO-S | 23.6, 1573 | 22.8, 2173 MgO-D | 23.1, 1573 | 22.0, 2173 Ye2014 Scales | PT | PT -------|------------|------------ Pt-F | 25.2, 1550 | 23.2, 2380 Pt-D | 24.6, 1550 | 22.5, 2380 Au-F | 28.3, 1650 | 27.1, 2150 Au-D | 27.0, 1650 | 25.6, 2150 ``` MgO_S = eos.periclase.Speziale2001() MgO_D = eos.periclase.Dorogokupets2007() v = np.asarray([68.75, 70.3]) t_MgO = np.asarray([1573.,2173.]) p_MgO_S = MgO_S.cal_p(v, t_MgO) p_MgO_D = MgO_D.cal_p(v, t_MgO) print(p_MgO_S, p_MgO_D) print('slopes: ', (p_MgO_S[0]-p_MgO_S[1])/(t_MgO[0]-t_MgO[1]), (p_MgO_D[0]-p_MgO_D[1])/(t_MgO[0]-t_MgO[1]) ) Pt_F = eos.platinum.Fei2007bm3() Pt_D = eos.platinum.Dorogokupets2007() v = np.asarray([57.43, 58.85]) t_Pt = np.asarray([1550., 2380.]) p_Pt_F = Pt_F.cal_p(v, t_Pt) p_Pt_D = Pt_D.cal_p(v, t_Pt) print(p_Pt_F, p_Pt_D) print('slopes: ', (p_Pt_F[0]-p_Pt_F[1])/(t_Pt[0]-t_Pt[1]), (p_Pt_D[0]-p_Pt_D[1])/(t_Pt[0]-t_Pt[1]) ) Au_F = eos.gold.Fei2007bm3() Au_D = eos.gold.Dorogokupets2007() v = np.asarray([62.33,63.53]) t_Au = np.asarray([1650., 2150.]) p_Au_F = Au_F.cal_p(v, t_Au) p_Au_D = Au_D.cal_p(v, t_Au) print(p_Au_F, p_Au_D) print('slopes: ', (p_Au_F[0]-p_Au_F[1])/(t_Au[0]-t_Au[1]), (p_Au_D[0]-p_Au_D[1])/(t_Au[0]-t_Au[1]) ) f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,3.5)) #ax.plot(unp.nominal_values(p_Au_T), t, c='b', ls='--', label='Au-Tsuchiya') lw = 4 l_alpha = 0.3 ax1.plot(unp.nominal_values(p_Au_D), t_Au, c='b', ls='-', alpha=l_alpha, label='Au-D07', lw=lw) ax1.annotate('Au-D07', xy=(25.7, 2100), xycoords='data', xytext=(26.9, 2100), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='right', verticalalignment='center') ax1.plot(unp.nominal_values(p_Au_D-2.5), t_Au, c='b', ls='-', label='Au-mD07', lw=lw) ax1.annotate('Au-D07,\n corrected', xy=(24.35, 1700), xycoords='data', xytext=(24.8, 1700), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='left', verticalalignment='center') #ax.plot(unp.nominal_values(p_Pt_H), t, c='r', ls='--', label='Pt-Holmes') ax1.plot(unp.nominal_values(p_Pt_D), t_Pt, c='r', ls='-', label='Pt-D07', lw=lw) ax1.annotate('Pt-D07', xy=(22.7, 2300), xycoords='data', xytext=(23.1, 2300), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='left', verticalalignment='center') ax1.plot(unp.nominal_values(p_MgO_S), t_MgO, c='k', ls='-', alpha=l_alpha, label='MgO-S01', lw=lw) ax1.annotate('MgO-S01', xy=(22.9, 2150), xycoords='data', xytext=(22.5, 2250), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='right', verticalalignment='top') ax1.plot(unp.nominal_values(p_MgO_D), t_MgO, c='k', ls='-', label='MgO-D07', lw=lw) ax1.annotate('MgO-D07', xy=(22.7, 1800), xycoords='data', xytext=(22.3, 1800), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='right', verticalalignment='center') ax1.fill([23.5,24,24,23.5], [1700,1700,2000,2000], 'k', alpha=0.2) ax1.set_xlabel("Pressure (GPa)"); ax1.set_ylabel("Temperature (K)") #l = ax1.legend(loc=3, fontsize=10, handlelength=2.5); l.get_frame().set_linewidth(0.5) ax2.plot(unp.nominal_values(p_Au_T_ppv), t_ppv, c='b', ls='-', alpha=l_alpha, label='Au-T04', lw=lw) ax2.annotate('Au-T04', xy=(120, 3400), xycoords='data', xytext=(122, 3400), textcoords='data', 
arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='left', verticalalignment='center') ax2.plot(unp.nominal_values(p_Au_D_ppv), t_ppv, c='b', ls='-', label='Au-D07', lw=lw) ax2.annotate('Au-D07', xy=(119, 3400), xycoords='data', xytext=(117, 3400), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='right', verticalalignment='center') ax2.plot(unp.nominal_values(p_Pt_H_ppv), t_ppv, c='r', ls='-', alpha=l_alpha, label='Pt-H89', lw=lw) ax2.annotate('Pt-H89', xy=(129, 2300), xycoords='data', xytext=(132, 2300), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='left', verticalalignment='center') ax2.plot(unp.nominal_values(p_Pt_D_ppv), t_ppv, c='r', ls='-', label='Pt-D07', lw=lw) ax2.annotate('Pt-D07', xy=(124, 2150), xycoords='data', xytext=(123.7, 2300), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='center', verticalalignment='bottom') ax2.plot(unp.nominal_values(p_MgO_S_ppv), t_ppv, c='k', ls='-', alpha=l_alpha, label='MgO-S01', lw=lw) ax2.annotate('MgO-S01', xy=(132, 3250), xycoords='data', xytext=(132.2, 3550), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='left', verticalalignment='bottom') ax2.plot(unp.nominal_values(p_MgO_D_ppv), t_ppv, c='k', ls='-', label='MgO-D07', lw=lw) ax2.annotate('MgO-D07', xy=(128, 3400), xycoords='data', xytext=(128, 3550), textcoords='data', arrowprops=dict(facecolor='k', alpha=0.5, shrink=1, width = 0.1, headwidth=5), horizontalalignment='center', verticalalignment='bottom') ax2.set_xlabel("Pressure (GPa)"); ax2.set_ylabel("Temperature (K)") ax2.set_ylim(1900, 3700.) #l = ax2.legend(loc=0, fontsize=10, handlelength=2.5); l.get_frame().set_linewidth(0.5) ax1.text(0.05, 0.03, 'a', horizontalalignment='center',\ verticalalignment='bottom', transform = ax1.transAxes,\ fontsize = 32) ax2.text(0.05, 0.03, 'b', horizontalalignment='center',\ verticalalignment='bottom', transform = ax2.transAxes,\ fontsize = 32) ax1.set_yticks(ax1.get_yticks()[::2]) #ax2.set_yticks(ax2.get_yticks()[::2]) plt.tight_layout(pad=0.6) plt.savefig('f-boundaries.pdf', bbox_inches='tight', \ pad_inches=0.1) ```
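As a brief numerical follow-up (not in the original analysis), the scale offsets visible in panel (a) can also be printed directly from the arrays computed above, using only the `unp.nominal_values` helper already imported:

```python
# Print MgO and Pt scale offsets at the post-spinel boundary points above.
for t, p_s, p_d in zip(t_MgO,
                       unp.nominal_values(p_MgO_S),
                       unp.nominal_values(p_MgO_D)):
    print("T = %4.0f K: MgO-S01 - MgO-D07 = %.2f GPa" % (t, p_s - p_d))

for t, p_f, p_d in zip(t_Pt,
                       unp.nominal_values(p_Pt_F),
                       unp.nominal_values(p_Pt_D)):
    print("T = %4.0f K: Pt-F07  - Pt-D07  = %.2f GPa" % (t, p_f - p_d))
```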
github_jupyter
# Generators # 生成器 > Here we'll take a deeper dive into Python generators, including *generator expressions* and *generator functions*. 本章我们深入讨论Python的生成器,包括*生成器表达式*和*生成器函数* ## Generator Expressions ## 生成器表达式 > The difference between list comprehensions and generator expressions is sometimes confusing; here we'll quickly outline the differences between them: 列表解析和生成器表达式之间的区别很容易令人混乱;下面我们快速地说明一下它们之间的区别: ### List comprehensions use square brackets, while generator expressions use parentheses ### 列表解析使用中括号,而生成器表达式使用小括号 > This is a representative list comprehension: 下面是一个很有代表性的列表解析: ``` [n ** 2 for n in range(12)] ``` > While this is a representative generator expression: 下面这个却是一个生成器表达式: ``` (n ** 2 for n in range(12)) ``` > Notice that printing the generator expression does not print the contents; one way to print the contents of a generator expression is to pass it to the ``list`` constructor: 你会注意到直接打印生成器表达式并不会输出生成器的内容;可以使用`list`将生成器转换为一个列表然后输出: ``` G = (n ** 2 for n in range(12)) list(G) ``` ### A list is a collection of values, while a generator is a recipe for producing values ### 列表是一个集合,而生成器只是产生集合值的配方 > When you create a list, you are actually building a collection of values, and there is some memory cost associated with that. When you create a generator, you are not building a collection of values, but a recipe for producing those values. Both expose the same iterator interface, as we can see here: 当你创建一个列表,你真实地创建了一个集合,当然这个集合存储在内存当中需要一定的空间。当你创建了一个生成器,你并没有创建一个集合,你仅仅是指定了产生集合值的方法。两者都实现了迭代器接口,由下面两个例子可以看到: ``` L = [n ** 2 for n in range(12)] for val in L: print(val, end=' ') G = (n ** 2 for n in range(12)) for val in G: print(val, end=' ') ``` > The difference is that a generator expression does not actually compute the values until they are needed. This not only leads to memory efficiency, but to computational efficiency as well! This also means that while the size of a list is limited by available memory, the size of a generator expression is unlimited! 区别在于生成器仅在你用到值的时候才会按照配方计算一个值返回给你。这样的好处不仅仅是节省内存,还能节省计算资源。这还意味着,列表的大小受限于可用内存的大小,而生成器的大小是无限的。 > An example of an infinite generator expression can be created using the ``count`` iterator defined in ``itertools``: 我们可以使用`itertools`里面的`count`函数来构造一个无限的生成器表达式: ``` from itertools import count count() for i in count(): print(i, end=' ') if i >= 10: break ``` > The ``count`` iterator will go on happily counting forever until you tell it to stop; this makes it convenient to create generators that will also go on forever: `count`函数会永远的迭代下去除非你停止了它的运行;这也可以用来创建永远运行的生成器: ``` factors = [2, 3, 5, 7] G = (i for i in count() if all(i % n > 0 for n in factors)) for val in G: print(val, end=' ') if val > 40: break ``` > You might see what we're getting at here: if we were to expand the list of factors appropriately, what we would have the beginnings of is a prime number generator, using the Sieve of Eratosthenes algorithm. We'll explore this more momentarily. 上面的例子你应该已经看出来了:如果我们使用Sieve of Eratosthenes算法,将factors列表进行合适的扩展的话,那么我们将会得到一个质数的生成器。 ### A list can be iterated multiple times; a generator expression is single-use ### 列表可以被迭代多次;生成器只能是一次使用 > This is one of those potential gotchas of generator expressions. 
With a list, we can straightforwardly do this: 这是生成器的一个著名的坑。使用列表时,我们可以如下做: ``` L = [n ** 2 for n in range(12)] for val in L: print(val, end=' ') print() for val in L: print(val, end=' ') ``` > A generator expression, on the other hand, is used-up after one iteration: 生成器表达式则不一样,只能迭代一次: ``` G = (n ** 2 for n in range(12)) list(G) list(G) ``` > This can be very useful because it means iteration can be stopped and started: 这是非常有用的特性,因为这意味着迭代能停止和开始: ``` G = (n**2 for n in range(12)) for n in G: print(n, end=' ') if n > 30: break # 生成器停止运行 print("\ndoing something in between") for n in G: # 生成器继续运行 print(n, end=' ') ``` > One place I've found this useful is when working with collections of data files on disk; it means that you can quite easily analyze them in batches, letting the generator keep track of which ones you have yet to see. 作者发现这个特性在使用磁盘上存储的数据文件时特别有用;它意味着你可以很容易的按批次来分析数据,让生成器记录下目前的处理进度。 ## Generator Functions: Using ``yield`` ## 生成器函数:使用 `yield` > We saw in the previous section that list comprehensions are best used to create relatively simple lists, while using a normal ``for`` loop can be better in more complicated situations. The same is true of generator expressions: we can make more complicated generators using *generator functions*, which make use of the ``yield`` statement. 从上面的讨论中,我们可以知道列表解析适用于创建相对简单的列表,如果列表的生成规则比较复杂,还是使用普通`for`循环更加合适。对于生成器表达式来说也一样:我们可以使用*生成器函数*创建更加复杂的生成器,这里需要用到`yield`关键字。 > Here we have two ways of constructing the same list: 我们有两种方式来构建同一个列表: ``` L1 = [n ** 2 for n in range(12)] L2 = [] for n in range(12): L2.append(n ** 2) print(L1) print(L2) ``` > Similarly, here we have two ways of constructing equivalent generators: 类似的,我们也有两种方法来构建相同的生成器: ``` G1 = (n ** 2 for n in range(12)) def gen(): for n in range(12): yield n ** 2 G2 = gen() print(*G1) print(*G2) ``` > A generator function is a function that, rather than using ``return`` to return a value once, uses ``yield`` to yield a (potentially infinite) sequence of values. Just as in generator expressions, the state of the generator is preserved between partial iterations, but if we want a fresh copy of the generator we can simply call the function again. 生成器函数与普通函数的区别在于,生成器函数不是使用`return`来一次性返回值,而是使用`yield`来产生一系列(可能无穷多个)值。就像生成器表达式一样,生成器的状态会被生成器自己保留并记录,如果你需要一个新的生成器,你可以再次调用函数。 ## Example: Prime Number Generator ## 例子:质数生成器 > Here I'll show my favorite example of a generator function: a function to generate an unbounded series of prime numbers. A classic algorithm for this is the *Sieve of Eratosthenes*, which works something like this: 下面作者将介绍他最喜欢的生成器函数的例子:一个可以产生无穷多个质数序列的函数。计算质数又一个经典算法*Sieve of Eratosthenes*,它的工作原理如下: ``` # 产生可能的质数序列 L = [n for n in range(2, 40)] print(L) # 剔除所有被第一个元素整除的数 L = [n for n in L if n == L[0] or n % L[0] > 0] print(L) # 剔除所有被第二个元素整除的数 L = [n for n in L if n == L[1] or n % L[1] > 0] print(L) # 剔除所有被第三个元素整除的数 L = [n for n in L if n == L[2] or n % L[2] > 0] print(L) ``` > If we repeat this procedure enough times on a large enough list, we can generate as many primes as we wish. 如果我们在一个很大的列表上重复这个过程足够多次,我们可以生成我们需要的质数。 > Let's encapsulate this logic in a generator function: 我们将这个逻辑封装到一个生成器函数中: ``` def gen_primes(N): """Generate primes up to N""" primes = set() # 使用primes集合存储找到的质数 for n in range(2, N): if all(n % p > 0 for p in primes): # primes中的元素都不能整除n -> n是质数 primes.add(n) # 将n加入primes集合 yield n # 产生序列 print(*gen_primes(100)) ``` > That's all there is to it! 
While this is certainly not the most computationally efficient implementation of the Sieve of Eratosthenes, it illustrates how convenient the generator function syntax can be for building more complicated sequences.
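If we wanted a truly unbounded version (matching the unbounded series of primes promised above), one possible sketch is to replace ``range(2, N)`` with the ``count`` iterator we met earlier. The name ``gen_primes_unbounded`` and this cell are my addition, not part of the original notebook:

```
from itertools import count

def gen_primes_unbounded():
    """Sketch: yield primes indefinitely instead of stopping at N"""
    primes = set()
    for n in count(2):  # count(2) never ends, so neither does this generator
        if all(n % p > 0 for p in primes):
            primes.add(n)
            yield n

G = gen_primes_unbounded()
print([next(G) for _ in range(15)])  # take just the first 15 primes
```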
github_jupyter
<a href="https://colab.research.google.com/github/reallygooday/60daysofudacity/blob/master/Basic_Image_Classifier.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> hand-written digits dataset from UCI: http://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits ``` # Importing load_digits() from the sklearn.datasets package from sklearn.datasets import load_digits import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline digits_data = load_digits() digits_data.keys() labels = pd.Series(digits_data['target']) data = pd.DataFrame(digits_data['data']) data.head(1) first_image = data.iloc[0] np_image = first_image.values np_image = np_image.reshape(8,8) plt.imshow(np_image, cmap='gray_r') f, axarr = plt.subplots(2, 4) axarr[0, 0].imshow(data.iloc[0].values.reshape(8,8), cmap='gray_r') axarr[0, 1].imshow(data.iloc[99].values.reshape(8,8), cmap='gray_r') axarr[0, 2].imshow(data.iloc[199].values.reshape(8,8), cmap='gray_r') axarr[0, 3].imshow(data.iloc[299].values.reshape(8,8), cmap='gray_r') axarr[1, 0].imshow(data.iloc[999].values.reshape(8,8), cmap='gray_r') axarr[1, 1].imshow(data.iloc[1099].values.reshape(8,8), cmap='gray_r') axarr[1, 2].imshow(data.iloc[1199].values.reshape(8,8), cmap='gray_r') axarr[1, 3].imshow(data.iloc[1299].values.reshape(8,8), cmap='gray_r') from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import KFold # 50% Train / test validation def train_knn(nneighbors, train_features, train_labels): knn = KNeighborsClassifier(n_neighbors = nneighbors) knn.fit(train_features, train_labels) return knn def test(model, test_features, test_labels): predictions = model.predict(test_features) train_test_df = pd.DataFrame() train_test_df['correct_label'] = test_labels train_test_df['predicted_label'] = predictions overall_accuracy = sum(train_test_df["predicted_label"] == train_test_df["correct_label"])/len(train_test_df) return overall_accuracy def cross_validate(k): fold_accuracies = [] kf = KFold(n_splits = 4, random_state=2) for train_index, test_index in kf.split(data): train_features, test_features = data.loc[train_index], data.loc[test_index] train_labels, test_labels = labels.loc[train_index], labels.loc[test_index] model = train_knn(k, train_features, train_labels) overall_accuracy = test(model, test_features, test_labels) fold_accuracies.append(overall_accuracy) return fold_accuracies knn_one_accuracies = cross_validate(1) np.mean(knn_one_accuracies) k_values = list(range(1,10)) k_overall_accuracies = [] for k in k_values: k_accuracies = cross_validate(k) k_mean_accuracy = np.mean(k_accuracies) k_overall_accuracies.append(k_mean_accuracy) plt.figure(figsize=(8,4)) plt.title("Mean Accuracy vs. 
k") plt.plot(k_values, k_overall_accuracies) #Neural Network With One Hidden Layer from sklearn.neural_network import MLPClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import KFold # 50% Train / test validation def train_nn(neuron_arch, train_features, train_labels): mlp = MLPClassifier(hidden_layer_sizes=neuron_arch) mlp.fit(train_features, train_labels) return mlp def test(model, test_features, test_labels): predictions = model.predict(test_features) train_test_df = pd.DataFrame() train_test_df['correct_label'] = test_labels train_test_df['predicted_label'] = predictions overall_accuracy = sum(train_test_df["predicted_label"] == train_test_df["correct_label"])/len(train_test_df) return overall_accuracy def cross_validate(neuron_arch): fold_accuracies = [] kf = KFold(n_splits = 4, random_state=2) for train_index, test_index in kf.split(data): train_features, test_features = data.loc[train_index], data.loc[test_index] train_labels, test_labels = labels.loc[train_index], labels.loc[test_index] model = train_nn(neuron_arch, train_features, train_labels) overall_accuracy = test(model, test_features, test_labels) fold_accuracies.append(overall_accuracy) return fold_accuracies from sklearn.neural_network import MLPClassifier nn_one_neurons = [ (8,), (16,), (32,), (64,), (128,), (256,) ] nn_one_accuracies = [] for n in nn_one_neurons: nn_accuracies = cross_validate(n) nn_mean_accuracy = np.mean(nn_accuracies) nn_one_accuracies.append(nn_mean_accuracy) plt.figure(figsize=(8,4)) plt.title("Mean Accuracy vs. Neurons In Single Hidden Layer") x = [i[0] for i in nn_one_neurons] plt.plot(x, nn_one_accuracies) # Neural Network With Two Hidden Layers nn_two_neurons = [ (64,64), (128, 128), (256, 256) ] nn_two_accuracies = [] for n in nn_two_neurons: nn_accuracies = cross_validate(n) nn_mean_accuracy = np.mean(nn_accuracies) nn_two_accuracies.append(nn_mean_accuracy) plt.figure(figsize=(8,4)) plt.title("Mean Accuracy vs. Neurons In Two Hidden Layers") x = [i[0] for i in nn_two_neurons] plt.plot(x, nn_two_accuracies) nn_two_accuracies #Neural Network With Three Hidden Layers from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import KFold # 50% Train / test validation def train_nn(neuron_arch, train_features, train_labels): mlp = MLPClassifier(hidden_layer_sizes=neuron_arch) mlp.fit(train_features, train_labels) return mlp def test(model, test_features, test_labels): predictions = model.predict(test_features) train_test_df = pd.DataFrame() train_test_df['correct_label'] = test_labels train_test_df['predicted_label'] = predictions overall_accuracy = sum(train_test_df["predicted_label"] == train_test_df["correct_label"])/len(train_test_df) return overall_accuracy def cross_validate_six(neuron_arch): fold_accuracies = [] kf = KFold(n_splits = 6, random_state=2) for train_index, test_index in kf.split(data): train_features, test_features = data.loc[train_index], data.loc[test_index] train_labels, test_labels = labels.loc[train_index], labels.loc[test_index] model = train_nn(neuron_arch, train_features, train_labels) overall_accuracy = test(model, test_features, test_labels) fold_accuracies.append(overall_accuracy) return fold_accuracies nn_three_neurons = [ (10, 10, 10), (64, 64, 64), (128, 128, 128) ] nn_three_accuracies = [] for n in nn_three_neurons: nn_accuracies = cross_validate_six(n) nn_mean_accuracy = np.mean(nn_accuracies) nn_three_accuracies.append(nn_mean_accuracy) plt.figure(figsize=(8,4)) plt.title("Mean Accuracy vs. 
Neurons In Three Hidden Layers") x = [i[0] for i in nn_three_neurons] plt.plot(x, nn_three_accuracies) nn_three_accuracies ``` #Image Classification with PyTorch ``` import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable import torchvision from torchvision import datasets, transforms import numpy as np import matplotlib.pyplot as plt %matplotlib inline train_loader = torch.utils.data.DataLoader( datasets.MNIST('./data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor() ])), batch_size=32, shuffle=False) test_loader = torch.utils.data.DataLoader( datasets.MNIST('./data', train=False, transform=transforms.Compose([ transforms.ToTensor() ])), batch_size=32, shuffle=False) class BasicNN(nn.Module): def __init__(self): super(BasicNN, self).__init__() self.net = nn.Linear(28 * 28, 10) def forward(self, x): batch_size = x.size(0) x = x.view(batch_size, -1) output = self.net(x) return F.softmax(output) model = BasicNN() optimizer = optim.SGD(model.parameters(), lr=0.001) def test(): total_loss = 0 correct = 0 for image, label in test_loader: image, label = Variable(image), Variable(label) output = model(image) total_loss += F.cross_entropy(output, label) correct += (torch.max(output, 1)[1].view(label.size()).data == label.data).sum() total_loss = total_loss.data[0]/ len(test_loader) accuracy = correct / len(test_loader.dataset) return total_loss, accuracy def train(): model.train() for image, label in train_loader: image, label = Variable(image), Variable(label) optimizer.zero_grad() output = model(image) loss = F.cross_entropy(output, label) loss.backward() optimizer.step() best_test_loss = None for e in range(1, 150): train() test_loss, test_accuracy = test() print("\n[Epoch: %d] Test Loss:%5.5f Test Accuracy:%5.5f" % (e, test_loss, test_accuracy)) # Save the model if the test_loss is the lowest if not best_test_loss or test_loss < best_test_loss: best_test_loss = test_loss else: break print("\nFinal Results\n-------------\n""Loss:", best_test_loss, "Test Accuracy: ", test_accuracy) ```
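A side note on the loops above: they target an older PyTorch API in which ``Variable`` wrappers and ``tensor.data[0]`` were still required. As a rough sketch only (reusing the ``model``, ``test_loader`` and imports already defined above, and not taken from the original notebook), the same evaluation logic could be written for current PyTorch releases like this:

```
def evaluate_current_pytorch():
    # Sketch: the same test() logic, with .item() in place of .data[0]
    # and torch.no_grad() in place of Variable wrappers.
    model.eval()
    total_loss, correct = 0.0, 0
    with torch.no_grad():  # gradients are not needed while evaluating
        for image, label in test_loader:
            output = model(image)
            total_loss += F.cross_entropy(output, label).item()
            correct += (output.argmax(dim=1) == label).sum().item()
    return total_loss / len(test_loader), correct / len(test_loader.dataset)
```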
github_jupyter
<h1>REGIONE LOMBARDIA</h1>

Comparison of the deaths recorded by ISTAT and the COVID-19 deaths recorded by the Italian Civil Protection Department with the deaths predicted by the SARIMA forecasting model.

<h2>MONTHLY DEATHS IN LOMBARDIA (ISTAT)</h2>

The DataFrame contains the monthly deaths of the <b>Lombardia</b> region from <b>2015</b> to <b>30 September 2020</b>.

```
import matplotlib.pyplot as plt
import pandas as pd

decessi_istat = pd.read_csv('../../csv/regioni/lombardia.csv')
decessi_istat.head()
decessi_istat['DATA'] = pd.to_datetime(decessi_istat['DATA'])
decessi_istat.TOTALE = pd.to_numeric(decessi_istat.TOTALE)
```

<h3>Extracting the data for the COVID-19 period</h3>

```
decessi_istat = decessi_istat[decessi_istat['DATA'] > '2020-02-29']
decessi_istat.head()
```

<h3>Building the time series of ISTAT deaths</h3>

```
decessi_istat = decessi_istat.set_index('DATA')
decessi_istat = decessi_istat.TOTALE
decessi_istat
```

<h2>MONTHLY COVID-19 DEATHS IN LOMBARDIA</h2>

The DataFrame contains the data provided by the Civil Protection Department on the monthly deaths of the <b>Lombardia</b> region from <b>March 2020</b> to <b>30 September 2020</b>.

```
covid = pd.read_csv('../../csv/regioni_covid/lombardia.csv')
covid.head()
covid['data'] = pd.to_datetime(covid['data'])
covid.deceduti = pd.to_numeric(covid.deceduti)
covid = covid.set_index('data')
covid.head()
```

<h3>Building the time series of COVID-19 deaths</h3>

```
covid = covid.deceduti
```

<h2>MONTHLY DEATHS PREDICTED BY THE SARIMA MODEL</h2>

The DataFrame contains the monthly deaths of the <b>Lombardia</b> region as predicted by the fitted SARIMA model.

```
predictions = pd.read_csv('../../csv/pred/predictions_SARIMA_lombardia.csv')
predictions.head()
predictions.rename(columns={'Unnamed: 0': 'Data', 'predicted_mean':'Totale'}, inplace=True)
predictions.head()
predictions['Data'] = pd.to_datetime(predictions['Data'])
predictions.Totale = pd.to_numeric(predictions.Totale)
```

<h3>Extracting the data for the COVID-19 period</h3>

```
predictions = predictions[predictions['Data'] > '2020-02-29']
predictions.head()
predictions = predictions.set_index('Data')
predictions.head()
```

<h3>Building the time series of deaths predicted by the model</h3>

```
predictions = predictions.Totale
```

<h1>CONFIDENCE INTERVALS</h1>

<h3>Upper bound</h3>

```
upper = pd.read_csv('../../csv/upper/predictions_SARIMA_lombardia_upper.csv')
upper.head()
upper.rename(columns={'Unnamed: 0': 'Data', 'upper TOTALE':'Totale'}, inplace=True)
upper['Data'] = pd.to_datetime(upper['Data'])
upper.Totale = pd.to_numeric(upper.Totale)
upper.head()
upper = upper[upper['Data'] > '2020-02-29']
upper = upper.set_index('Data')
upper.head()
upper = upper.Totale
```

<h3>Lower bound</h3>

```
lower = pd.read_csv('../../csv/lower/predictions_SARIMA_lombardia_lower.csv')
lower.head()
lower.rename(columns={'Unnamed: 0': 'Data', 'lower TOTALE':'Totale'}, inplace=True)
lower['Data'] = pd.to_datetime(lower['Data'])
lower.Totale = pd.to_numeric(lower.Totale)
lower.head()
lower = lower[lower['Data'] > '2020-02-29']
lower = lower.set_index('Data')
lower.head()
lower = lower.Totale
```

<h1>COMPARISON OF THE TIME SERIES</h1>

Below is a graphical comparison of the time series of <b>total monthly deaths</b>, <b>COVID-19 deaths</b> and <b>deaths predicted by the SARIMA model</b> for the <b>Lombardia</b> region.
<br />
The reference months are: <b>March</b>, <b>April</b>, <b>May</b>, <b>June</b>, <b>July</b>, <b>August</b> and <b>September</b>.

```
plt.figure(figsize=(15,4))
plt.title('LOMBARDIA - Confronto decessi totali, decessi causa covid e decessi del modello predittivo', size=18)
plt.plot(covid, label='decessi accertati covid')
plt.plot(decessi_istat, label='decessi totali')
plt.plot(predictions, label='predizione modello')
plt.legend(prop={'size': 12})
plt.show()

plt.figure(figsize=(15,4))
plt.title("LOMBARDIA - Confronto decessi totali ISTAT con decessi previsti dal modello", size=18)
plt.plot(predictions, label='predizione modello')
plt.plot(upper, label='limite massimo')
plt.plot(lower, label='limite minimo')
plt.plot(decessi_istat, label='decessi totali')
plt.legend(prop={'size': 12})
plt.show()
```

<h2>COVID-19 deaths according to the forecasting model</h2>

Difference between the total deaths released by ISTAT and the deaths predicted by the SARIMA model.

```
n = decessi_istat - predictions
n_upper = decessi_istat - lower
n_lower = decessi_istat - upper

plt.figure(figsize=(15,4))
plt.title("LOMBARDIA - Confronto decessi accertati covid con decessi covid previsti dal modello", size=18)
plt.plot(covid, label='decessi covid accertati - Protezione Civile')
plt.plot(n, label='decessi covid previsti - modello SARIMA')
plt.plot(n_upper, label='limite massimo - modello SARIMA')
plt.plot(n_lower, label='limite minimo - modello SARIMA')
plt.legend(prop={'size': 12})
plt.show()
```

The <b>intervals</b> correspond to the difference between the total deaths released by ISTAT for March, April, May and June 2020 and the <b>confidence interval</b> values (upper and lower bounds) of the SARIMA forecasting model for the same months.

```
d = decessi_istat.sum()
print("Decessi 2020:", d)

d_m = predictions.sum()
print("Decessi attesi dal modello 2020:", d_m)

d_lower = lower.sum()
print("Decessi attesi dal modello 2020 - livello minimo:", d_lower)
```

<h3>Total number of confirmed COVID-19 deaths for the Lombardia region</h3>

```
m = covid.sum()
print(int(m))
```

<h3>Total number of COVID-19 deaths estimated by the model for the Lombardia region</h3>

<h4>Mean value</h4>

```
total = n.sum()
print(int(total))
```

<h4>Maximum value</h4>

```
total_upper = n_upper.sum()
print(int(total_upper))
```

<h4>Minimum value</h4>

```
total_lower = n_lower.sum()
print(int(total_lower))
```

<h3>Estimated number of unregistered COVID-19 deaths according to the SARIMA forecasting model for the Lombardia region</h3>

<h4>Mean value</h4>

```
x = decessi_istat - predictions - covid
x = x.sum()
print(int(x))
```

<h4>Maximum value</h4>

```
x_upper = decessi_istat - lower - covid
x_upper = x_upper.sum()
print(int(x_upper))
```

<h4>Minimum value</h4>

```
x_lower = decessi_istat - upper - covid
x_lower = x_lower.sum()
print(int(x_lower))
```
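As a small convenience (not part of the original notebook), the totals printed above can be gathered into one table for easier side-by-side reading; this sketch reuses the ``m``, ``total``, ``total_upper`` and ``total_lower`` variables already computed:

```
summary = pd.DataFrame(
    {'deaths': [int(m), int(total), int(total_upper), int(total_lower)]},
    index=['confirmed COVID-19 deaths (Protezione Civile)',
           'model estimate (mean)',
           'model estimate (maximum)',
           'model estimate (minimum)'])
print(summary)
```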
github_jupyter
# Self-Driving Car Engineer Nanodegree

## Project: **Finding Lane Lines on the Road**

***

In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.

Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.

In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.

---

Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.

**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**

---

**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Transform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**

---

<figure>
 <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
 <figcaption>
 <p></p>
 <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
 </figcaption>
</figure>
 <p></p>
<figure>
 <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
 <figcaption>
 <p></p>
 <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
 </figcaption>
</figure>

**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt.
Also, consult the forums for more troubleshooting tips.** ## Import Packages ``` #importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 %matplotlib inline ``` ## Read in an Image ``` #reading in an image image = mpimg.imread('test_images/solidWhiteRight.jpg') #printing out some stats and plotting print('This image is:', type(image), 'with dimensions:', image.shape) plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray') ``` ## Ideas for Lane Detection Pipeline **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:** `cv2.inRange()` for color selection `cv2.fillPoly()` for regions selection `cv2.line()` to draw lines on an image given endpoints `cv2.addWeighted()` to coadd / overlay two images `cv2.cvtColor()` to grayscale or change color `cv2.imwrite()` to output images to file `cv2.bitwise_and()` to apply a mask to an image **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!** ## Helper Functions Below are some helper functions to help get you started. They should look familiar from the lesson! ``` import math from scipy import stats def grayscale(img): """Applies the Grayscale transform This will return an image with only one color channel but NOTE: to see the returned image as grayscale (assuming your grayscaled image is called 'gray') you should call plt.imshow(gray, cmap='gray')""" return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # Or use BGR2GRAY if you read an image with cv2.imread() # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) def canny(img, low_threshold, high_threshold): """Applies the Canny transform""" return cv2.Canny(img, low_threshold, high_threshold) def gaussian_blur(img, kernel_size): """Applies a Gaussian Noise kernel""" return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) def region_of_interest(img, vertices): """ Applies an image mask. Only keeps the region of the image defined by the polygon formed from `vertices`. The rest of the image is set to black. `vertices` should be a numpy array of integer points. """ #defining a blank mask to start with mask = np.zeros_like(img) #defining a 3 channel or 1 channel color to fill the mask with depending on the input image if len(img.shape) > 2: channel_count = img.shape[2] # i.e. 3 or 4 depending on your image ignore_mask_color = (255,) * channel_count else: ignore_mask_color = 255 #filling pixels inside the polygon defined by "vertices" with the fill color cv2.fillPoly(mask, vertices, ignore_mask_color) #returning the image only where mask pixels are nonzero masked_image = cv2.bitwise_and(img, mask) return masked_image def draw_lines(img, lines, color=[255, 0, 0], thickness=2): """ NOTE: this is the function you might want to use as a starting point once you want to average/extrapolate the line segments you detect to map out the full extent of the lane (going from the result shown in raw-lines-example.mp4 to that shown in P1_example.mp4). Think about things like separating line segments by their slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left line vs. the right line. Then, you can average the position of each of the lines and extrapolate to the top and bottom of the lane. This function draws `lines` with `color` and `thickness`. Lines are drawn on the image inplace (mutates the image). 
If you want to make the lines semi-transparent, think about combining this function with the weighted_img() function below """ # for line in lines: # for x1,y1,x2,y2 in line: # cv2.line(img, (x1, y1), (x2, y2), color, thickness) sizeY = img.shape[0] sizeX = img.shape[1] pointsLeft = [] pointsRight = [] for line in lines: for x1,y1,x2,y2 in line: #cv2.line(img, (x1 , y1) , (x2 , y2) , [0, 255, 0], thickness) # Gets the midpoint of a line posX = (x1 + x2) * 0.5 posY = (y1 + y2) * 0.5 # Determines whether the midpoint is loaded on the right or left side of the image and classifies it if posX < sizeX * 0.5 : pointsLeft.append((posX, posY)) else: pointsRight.append((posX, posY)) # Get m and b from linear regression left = stats.linregress(pointsLeft) right = stats.linregress(pointsRight) left_m = left.slope right_m = right.slope left_b = left.intercept right_b = right.intercept # Define the points of left line x = (y - b) / m left_y1 = int(sizeY) left_x1 = int((left_y1 - left_b) / left_m) left_y2 = int(sizeY * 0.6) left_x2 = int((left_y2 - left_b) / left_m) # Define the points of right line x = (y - b) / m right_y1 = int(sizeY) right_x1 = int((right_y1 - right_b) / right_m) right_y2 = int(sizeY * 0.6) right_x2 = int((right_y2 - right_b) / right_m) # Draw two lane lines cv2.line(img, (left_x1 , left_y1 ) , (left_x2 , left_y2 ) , color, thickness) cv2.line(img, (right_x1 , right_y1) , (right_x2 , right_y2) , color, thickness) def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap): """ `img` should be the output of a Canny transform. Returns an image with hough lines drawn. """ lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap) line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) draw_lines(line_img, lines) return line_img # Python 3 has support for cool math symbols. def weighted_img(img, initial_img, α=0.8, β=1., γ=0.): """ `img` is the output of the hough_lines(), An image with lines drawn on it. Should be a blank image (all black) with lines drawn on it. `initial_img` should be the image before any processing. The result image is computed as follows: initial_img * α + img * β + γ NOTE: initial_img and img must be the same shape! """ return cv2.addWeighted(initial_img, α, img, β, γ) ``` ## Test Images Build your pipeline to work on the images in the directory "test_images" **You should make sure your pipeline works well on these images before you try the videos.** ``` import os os.listdir("test_images/") ``` ## Build a Lane Finding Pipeline Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report. Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters. ``` # TODO: Build your pipeline that will draw lane lines on the test_images # then save them to the test_images_output directory. image = mpimg.imread("test_images/"+os.listdir("test_images/")[4]) weighted_image = process_image(image) plt.imshow(weighted_image) ``` ## Test on Videos You know what's cooler than drawing lanes over images? Drawing lanes over video! We can test our solution on two provided videos: `solidWhiteRight.mp4` `solidYellowLeft.mp4` **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. 
Also, consult the forums for more troubleshooting tips.** **If you get an error that looks like this:** ``` NeedDownloadError: Need ffmpeg exe. You can download it by calling: imageio.plugins.ffmpeg.download() ``` **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.** ``` # Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTML def process_image(image): # NOTE: The output you return should be a color image (3 channel) for processing video below # TODO: put your pipeline here, # you should return the final output (image where lines are drawn on lanes) gray = grayscale(image) kernel_size = 9 blur_gray = gaussian_blur(gray, kernel_size) low_threshold = 100 high_threshold = 150 edges = canny(blur_gray, low_threshold, high_threshold) ysize = image.shape[0] xsize = image.shape[1] vertices = np.array([[(xsize * 0.10 , ysize * 0.90), (xsize * 0.46 , ysize * 0.60), (xsize * 0.54 , ysize * 0.60), (xsize * 0.90 , ysize * 0.90)]], dtype=np.int32) # imshape = image.shape # vertices = np.array([[(0,imshape[0]),(0, 0), (imshape[1], 0), (imshape[1],imshape[0])]], dtype=np.int32) # vertices = np.array([[(0,imshape[0]),(450, 320), (490, 320), (imshape[1],imshape[0])]], dtype=np.int32) masked_edges = region_of_interest(edges, vertices) rho = 2 # distance resolution in pixels of the Hough grid theta = np.pi/180 # angular resolution in radians of the Hough grid threshold = 10 # minimum number of votes (intersections in Hough grid cell) min_line_len = 5 #minimum number of pixels making up a line max_line_gap = 5 # maximum gap in pixels between connectable line segments line_image = np.copy(image)*0 # creating a blank to draw lines on line_img = hough_lines(masked_edges, rho, theta, threshold, min_line_len, max_line_gap) weighted_image = weighted_img(line_img, image) return weighted_image ``` Let's try the one with the solid white lane on the right first ... ``` white_output = 'test_videos_output/solidWhiteRight.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5) clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4") white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!! %time white_clip.write_videofile(white_output, audio=False) ``` Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice. ``` HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(white_output)) ``` ## Improve the draw_lines() function **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. 
As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".** **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.** Now for the one with the solid yellow lane on the left. This one's more tricky! ``` yellow_output = 'test_videos_output/solidYellowLeft.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5) clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4') yellow_clip = clip2.fl_image(process_image) %time yellow_clip.write_videofile(yellow_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(yellow_output)) ``` ## Writeup and Submission If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file. ## Optional Challenge Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project! ``` challenge_output = 'test_videos_output/challenge.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds # clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5) clip3 = VideoFileClip('test_videos/challenge.mp4') challenge_clip = clip3.fl_image(process_image) %time challenge_clip.write_videofile(challenge_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(challenge_output)) ```
github_jupyter
``` from imp import reload import autoargs; reload(autoargs); ``` ## argparse made easy! ``` # pass your function and args from your sys.argv, and you're off to the races! def myprint(arg1, arg2): print("arg1:", arg1) print("arg2:", arg2) autoargs.autocall(myprint, ["first", "second"]) # if you want your arguments to be types, use any function that expects a string # and returns the type you want in your arg annotation def str_repeat(s: str, n: int): print((s * n).strip()) autoargs.autocall(str_repeat, ["args are easy!\n", "3"]) # if your args value is a string, it gets split using shlex autoargs.autocall(str_repeat, "'still easy!\n' 3") import functools import operator # varargs are supported too! def product(*args: float): return functools.reduce(operator.mul, args, 1.0) print(autoargs.autocall(product, ["5", "10", "0.5"])) def join(delimiter, *args): return delimiter.join(args) print(autoargs.autocall(join, [", ", "pretty easy", "right?"])) def aggregate(*args: float, op: {'sum', 'mul'}): if op == "sum": return sum(args) elif op == "mul": return product(*args) autoargs.autocall(aggregate, ["--help"]) # kwargs are supported using command-line syntax def land_of_defaults(a="default-a", argb="b default"): print(a, argb) autoargs.autocall(land_of_defaults, []) # => "" (no args in call) autoargs.autocall(land_of_defaults, ['-aOverride!']) # => "-aOverride!" autoargs.autocall(land_of_defaults, ['-a', 'Override!']) # => "-a Override!" autoargs.autocall(land_of_defaults, ['--argb', 'Override!']) # => "--argb Override!" # warning! if an argument has a default, it can only be given via this kwarg syntax # if you want to require a kwarg, use a kwonly-arg def required_arg(normal, default="boring", *, required): print(normal, default, required) autoargs.autocall(required_arg, ["normal", "--required", "val"]) autoargs.autocall(required_arg, ["normal"]) ``` ### Invalid Arg Handling Speaking of errors, invalid arguments are caught by the parser. This means that you get CLI-like error messages, like the user would be expecting if this were a CLI interface. ``` def oops(arg: int): return "%s is an integer!" % arg autoargs.autocall(oops, []) autoargs.autocall(oops, ["spam"]) autoargs.autocall(oops, ["20", "spam"]) ``` ## parser ``` # if you want access to the parser, go right ahead! parser = autoargs.autoparser(myprint) parser parsed = parser.parse_args(["first", "second"]) parsed vars(parsed) ``` ## todo: - parsing a whole module/object (fns become subparsers) - using autoargs to call other module's fns from command line - setup.py - add to pypi - proper docs - all of the above with appropriate testing stay tuned for these and (potentially) other ideas! feel free to add issues
github_jupyter
## Convolutional Layer In this notebook, we visualize four filtered outputs (a.k.a. feature maps) of a convolutional layer. ### Import the image ``` import cv2 import matplotlib.pyplot as plt %matplotlib inline # TODO: Feel free to try out your own images here by changing img_path # to a file path to another image on your computer! img_path = 'images/udacity_sdc.png' #img_path = 'C:/Users/oanag/Pictures/2019/FranceCoteDAzur_2019-04-26/FranceCoteDAzur-134.JPG' # load color image bgr_img = cv2.imread(img_path) # convert to grayscale gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY) # normalize, rescale entries to lie in [0,1] gray_img = gray_img.astype("float32")/255 # plot image plt.imshow(gray_img, cmap='gray') plt.show() ``` ### Define and visualize the filters ``` import numpy as np ## TODO: Feel free to modify the numbers here, to try out another filter! filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]]) print('Filter shape: ', filter_vals.shape) #nicely print matrix print(filter_vals) # Defining four different filters, # all of which are linear combinations of the `filter_vals` defined above # define four filters filter_1 = filter_vals filter_2 = -filter_1 filter_3 = filter_1.T filter_4 = -filter_3 filters = np.array([filter_1, filter_2, filter_3, filter_4]) # For an example, print out the values of filter 1 print(filters) ### do not modify the code below this line ### # visualize all four filters fig = plt.figure(figsize=(10, 5)) for i in range(4): ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[]) ax.imshow(filters[i], cmap='gray') ax.set_title('Filter %s' % str(i+1)) width, height = filters[i].shape for x in range(width): for y in range(height): ax.annotate(str(filters[i][x][y]), xy=(y,x), horizontalalignment='center', verticalalignment='center', color='white' if filters[i][x][y]<0 else 'black') ``` ### Define a convolutional layer Initialize a single convolutional layer so that it contains all your created filters. Note that you are not training this network; you are initializing the weights in a convolutional layer so that you can visualize what happens after a forward pass through this network! ``` import torch import torch.nn as nn import torch.nn.functional as F # define a neural network with a single convolutional layer with four filters class Net(nn.Module): def __init__(self, weight): super(Net, self).__init__() # initializes the weights of the convolutional layer to be the weights of the 4 defined filters k_height, k_width = weight.shape[2:] # assumes there are 4 grayscale filters self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False) self.conv.weight = torch.nn.Parameter(weight) def forward(self, x): # calculates the output of a convolutional layer # pre- and post-activation conv_x = self.conv(x) activated_x = F.relu(conv_x) # returns both layers return conv_x, activated_x # instantiate the model and set the weights weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor) model = Net(weight) # print out the layer in the network print(model) ``` ### Visualize the output of each filter First, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through. 
``` # helper function for visualizing the output of a given layer # default number of filters is 4 def viz_layer(layer, n_filters= 4): fig = plt.figure(figsize=(20, 20)) for i in range(n_filters): ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[]) # grab layer outputs ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray') ax.set_title('Output %s' % str(i+1)) ``` Let's look at the output of a convolutional layer, before and after a ReLu activation function is applied. ``` # plot original image plt.imshow(gray_img, cmap='gray') # visualize all filters fig = plt.figure(figsize=(12, 6)) fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05) for i in range(4): ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[]) ax.imshow(filters[i], cmap='gray') ax.set_title('Filter %s' % str(i+1)) # convert the image into an input Tensor gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1) # get the convolutional layer (pre and post activation) conv_layer, activated_layer = model(gray_img_tensor) # visualize the output of a conv layer viz_layer(conv_layer) # after a ReLu is applied # visualize the output of an activated conv layer viz_layer(activated_layer) ```
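As an optional check that is not part of the original exercise, printing the tensor shapes makes the two ``unsqueeze`` calls and the four output channels easier to follow; this short sketch reuses the objects defined above:

```
# Sketch: sanity-check the shapes used above (H, W are the image height and width).
print(weight.shape)            # torch.Size([4, 1, 4, 4]): 4 filters, 1 input channel, 4x4 kernels
print(gray_img_tensor.shape)   # torch.Size([1, 1, H, W]): batch and channel dims added by unsqueeze
print(conv_layer.shape)        # torch.Size([1, 4, H-3, W-3]): one feature map per filter, no padding
```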
github_jupyter
# A Chaos Game with Triangles

John D. Cook [proposed](https://www.johndcook.com/blog/2017/07/08/the-chaos-game-and-the-sierpinski-triangle/) an interesting "game" from the book *[Chaos and Fractals](https://smile.amazon.com/Chaos-Fractals-New-Frontiers-Science/dp/0387202293)*: start at a vertex of an equilateral triangle. Then move to a new point halfway between the current point and one of the three vertexes of the triangle, chosen at random. Repeat to create *N* points, and plot them. What do you get?

I'll refactor Cook's code a bit and then we'll see:

```
import matplotlib.pyplot as plt
import random

def random_walk(vertexes, N):
    "Walk halfway from current point towards a random vertex; repeat for N points."
    points = [random.choice(vertexes)]
    for _ in range(N-1):
        points.append(midpoint(points[-1], random.choice(vertexes)))
    return points

def show_walk(vertexes, N=5000):
    "Walk halfway towards a random vertex for N points; show results."
    Xs, Ys = transpose(random_walk(vertexes, N))
    Xv, Yv = transpose(vertexes)
    plt.plot(Xs, Ys, 'r.')
    plt.plot(Xv, Yv, 'bs')
    plt.gca().set_aspect('equal')
    plt.gcf().set_size_inches(9, 9)
    plt.axis('off')
    plt.show()

def midpoint(p, q): return ((p[0] + q[0])/2, (p[1] + q[1])/2)

def transpose(matrix): return zip(*matrix)

triangle = ((0, 0), (0.5, (3**0.5)/2), (1, 0))

show_walk(triangle, 20)
```

OK, the first 20 points don't tell me much. What if I try 20,000 points?

```
show_walk(triangle, 20000)
```

Wow! The [Sierpinski Triangle](https://en.wikipedia.org/wiki/Sierpinski_triangle)!

What happens if we start with a different set of vertexes, like a square?

```
square = ((0, 0), (0, 1), (1, 0), (1, 1))

show_walk(square)
```

There doesn't seem to be any structure there. Let's try again to make sure:

```
show_walk(square, 20000)
```

I'm still not seeing anything but random points. How about a right triangle?

```
right_triangle = ((0, 0), (0, 1), (1, 0))

show_walk(right_triangle, 20000)
```

We get a squished Sierpinski triangle. How about a pentagon? (I'm lazy so I had Wolfram Alpha [compute the vertexes](https://www.wolframalpha.com/input/?i=vertexes+of+regular+pentagon).)

```
pentagon = ((0.5, -0.688), (0.809, 0.262), (0., 0.850), (-0.809, 0.262), (-0.5, -0.688))

show_walk(pentagon)
```

To clarify, let's try again with different numbers of points:

```
show_walk(pentagon, 10000)

show_walk(pentagon, 20000)
```

I definitely see a central hole, and five secondary holes surrounding that, and then, maybe 15 holes surrounding those? Or maybe not 15; hard to tell. Is a "Sierpinski Pentagon" a thing? I hadn't heard of it but a [quick search](https://www.google.com/search?q=sierpinski+pentagon) reveals that yes indeed, it is [a thing](http://ecademy.agnesscott.edu/~lriddle/ifs/pentagon/sierngon.htm), and it does have 15 holes surrounding the 5 holes. Let's try the hexagon:

```
hexagon = ((0.5, -0.866), (1, 0), (0.5, 0.866), (-0.5, 0.866), (-1, 0), (-0.5, -0.866))

show_walk(hexagon)

show_walk(hexagon, 20000)
```

You can see a little of the six-fold symmetry, but it is not as clear as the triangle and pentagon.
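Rather than looking coordinates up, we can compute the vertexes of any regular polygon directly; this little helper is my addition here, not part of Cook's code:

```
from math import sin, cos, pi

def regular_polygon(n, radius=1.0, phase=pi/2):
    "Vertexes of a regular n-gon centered at the origin, starting from the top."
    return tuple((radius * cos(phase + 2 * pi * k / n),
                  radius * sin(phase + 2 * pi * k / n))
                 for k in range(n))

show_walk(regular_polygon(5), 20000)  # should look like the pentagon above (up to scale)
show_walk(regular_polygon(7), 20000)  # and a heptagon is now just as easy
```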
github_jupyter
# Part 2: Intro to Private Training with Remote Execution In the last section, we learned about PointerTensors, which create the underlying infrastructure we need for privacy preserving Deep Learning. In this section, we're going to see how to use these basic tools to train our first deep learning model using remote execution. Authors: - Yann Dupis - Twitter: [@YannDupis](https://twitter.com/YannDupis) - Andrew Trask - Twitter: [@iamtrask](https://twitter.com/iamtrask) ### Why use remote execution? Let's say you are an AI startup who wants to build a deep learning model to detect [diabetic retinopathy (DR)](https://ai.googleblog.com/2016/11/deep-learning-for-detection-of-diabetic.html), which is the fastest growing cause of blindness. Before training your model, the first step would be to acquire a dataset of retinopathy images with signs of DR. One approach could be to work with a hospital and ask them to send you a copy of this dataset. However because of the sensitivity of the patients' data, the hospital might be exposed to liability risks. That's where remote execution comes into the picture. Instead of bringing training data to the model (a central server), you bring the model to the training data (wherever it may live). In this case, it would be the hospital. The idea is that this allows whoever is creating the data to own the only permanent copy, and thus maintain control over who ever has access to it. Pretty cool, eh? # Section 2.1 - Private Training on MNIST For this tutorial, we will train a model on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) to classify digits based on images. We can assume that we have a remote worker named Bob who owns the data. ``` import tensorflow as tf import syft as sy hook = sy.TensorFlowHook(tf) bob = sy.VirtualWorker(hook, id="bob") ``` Let's download the MNIST data from `tf.keras.datasets`. Note that we are converting the data from numpy to `tf.Tensor` in order to have the PySyft functionalities. ``` mnist = tf.keras.datasets.mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 x_train, y_train = tf.convert_to_tensor(x_train), tf.convert_to_tensor(y_train) x_test, y_test = tf.convert_to_tensor(x_test), tf.convert_to_tensor(y_test) ``` As decribed in Part 1, we can send this data to Bob with the `send` method on the `tf.Tensor`. ``` x_train_ptr = x_train.send(bob) y_train_ptr = y_train.send(bob) ``` Excellent! We have everything to start experimenting. To train our model on Bob's machine, we just have to perform the following steps: - Define a model, including optimizer and loss - Send the model to Bob - Start the training process - Get the trained model back Let's do it! ``` # Define the model model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax') ]) # Compile with optimizer, loss and metrics model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) ``` Once you have defined your model, you can simply send it to Bob calling the `send` method. It's the exact same process as sending a tensor. ``` model_ptr = model.send(bob) model_ptr ``` Now, we have a pointer pointing to the model on Bob's machine. We can validate that's the case by inspecting the attribute `_objects` on the virtual worker. 
``` bob._objects[model_ptr.id_at_location] ``` Everything is ready to start training our model on this remote dataset. You can call `fit` and pass `x_train_ptr` `y_train_ptr` which are pointing to Bob's data. Note that's the exact same interface as normal `tf.keras`. ``` model_ptr.fit(x_train_ptr, y_train_ptr, epochs=2, validation_split=0.2) ``` Fantastic! you have trained your model acheiving an accuracy greater than 95%. You can get your trained model back by just calling `get` on it. ``` model_gotten = model_ptr.get() model_gotten ``` It's good practice to see if your model can generalize by assessing its accuracy on an holdout dataset. You can simply call `evaluate`. ``` model_gotten.evaluate(x_test, y_test, verbose=2) ``` Boom! The model remotely trained on Bob's data is more than 95% accurate on this holdout dataset. If your model doesn't fit into the Sequential paradigm, you can use Keras's functional API, or even subclass [tf.keras.Model](https://www.tensorflow.org/guide/keras/custom_layers_and_models#building_models) to create custom models. ``` class CustomModel(tf.keras.Model): def __init__(self, num_classes=10): super(CustomModel, self).__init__(name='custom_model') self.num_classes = num_classes self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28)) self.dense_1 = tf.keras.layers.Dense(128, activation='relu') self.dropout = tf.keras.layers.Dropout(0.2) self.dense_2 = tf.keras.layers.Dense(num_classes, activation='softmax') def call(self, inputs, training=False): x = self.flatten(inputs) x = self.dense_1(x) x = self.dropout(x, training=training) return self.dense_2(x) model = CustomModel(10) # need to call the model on dummy data before sending it # in order to set the input shape (required when saving to SavedModel) model.predict(tf.ones([1, 28, 28])) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model_ptr = model.send(bob) model_ptr.fit(x_train_ptr, y_train_ptr, epochs=2, validation_split=0.2) ``` ## Well Done! And voilà! We have trained a Deep Learning model on Bob's data by sending the model to him. Never in this process do we ever see or request access to the underlying training data! We preserve the privacy of Bob!!! # Congratulations!!! - Time to Join the Community! Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways! ### Star PySyft on GitHub The easiest way to help our community is just by starring the Repos! This helps raise awareness of the cool tools we're building. - Star PySyft on GitHub! - [https://github.com/OpenMined/PySyft](https://github.com/OpenMined/PySyft) - Star PySyft-TensorFlow on GitHub! - [https://github.com/OpenMined/PySyft-TensorFlow] ### Join our Slack! The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org) ### Join a Code Project! The best way to contribute to our community is to become a code contributor! At any time you can go to PySyft GitHub Issues page and filter for "Projects". This will show you all the top level Tickets giving an overview of what projects you can join! If you don't want to join a project, but you would like to do a bit of coding, you can also look for more "one off" mini-projects by searching for GitHub issues marked "good first issue". 
- [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject) - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) ### Donate If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups! [OpenMined's Open Collective Page](https://opencollective.com/openmined)
github_jupyter
<h1> Training on Cloud ML Engine </h1> This notebook illustrates distributed training and hyperparameter tuning on Cloud ML Engine. ``` # change these to try this notebook out BUCKET = 'cloud-training-demos-ml' PROJECT = 'cloud-training-demos' REGION = 'us-central1' import os os.environ['BUCKET'] = BUCKET os.environ['PROJECT'] = PROJECT os.environ['REGION'] = REGION %%bash if ! gsutil ls | grep -q gs://${BUCKET}/; then gsutil mb -l ${REGION} gs://${BUCKET} # copy canonical set of preprocessed files if you didn't do previous notebook gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET} fi %bash gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000* ``` Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud ML Engine. <p> <h2> Train on Cloud ML Engine </h2> <p> Training on Cloud ML Engine requires: <ol> <li> Making the code a Python package <li> Using gcloud to submit the training code to Cloud ML Engine </ol> <p> The code in model.py is the same as in the TensorFlow notebook. I just moved it to a file so that I could package it up as a module. (explore the <a href="babyweight/trainer">directory structure</a>). ``` %bash grep "^def" babyweight/trainer/model.py ``` After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop). Even then, this takes about <b>a minute</b> in which you won't see any output ... ``` %bash echo "bucket=${BUCKET}" rm -rf babyweight_trained export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight python -m trainer.task \ --bucket=${BUCKET} \ --output_dir=babyweight_trained \ --job-dir=./tmp \ --pattern="00000-of-" --train_examples=500 ``` Once the code works in standalone mode, you can run it on Cloud ML Engine. Because this is on the entire dataset, it will take a while. The training run took about <b> an hour </b> for me. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section. ``` %bash OUTDIR=gs://${BUCKET}/babyweight/trained_model JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S) echo $OUTDIR $REGION $JOBNAME gsutil -m rm -rf $OUTDIR gcloud ml-engine jobs submit training $JOBNAME \ --region=$REGION \ --module-name=trainer.task \ --package-path=$(pwd)/babyweight/trainer \ --job-dir=$OUTDIR \ --staging-bucket=gs://$BUCKET \ --scale-tier=STANDARD_1 \ --runtime-version=1.4 \ -- \ --bucket=${BUCKET} \ --output_dir=${OUTDIR} \ --train_examples=200000 ``` When I ran it, training finished, and the evaluation happened three times (filter in Stackdriver on the word "dict"): <pre> Saving dict for global step 390632: average_loss = 1.06578, global_step = 390632, loss = 545.55 </pre> The final RMSE was 1.066 pounds. ``` from google.datalab.ml import TensorBoard TensorBoard().start('gs://{}/babyweight/trained_model'.format(BUCKET)) for pid in TensorBoard.list()['pid']: TensorBoard().stop(pid) print 'Stopped TensorBoard with pid {}'.format(pid) ``` <h2> Hyperparameter tuning </h2> <p> All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.xml and pass it as --configFile. This step will take <b>1 hour</b> -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search. 
``` %writefile hyperparam.yaml trainingInput: scaleTier: STANDARD_1 hyperparameters: hyperparameterMetricTag: average_loss goal: MINIMIZE maxTrials: 30 maxParallelTrials: 3 params: - parameterName: batch_size type: INTEGER minValue: 8 maxValue: 512 scaleType: UNIT_LOG_SCALE - parameterName: nembeds type: INTEGER minValue: 3 maxValue: 30 scaleType: UNIT_LINEAR_SCALE - parameterName: nnsize type: INTEGER minValue: 64 maxValue: 512 scaleType: UNIT_LOG_SCALE ``` In reality, you would hyper-parameter tune over your entire dataset, and not on a smaller subset (see --pattern). But because this is a demo, I wanted it to finish quickly. ``` %bash OUTDIR=gs://${BUCKET}/babyweight/hyperparam JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S) echo $OUTDIR $REGION $JOBNAME gsutil -m rm -rf $OUTDIR gcloud ml-engine jobs submit training $JOBNAME \ --region=$REGION \ --module-name=trainer.task \ --package-path=$(pwd)/babyweight/trainer \ --job-dir=$OUTDIR \ --staging-bucket=gs://$BUCKET \ --scale-tier=STANDARD_1 \ --config=hyperparam.yaml \ --runtime-version=1.4 \ -- \ --bucket=${BUCKET} \ --output_dir=${OUTDIR} \ --pattern="00000-of-" --train_examples=5000 %bash gcloud ml-engine jobs describe babyweight_180123_202458 ``` <h2> Repeat training </h2> <p> This time with tuned parameters (note last line) ``` %bash OUTDIR=gs://${BUCKET}/babyweight/trained_model JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S) echo $OUTDIR $REGION $JOBNAME gsutil -m rm -rf $OUTDIR gcloud ml-engine jobs submit training $JOBNAME \ --region=$REGION \ --module-name=trainer.task \ --package-path=$(pwd)/babyweight/trainer \ --job-dir=$OUTDIR \ --staging-bucket=gs://$BUCKET \ --scale-tier=STANDARD_1 \ -- \ --bucket=${BUCKET} \ --output_dir=${OUTDIR} \ --train_examples=200000 --batch_size=35 --nembeds=16 --nnsize=281 ``` Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
github_jupyter
<a href="https://colab.research.google.com/github/Mengxue12/tensorflow-1-public/blob/main/C4/W4/ungraded_labs/C4_W4_Lab_1_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` **Note:** This notebook can run using TensorFlow 2.5.0 ``` #!pip install tensorflow==2.5.0 import tensorflow as tf import numpy as np import matplotlib.pyplot as plt print(tf.__version__) def plot_series(time, series, format="-", start=0, end=None): plt.plot(time[start:end], series[start:end], format) plt.xlabel("Time") plt.ylabel("Value") plt.grid(True) def trend(time, slope=0): return slope * time def seasonal_pattern(season_time): """Just an arbitrary pattern, you can change it if you wish""" return np.where(season_time < 0.4, np.cos(season_time * 2 * np.pi), 1 / np.exp(3 * season_time)) def seasonality(time, period, amplitude=1, phase=0): """Repeats the same pattern at each period""" season_time = ((time + phase) % period) / period return amplitude * seasonal_pattern(season_time) def noise(time, noise_level=1, seed=None): rnd = np.random.RandomState(seed) return rnd.randn(len(time)) * noise_level time = np.arange(4 * 365 + 1, dtype="float32") baseline = 10 series = trend(time, 0.1) baseline = 10 amplitude = 40 slope = 0.05 noise_level = 5 # Create the series series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude) # Update with noise series += noise(time, noise_level, seed=42) split_time = 1000 time_train = time[:split_time] x_train = series[:split_time] time_valid = time[split_time:] x_valid = series[split_time:] window_size = 20 batch_size = 32 shuffle_buffer_size = 1000 def windowed_dataset(series, window_size, batch_size, shuffle_buffer): series = tf.expand_dims(series, axis=-1) ds = tf.data.Dataset.from_tensor_slices(series) ds = ds.window(window_size + 1, shift=1, drop_remainder=True) ds = ds.flat_map(lambda w: w.batch(window_size + 1)) ds = ds.shuffle(shuffle_buffer) ds = ds.map(lambda w: (w[:-1], w[1:])) return ds.batch(batch_size).prefetch(1) def model_forecast(model, series, window_size): ds = tf.data.Dataset.from_tensor_slices(series) ds = ds.window(window_size, shift=1, drop_remainder=True) ds = ds.flat_map(lambda w: w.batch(window_size)) ds = ds.batch(32).prefetch(1) forecast = model.predict(ds) return forecast tf.keras.backend.clear_session() tf.random.set_seed(51) np.random.seed(51) window_size = 30 train_set = windowed_dataset(x_train, window_size, batch_size=128, shuffle_buffer=shuffle_buffer_size) model = tf.keras.models.Sequential([ tf.keras.layers.Conv1D(filters=32, kernel_size=5, strides=1, padding="causal", activation="relu", input_shape=[None, 1]), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)), tf.keras.layers.Dense(1), tf.keras.layers.Lambda(lambda x: x * 200) ]) lr_schedule = tf.keras.callbacks.LearningRateScheduler( lambda epoch: 
1e-8 * 10**(epoch / 20)) optimizer = tf.keras.optimizers.SGD(learning_rate=1e-8, momentum=0.9) model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) history = model.fit(train_set, epochs=100, callbacks=[lr_schedule]) plt.semilogx(history.history["lr"], history.history["loss"]) plt.axis([1e-8, 1e-4, 0, 30]) tf.keras.backend.clear_session() tf.random.set_seed(51) np.random.seed(51) #batch_size = 16 dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size) model = tf.keras.models.Sequential([ tf.keras.layers.Conv1D(filters=32, kernel_size=3, strides=1, padding="causal", activation="relu", input_shape=[None, 1]), tf.keras.layers.LSTM(32, return_sequences=True), tf.keras.layers.LSTM(32, return_sequences=True), tf.keras.layers.Dense(1), tf.keras.layers.Lambda(lambda x: x * 200) ]) optimizer = tf.keras.optimizers.SGD(learning_rate=1e-5, momentum=0.9) model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) history = model.fit(dataset,epochs=500) rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size) rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, rnn_forecast) tf.keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy() import matplotlib.image as mpimg import matplotlib.pyplot as plt #----------------------------------------------------------- # Retrieve a list of list results on training and test data # sets for each training epoch #----------------------------------------------------------- mae=history.history['mae'] loss=history.history['loss'] epochs=range(len(loss)) # Get number of epochs #------------------------------------------------ # Plot MAE and Loss #------------------------------------------------ plt.plot(epochs, mae, 'r') plt.plot(epochs, loss, 'b') plt.title('MAE and Loss') plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.legend(["MAE", "Loss"]) plt.figure() epochs_zoom = epochs[200:] mae_zoom = mae[200:] loss_zoom = loss[200:] #------------------------------------------------ # Plot Zoomed MAE and Loss #------------------------------------------------ plt.plot(epochs_zoom, mae_zoom, 'r') plt.plot(epochs_zoom, loss_zoom, 'b') plt.title('MAE and Loss') plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.legend(["MAE", "Loss"]) plt.figure() ```
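The learning-rate sweep above (the `LearningRateScheduler` that multiplies the rate by a constant factor every epoch) is normally read off the semilog plot, but the best value can also be pulled out programmatically. The short sketch below is an illustration only and assumes the `history` object returned by the sweep run is still in scope; Keras records the scheduled rate under the `"lr"` key, which is the same key the plot above uses.

```
# Hedged sketch: pick a learning rate from the sweep results recorded in `history`.
import numpy as np

lrs = np.array(history.history["lr"])
losses = np.array(history.history["loss"])

best_idx = int(np.argmin(losses))
print(f"lowest loss {losses[best_idx]:.3f} reached at learning rate {lrs[best_idx]:.2e}")

# In practice you would choose a rate somewhat below the minimum-loss point,
# i.e. before the loss curve becomes unstable (e.g. lrs[best_idx] / 10).
```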
# MIDAS Examples If you're reading this, you probably already know that MIDAS stands for Mixed Data Sampling, and it is a technique for creating time-series forecast models that allows you to mix series of different frequencies (i.e., you can use monthly data as predictors for a quarterly series, or daily data as predictors for a monthly series, etc.). The general approach has been described in a series of papers by Ghysels, Santa-Clara, Valkanov and others. This notebook attempts to recreate some of the examples from the paper [_Forecasting with Mixed Frequencies_](https://research.stlouisfed.org/publications/review/2010/11/01/forecasting-with-mixed-frequencies/) by Michelle T. Armesto, Kristie M. Engemann, and Michael T. Owyang. ``` %matplotlib inline import datetime import numpy as np import pandas as pd from midas.mix import mix_freq from midas.adl import estimate, forecast, midas_adl, rmse ``` # MIDAS ADL This package currently implements the MIDAS ADL (autoregressive distributed lag) method. We'll start with an example using quarterly GDP and monthly payroll data. We'll then show the basic steps in setting up and fitting this type of model, although in practice you'll probably use the top-level __midas_adl__ function to do forecasts. TODO: MIDAS equation and discussion # Example 1: GDP vs Non-Farm Payroll ``` gdp = pd.read_csv('../tests/data/gdp.csv', parse_dates=['DATE'], index_col='DATE') pay = pd.read_csv('../tests/data/pay.csv', parse_dates=['DATE'], index_col='DATE') gdp.tail() pay.tail() ``` ## Figure 1 This is a variation of Figure 1 from the paper comparing year-over-year growth of GDP and employment. ``` gdp_yoy = ((1. + (np.log(gdp.GDP) - np.log(gdp.GDP.shift(3)))) ** 4) - 1. emp_yoy = ((1. + (np.log(pay.PAY) - np.log(pay.PAY.shift(1)))) ** 12) - 1. df = pd.concat([gdp_yoy, emp_yoy], axis=1) df.columns = ['gdp_yoy', 'emp_yoy'] df[['gdp_yoy','emp_yoy']].loc['1980-1-1':].plot(figsize=(15,4), style=['o','-']) ``` ## Mixing Frequencies The first step is to do the actual frequency mixing. In this case we're mixing monthly data (employment) with quarterly data (GDP). This may sometimes be useful to do directly, but again you'll probably use __midas_adl__ to do forecasting. ``` gdp['gdp_growth'] = (np.log(gdp.GDP) - np.log(gdp.GDP.shift(1))) * 100. pay['emp_growth'] = (np.log(pay.PAY) - np.log(pay.PAY.shift(1))) * 100. y, yl, x, yf, ylf, xf = mix_freq(gdp.gdp_growth, pay.emp_growth, "3m", 1, 3, start_date=datetime.datetime(1985,1,1), end_date=datetime.datetime(2009,1,1)) x.head() ``` The arguments here are as follows: - First, the dependent (low-frequency) and independent (high-frequency) data are given as Pandas series, and they are assumed to be indexed by date. - xlag: The number of lags for the high-frequency variable - ylag: The number of lags for the low-frequency variable (the autoregressive part) - horizon: How much the high-frequency data is lagged before frequency mixing - start_date, end_date: The start and end date over which the model is fitted. If these are outside the range of the low-frequency data, they will be adjusted. The _horizon_ argument is a little tricky (the argument name was retained from the MATLAB version). It is used both to align the data and to do _nowcasting_ (more on that later). For example, if it's September 2017, then the latest GDP data from FRED will be for Q2 and this will be dated 2017-04-01. The latest monthly data from non-farm payroll will be for August, which will be dated 2017-08-01.
If we aligned just on dates, the payroll data for April (04-01), March (03-01), and February (02-01) would be aligned with Q2 (since xlag = "3m"), but what we want is June, May, and April, so here the horizon argument is 3, indicating that the high-frequency data should be lagged three months before being mixed with the quarterly data. ### Fitting the Model Because of the form of the MIDAS model, fitting the model requires using non-linear least squares. For now, if you call the __estimate__ function directly, you'll get back a result of type scipy.optimize.optimize.OptimizeResult: ``` res = estimate(y, yl, x, poly='beta') res.x ``` You can also call __forecast__ directly. This will use the optimization results returned from __estimate__ to produce a forecast for every date in the index of the forecast inputs (here xf and ylf): ``` fc = forecast(xf, ylf, res, poly='beta') forecast_df = fc.join(yf) forecast_df['gap'] = forecast_df.yfh - forecast_df.gdp_growth forecast_df gdp.join(fc)[['gdp_growth','yfh']].loc['2005-01-01':].plot(style=['-o','-+'], figsize=(12, 4)) ``` ### Comparison against univariate ARIMA model ``` import statsmodels.tsa.api as sm m = sm.AR(gdp['1975-01-01':'2011-01-01'].gdp_growth,) r = m.fit(maxlag=1) r.params fc_ar = r.predict(start='2005-01-01') fc_ar.name = 'xx' df_p = gdp.join(fc)[['gdp_growth','yfh']] df_p.join(fc_ar)[['gdp_growth','yfh','xx']].loc['2005-01-01':].plot(style=['-o','-+'], figsize=(12, 4)) ``` ## The midas_adl function The __midas\_adl__ function wraps up frequency-mixing, fitting, and forecasting into one process. The default mode of forecasting is _fixed_, which means that the data between start_date and end_date will be used to fit the model, and then any data in the input beyond end_date will be used for forecasting. For example, here we're fitting from the beginning of 1985 to the end of 2008, but the gdp data extends to Q1 of 2011 so we get nine forecast points. Three monthly lags of the high-frequency data are specified along with one quarterly lag of GDP. ``` rmse_fc, fc = midas_adl(gdp.gdp_growth, pay.emp_growth, start_date=datetime.datetime(1985,1,1), end_date=datetime.datetime(2009,1,1), xlag="3m", ylag=1, horizon=3) rmse_fc ``` You can also change the polynomial used to weight the MIDAS coefficients. The default is 'beta', but you can also specify exponential Almon weighting ('expalmon') or beta with non-zero last term ('betann'). ``` rmse_fc, fc = midas_adl(gdp.gdp_growth, pay.emp_growth, start_date=datetime.datetime(1985,1,1), end_date=datetime.datetime(2009,1,1), xlag="3m", ylag=1, horizon=3, poly='expalmon') rmse_fc ``` ### Rolling and Recursive Forecasting As mentioned above, the default forecasting method is _fixed_, where the model is fit once and then all data after end_date is used for forecasting. Two other methods are supported: _rolling window_ and _recursive_. The _rolling window_ method is just what it sounds like. The start_date and end_date are used for the initial window, and then each new forecast moves that window forward by one period so that you're always doing one step ahead forecasts. Of course, to do anything useful, this also assumes that the date range of the dependent data extends beyond end_date accounting for the lags implied by _horizon_. Generally, you'll get lower RMSE values here since the forecasts are always one step ahead.
``` results = {h: midas_adl(gdp.gdp_growth, pay.emp_growth, start_date=datetime.datetime(1985,10,1), end_date=datetime.datetime(2009,1,1), xlag="3m", ylag=1, horizon=3, forecast_horizon=h, poly='beta', method='rolling') for h in (1, 2, 5)} results[1][0] ``` The _recursive_ method is similar except that the start date does not change, so the range over which the fitting happens increases for each new forecast. ``` results = {h: midas_adl(gdp.gdp_growth, pay.emp_growth, start_date=datetime.datetime(1985,10,1), end_date=datetime.datetime(2009,1,1), xlag="3m", ylag=1, horizon=3, forecast_horizon=h, poly='beta', method='recursive') for h in (1, 2, 5)} results[1][0] ``` ## Nowcasting Per the manual for the MatLab Matlab Toolbox Version 1.0, you can do _nowcasting_ (or MIDAS with leads) basically by adjusting the _horizon_ parameter. For example, below we change the _horizon_ paremter to 1, we're now forecasting with a one month horizon rather than a one quarter horizon: ``` rmse_fc, fc = midas_adl(gdp.gdp_growth, pay.emp_growth, start_date=datetime.datetime(1985,1,1), end_date=datetime.datetime(2009,1,1), xlag="3m", ylag=1, horizon=1) rmse_fc ``` Not surprisingly the RMSE drops considerably. ## CPI vs. Federal Funds Rate __UNDER CONSTRUCTION: Note that these models take considerably longer to fit__ ``` cpi = pd.read_csv('CPIAUCSL.csv', parse_dates=['DATE'], index_col='DATE') ffr = pd.read_csv('DFF_2_Vintages_Starting_2009_09_28.txt', sep='\t', parse_dates=['observation_date'], index_col='observation_date') cpi.head() ffr.head(10) cpi_yoy = ((1. + (np.log(cpi.CPIAUCSL) - np.log(cpi.CPIAUCSL.shift(1)))) ** 12) - 1. cpi_yoy.head() df = pd.concat([cpi_yoy, ffr.DFF_20090928 / 100.], axis=1) df.columns = ['cpi_growth', 'dff'] df.loc['1980-1-1':'2010-1-1'].plot(figsize=(15,4), style=['-+','-.']) cpi_growth = (np.log(cpi.CPIAUCSL) - np.log(cpi.CPIAUCSL.shift(1))) * 100. y, yl, x, yf, ylf, xf = mix_freq(cpi_growth, ffr.DFF_20090928, "1m", 1, 1, start_date=datetime.datetime(1975,10,1), end_date=datetime.datetime(1991,1,1)) x.head() res = estimate(y, yl, x) fc = forecast(xf, ylf, res) fc.join(yf).head() pd.concat([cpi_growth, fc],axis=1).loc['2008-01-01':'2010-01-01'].plot(style=['-o','-+'], figsize=(12, 4)) results = {h: midas_adl(cpi_growth, ffr.DFF_20090928, start_date=datetime.datetime(1975,7,1), end_date=datetime.datetime(1990,11,1), xlag="1m", ylag=1, horizon=1, forecast_horizon=h, method='rolling') for h in (1, 2, 5)} (results[1][0], results[2][0], results[5][0]) results[1][1].plot(figsize=(12,4)) results = {h: midas_adl(cpi_growth, ffr.DFF_20090928, start_date=datetime.datetime(1975,10,1), end_date=datetime.datetime(1991,1,1), xlag="1m", ylag=1, horizon=1, forecast_horizon=h, method='recursive') for h in (1, 2, 5)} results[1][0] results[1][1].plot() ```
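To make the weighting schemes used above ('beta' and 'expalmon') a bit more concrete: a MIDAS-ADL regression of a low-frequency series $y_t$ on $K$ lags of a high-frequency series $x^{(m)}$ can be written schematically as

$$ y_t = \beta_0 + \lambda\, y_{t-1} + \beta_1 \sum_{k=0}^{K-1} w_k(\theta)\, x^{(m)}_{t-h-k} + \varepsilon_t , $$

where the lag weights $w_k(\theta)$ come from a low-dimensional polynomial (this is only a schematic form; notation varies across papers). The sketch below shows the textbook normalized Beta and exponential Almon weights. It is an illustration of the math only, not necessarily how the `midas` package implements them internally.

```
# Illustrative sketch of the two standard MIDAS lag polynomials (not the package's own code).
import numpy as np

def beta_weights(theta1, theta2, nlags):
    """Normalized Beta lag polynomial evaluated on an equally spaced grid in (0, 1)."""
    x = np.linspace(1e-6, 1 - 1e-6, nlags)
    w = x ** (theta1 - 1) * (1 - x) ** (theta2 - 1)
    return w / w.sum()

def exp_almon_weights(theta1, theta2, nlags):
    """Normalized exponential Almon lag polynomial."""
    k = np.arange(1, nlags + 1)
    w = np.exp(theta1 * k + theta2 * k ** 2)
    return w / w.sum()

# Three monthly lags (xlag="3m"): both parameterizations give declining weights here.
print(beta_weights(1.0, 5.0, 3))
print(exp_almon_weights(0.1, -0.05, 3))
```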
``` import sys, os if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'): !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/setup_colab.sh -O- | bash !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week1_intro/submit.py !touch .setup_complete # This code creates a virtual display to draw game images on. # It will have no effect if your machine has a monitor. if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0: !bash ../xvfb start os.environ['DISPLAY'] = ':1' import numpy as np import matplotlib.pyplot as plt %matplotlib inline ``` ### OpenAI Gym We're gonna spend several next weeks learning algorithms that solve decision processes. We are then in need of some interesting decision problems to test our algorithms. That's where OpenAI gym comes into play. It's a python library that wraps many classical decision problems including robot control, videogames and board games. So here's how it works: ``` import gym env = gym.make("MountainCar-v0") env.reset() plt.imshow(env.render('rgb_array')) print("Observation space:", env.observation_space) print("Action space:", env.action_space) ``` Note: if you're running this on your local machine, you'll see a window pop up with the image above. Don't close it, just alt-tab away. ### Gym interface The three main methods of an environment are * __reset()__ - reset environment to initial state, _return first observation_ * __render()__ - show current environment state (a more colorful version :) ) * __step(a)__ - commit action __a__ and return (new observation, reward, is done, info) * _new observation_ - an observation right after commiting the action __a__ * _reward_ - a number representing your reward for commiting action __a__ * _is done_ - True if the MDP has just finished, False if still in progress * _info_ - some auxilary stuff about what just happened. Ignore it ~~for now~~. ``` obs0 = env.reset() print("initial observation code:", obs0) # Note: in MountainCar, observation is just two numbers: car position and velocity print("taking action 2 (right)") new_obs, reward, is_done, _ = env.step(2) print("new observation code:", new_obs) print("reward:", reward) print("is game over?:", is_done) # Note: as you can see, the car has moved to the right slightly (around 0.0005) ``` ### Play with it Below is the code that drives the car to the right. However, if you simply use the default policy, the car will not reach the flag at the far right due to gravity. __Your task__ is to fix it. Find a strategy that reaches the flag. You are not required to build any sophisticated algorithms for now, feel free to hard-code :) ``` from IPython import display # Create env manually to set time limit. Please don't change this. TIME_LIMIT = 250 env = gym.wrappers.TimeLimit( gym.envs.classic_control.MountainCarEnv(), max_episode_steps=TIME_LIMIT + 1, ) actions = {'left': 0, 'stop': 1, 'right': 2} def policy(obs, t): # Write the code for your policy here. You can use the observation # (a tuple of position and velocity), the current time step, or both, # if you want. position, velocity = obs if velocity > 0: a = actions['right'] else: a = actions['left'] # This is an example policy. You can try running it, but it will not work. # Your goal is to fix that. 
return a plt.figure(figsize=(4, 3)) display.clear_output(wait=True) obs = env.reset() for t in range(TIME_LIMIT): plt.gca().clear() action = policy(obs, t) # Call your policy obs, reward, done, _ = env.step(action) # Pass the action chosen by the policy to the environment # We don't do anything with reward here because MountainCar is a very simple environment, # and reward is a constant -1. Therefore, your goal is to end the episode as quickly as possible. # Draw game image on display. plt.imshow(env.render('rgb_array')) display.clear_output(wait=True) display.display(plt.gcf()) print(obs) if done: print("Well done!") break else: print("Time limit exceeded. Try again.") display.clear_output(wait=True) from submit import submit_interface submit_interface(policy, <EMAIL>, <TOKEN>) ```
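As a complement to the exercise above, here is a minimal sketch of the reset/step loop described in the "Gym interface" section, using a purely random agent. It assumes the classic Gym API used throughout this notebook (`reset()` returning the observation and `step()` returning a 4-tuple) and is only meant to illustrate the interface, not to solve MountainCar.

```
# Minimal sketch of the reset/step interaction loop with a random agent.
import gym

env = gym.make("MountainCar-v0")
obs = env.reset()
total_reward = 0.0

for t in range(200):
    action = env.action_space.sample()          # pick a random action
    obs, reward, done, info = env.step(action)  # apply it and observe the result
    total_reward += reward
    if done:
        break

print("episode finished after", t + 1, "steps; total reward:", total_reward)
env.close()
```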
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/25.Date_Normalizer.ipynb) ## Colab Setup ``` import json from google.colab import files license_keys = files.upload() with open(list(license_keys.keys())[0]) as f: license_keys = json.load(f) license_keys['JSL_VERSION'] %%capture for k,v in license_keys.items(): %set_env $k=$v !wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jsl_colab_setup.sh !bash jsl_colab_setup.sh import json import os from pyspark.ml import Pipeline, PipelineModel from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * import sparknlp_jsl import sparknlp from sparknlp.util import * from sparknlp.pretrained import ResourceDownloader from pyspark.sql import functions as F import pandas as pd spark = sparknlp_jsl.start(license_keys['SECRET']) spark ``` # **Date Normalizer** This new annotator transforms date chunks into a normalized date with the format YYYY/MM/DD. It identifies dates in chunk annotations and transforms those dates to the format YYYY/MM/DD. We are going to create date chunks with different formats: ``` dates = [ '08/02/2018', '11/2018', '11/01/2018', '12Mar2021', 'Jan 30, 2018', '13.04.1999', '3April 2020', 'next monday', 'today', 'next week' ] from pyspark.sql.types import StringType df_dates = spark.createDataFrame(dates,StringType()).toDF('ner_chunk') ``` We are going to transform that text into documents in Spark NLP. ``` document_assembler = DocumentAssembler().setInputCol('ner_chunk').setOutputCol('document') documents_DF = document_assembler.transform(df_dates) ``` After that, we are going to transform those documents into chunks. ``` from sparknlp.functions import map_annotations_col chunks_df = map_annotations_col(documents_DF.select("document","ner_chunk"), lambda x: [Annotation('chunk', a.begin, a.end, a.result, a.metadata, a.embeddings) for a in x], "document", "chunk_date", "chunk") chunks_df.select('chunk_date').show(truncate=False) ``` Now we are going to normalize those chunks using the DateNormalizer. ``` date_normalizer = DateNormalizer().setInputCols('chunk_date').setOutputCol('date') date_normaliced_df = date_normalizer.transform(chunks_df) ``` We are going to show how the dates are normalized. ``` dateNormalizedClean = date_normaliced_df.selectExpr("ner_chunk","date.result as dateresult","date.metadata as metadata") dateNormalizedClean.withColumn("dateresult", dateNormalizedClean["dateresult"] .getItem(0)).withColumn("metadata", dateNormalizedClean["metadata"] .getItem(0)['normalized']).show(truncate=False) ``` We can configure `anchorDateYear`, `anchorDateMonth` and `anchorDateDay` for the relative dates. In the following example we will use 2021/02/27 as the anchor date; to make that possible we need to set `anchorDateYear` to 2021, `anchorDateMonth` to 2 and `anchorDateDay` to 27. The configuration is shown in the following example.
``` date_normalizer = DateNormalizer().setInputCols('chunk_date').setOutputCol('date')\ .setAnchorDateDay(27)\ .setAnchorDateMonth(2)\ .setAnchorDateYear(2021) date_normaliced_df = date_normalizer.transform(chunks_df) dateNormalizedClean = date_normaliced_df.selectExpr("ner_chunk","date.result as dateresult","date.metadata as metadata") dateNormalizedClean.withColumn("dateresult", dateNormalizedClean["dateresult"] .getItem(0)).withColumn("metadata", dateNormalizedClean["metadata"] .getItem(0)['normalized']).show(truncate=False) ``` As you can see, relative dates like `next monday`, `today` and `next week` take `2021/02/27` as the reference date.
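To make the anchor-date idea more concrete, here is a plain-Python illustration (not Spark NLP code) of how such relative expressions can be resolved against the anchor date configured above. The exact resolution rules used by DateNormalizer may differ; this only shows the concept.

```
# Plain-Python illustration of resolving relative expressions against an anchor date.
from datetime import date, timedelta

anchor = date(2021, 2, 27)  # matches setAnchorDateYear(2021), setAnchorDateMonth(2), setAnchorDateDay(27)

def next_weekday(anchor_date, weekday):
    """Next occurrence of `weekday` (Monday=0) strictly after the anchor date."""
    days_ahead = (weekday - anchor_date.weekday() + 7) % 7 or 7
    return anchor_date + timedelta(days=days_ahead)

print("today       ->", anchor.strftime("%Y/%m/%d"))
print("next monday ->", next_weekday(anchor, 0).strftime("%Y/%m/%d"))
print("next week   ->", (anchor + timedelta(weeks=1)).strftime("%Y/%m/%d"))
```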
# Code to download The Guardian UK data and clean data for text analysis @Jorge de Leon This script allows you to download news articles that match your parameters from the Guardian newspaper, https://www.theguardian.com/us. ## Set-up ``` import os import re import glob import json import requests import pandas as pd from glob import glob from os import makedirs from textblob import TextBlob from os.path import join, exists from datetime import date, timedelta os.chdir("..") import nltk nltk.download('punkt') nltk.download('wordnet') nltk.download('stopwords') from nltk import sent_tokenize, word_tokenize from nltk.stem.snowball import SnowballStemmer from nltk.stem.wordnet import WordNetLemmatizer from nltk.corpus import stopwords ``` ## API and news articles requests This section contains the code that will be used to download articles from the Guardian website. the initial variables will be determined as user-defined parameters. ``` #Enter API and parameters - these parameters can be obtained by playing around with the Guardian API tool: # https://open-platform.theguardian.com/explore/ # Set up initial and end date start_date_global = date(2000, 1, 1) end_date_global = date(2020, 5, 17) query = "JPMorgan" term = ('stock') #Enter API key, endpoint and parameters my_api_key = open("..\\input files\\creds_guardian.txt").read().strip() api_endpoint = "http://content.guardianapis.com/search?" my_params = { 'from-date': '', 'to-date': '', 'show-fields': 'bodyText', 'q': query, 'page-size': 200, 'api-key': my_api_key } articles_dir = join('theguardian','jpmorgan') makedirs(articles_dir, exist_ok=True) # day iteration from here: # http://stackoverflow.com/questions/7274267/print-all-day-dates-between-two-dates start_date = start_date_global end_date = end_date_global dayrange = range((end_date - start_date).days + 1) for daycount in dayrange: dt = start_date + timedelta(days=daycount) datestr = dt.strftime('%Y-%m-%d') fname = join(articles_dir, datestr + '.json') if not exists(fname): # then let's download it print("Downloading", datestr) all_results = [] my_params['from-date'] = datestr my_params['to-date'] = datestr current_page = 1 total_pages = 1 while current_page <= total_pages: print("...page", current_page) my_params['page'] = current_page resp = requests.get(api_endpoint, my_params) data = resp.json() all_results.extend(data['response']['results']) # if there is more than one page current_page += 1 total_pages = data['response']['pages'] with open(fname, 'w') as f: print("Writing to", fname) # re-serialize it for pretty indentation f.write(json.dumps(all_results, indent=2)) #Read all json files that will be concatenated test_files = sorted(glob('theguardian/jpmorgan/*.json')) #intialize empty list that we will append dataframes to all_files = [] #write a for loop that will go through each of the file name through globbing and the end result will be the list #of dataframes for file in test_files: try: articles = pd.read_json(file) all_files.append(articles) except pd.errors.EmptyDataError: print('Note: filename.csv ws empty. 
Skipping') continue #will skip the rest of the bloc and move to next file #create dataframe with data from json files theguardian_rawdata = pd.concat(all_files, axis=0, ignore_index=True) ``` ## Text Analysis ``` #Drop empty columns theguardian_rawdata = theguardian_rawdata.iloc[:,0:12] #show types of media that was downloaded by type theguardian_rawdata['type'].unique() #filter only for articles theguardian_rawdata = theguardian_rawdata[theguardian_rawdata['type'].str.match('article',na=False)] #remove columns that do not contain relevant information for analysis theguardian_dataset = theguardian_rawdata.drop(['apiUrl','id', 'isHosted', 'pillarId', 'pillarName', 'sectionId', 'sectionName', 'type','webTitle', 'webUrl'], axis=1) #Modify the column webPublicationDate to Date and the fields to string and lower case theguardian_dataset["date"] = pd.to_datetime(theguardian_dataset["webPublicationDate"]).dt.strftime('%Y-%m-%d') theguardian_dataset['fields'] = theguardian_dataset['fields'].astype(str).str.lower() #Clean the articles from URLS, remove punctuaction and numbers. theguardian_dataset['fields'] = theguardian_dataset['fields'].str.replace('<.*?>','') # remove HTML tags theguardian_dataset['fields'] = theguardian_dataset['fields'].str.replace('[^\w\s]','') # remove punc. #Generate sentiment analysis for each article #Using TextBlob obtain polarity theguardian_dataset['sentiment_polarity'] = theguardian_dataset['fields'].apply(lambda row: TextBlob(row).sentiment.polarity) #Using TextBlob obtain subjectivity theguardian_dataset['sentiment_subjectivity'] = theguardian_dataset['fields'].apply(lambda row: TextBlob(row).sentiment.subjectivity) #Remove numbers from text theguardian_dataset['fields'] = theguardian_dataset['fields'].str.replace('\d+','') # remove numbers #Then I will tokenize each word and remover stop words theguardian_dataset['tokenized_fields'] = theguardian_dataset.apply(lambda row: nltk.word_tokenize(row['fields']), axis=1) #Stop words stop_words=set(stopwords.words("english")) #Remove stop words theguardian_dataset['tokenized_fields'] = theguardian_dataset['tokenized_fields'].apply(lambda x: [item for item in x if item not in stop_words]) #Count number of words and create a column with the most common 5 words per article from collections import Counter theguardian_dataset['high_recurrence'] = theguardian_dataset['tokenized_fields'].apply(lambda x: [k for k, v in Counter(x).most_common(5)]) #Create a word count for the word "stock" theguardian_dataset['word_ocurrence'] = theguardian_dataset['tokenized_fields'].apply(lambda x: [w for w in x if re.search(term, w)]) theguardian_dataset['word_count'] = theguardian_dataset['word_ocurrence'].apply(len) #Create a count of the total number of words theguardian_dataset['total_words'] = theguardian_dataset['tokenized_fields'].apply(len) #Create new table with average polarity, subjectivity, count of the word "stock" per day guardian_microsoft = theguardian_dataset.groupby('date')['sentiment_polarity','sentiment_subjectivity','word_count','total_words'].agg('mean') #Create a variable for the number of articles per day count_articles = theguardian_dataset count_articles['no_articles'] = count_articles.groupby(['date'])['fields'].transform('count') count_articles = count_articles[["date","no_articles"]] count_articles_df = count_articles.drop_duplicates(subset = "date", keep = "first", inplace=False) #Join tables by date guardian_microsoft = guardian_microsoft.merge(count_articles_df, on='date', how ='left') #Save dataframes into CSV 
theguardian_dataset.to_csv('theguardian/jpmorgan/theguardian_jpmorgan_text.csv', encoding='utf-8') guardian_microsoft.to_csv('theguardian/jpmorgan/theguardian_jpmorgan_data.csv', encoding='utf-8') ```
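To see the cleaning, sentiment and tokenization steps in isolation, the following tiny sketch applies the same operations used above to a single made-up sentence (hypothetical example text, not actual Guardian data). It assumes the NLTK resources downloaded in the set-up cell are available.

```
# Tiny end-to-end illustration of the cleaning/sentiment/tokenization steps used above.
import re
from textblob import TextBlob
from nltk import word_tokenize
from nltk.corpus import stopwords

text = "JPMorgan stock rose sharply after strong quarterly earnings, analysts said."
clean = re.sub(r'[^\w\s]', '', text.lower())   # lower-case and strip punctuation
clean = re.sub(r'\d+', '', clean)              # strip numbers

blob = TextBlob(clean)
stop_words = set(stopwords.words('english'))
tokens = [t for t in word_tokenize(clean) if t not in stop_words]

print("polarity:     ", blob.sentiment.polarity)
print("subjectivity: ", blob.sentiment.subjectivity)
print("tokens:       ", tokens)
print("'stock' count:", sum(1 for t in tokens if re.search('stock', t)))
```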
``` import torch import matplotlib.pyplot as plt import numpy as np import seaborn as sns from pathlib import Path sns.color_palette("tab10") sns.set(rc={ "figure.dpi": 150, "text.usetex": True, "xtick.labelsize": "small", "ytick.labelsize": "small", "axes.labelsize": "small", "axes.titlesize": "small", "figure.titlesize": "medium", "axes.titlepad": 2.0, "xtick.major.pad": -4.0, #"figure.subplot.hspace": 0.0, "figure.constrained_layout.use": True, }) def attribute_to_matrix(sequences, attribute): attribute_list = [] for seq in sequences: attribute_list.append(seq[attribute]) return np.array(attribute_list) def cold_starts(init_times): num_activations = init_times.size num_cold_starts = np.sum(init_times > 0) return num_activations, num_cold_starts def app_name(filename: str) -> str: app_name = filename[filename.find(":")+4:filename.find("_fetched_")] if "_rand" in filename: app_name += "_rand" return app_name def get_index(applist: list, filename: str) -> int: return applist.index(app_name(filename)) # Number of activations vs number of cold starts. # Configuration dir = "final_high_load_n_1000" #dir = "final_batched_high_load_n_400" files = Path(f"../data/{dir}").glob('*.pkl') for f in sorted(files): print(f) with open(f, "rb") as stream: data = torch.load(stream) num_activations, num_cold_starts = cold_starts(attribute_to_matrix(data["sequences"], "init_times")) #print(f"Number of activations: {num_activations}") #print(f"Number of cold starts: {num_cold_starts}") print(f"Cold start to activation ratio: {round(num_cold_starts / num_activations, 4)}") # Average inter-event time for each step. # Configuration dir = "final_low_load_n_1000" #dir = "final_high_load_n_1000" save = False def boxplot(applist: list, file2data: dict, title: str, ylabel: str) -> plt.Figure: fig, ax = plt.subplots(len(applist), 1) fig.set_size_inches(5.5, 6.8) fig.suptitle(title) for filename, data in file2data.items(): if app_name(filename) in applist: sns.boxplot(ax=ax[get_index(applist, filename)], data=data, width=0.5, showfliers=False, linewidth=0.9) ax[get_index(applist, filename)].set_title(app_name(filename).replace("_", "\_")) ax[get_index(applist, filename)].set_ylabel(ylabel) ax[get_index(applist, filename)].set_xticklabels(list(range(1, data.shape[-1] + 1))) ax[-1].set_xlabel("i") return fig prestr = "ll_" if "high_load" in dir: prestr = "hl_" poststr = " (no cold starts)" if "high_load" in dir: poststr = " (30\% cold starts)" apps = ["sequence", "parallel_small", "tree_small", "fanout_small", "parallel_large", "tree_large", "fanout_large"] apps_rand = ["sequence_rand", "parallel_small_rand", "tree_small_rand", "fanout_small_rand", "parallel_large_rand", "tree_large_rand", "fanout_large_rand"] inter_dict = {} init_dict = {} wait_dict = {} files = Path(f"../data/{dir}").glob('*.pkl') for f in sorted(files): with open(f, "rb") as stream: data = torch.load(stream) inter_dict[str(f)] = np.diff(attribute_to_matrix(data["sequences"], "arrival_times"), axis=-1) init_dict[str(f)] = attribute_to_matrix(data["sequences"], "init_times") wait_dict[str(f)] = attribute_to_matrix(data["sequences"], "wait_times") / 1000 title = fr"Distribution of inter-event time $\tau_i${poststr}" ylabel = "ms" fig1 = boxplot(apps, inter_dict, title, ylabel) if save: plt.savefig(f"data_plots/{prestr}dist_inter.pdf") fig2 = boxplot(apps_rand, inter_dict, title, ylabel) if save: plt.savefig(f"data_plots/{prestr}dist_inter_rand.pdf") if "high_load" in dir: title = fr"Distribution of initTime $i_i${poststr}" ylabel = "ms" fig1 = 
boxplot(apps, init_dict, title, ylabel) if save: plt.savefig(f"data_plots/{prestr}dist_init.pdf") fig2 = boxplot(apps_rand, init_dict, title, ylabel) if save: plt.savefig(f"data_plots/{prestr}dist_init_rand.pdf") title = fr"Distribution of waitTime $w_i${poststr}" ylabel = "sec" fig1 = boxplot(apps, wait_dict, title, ylabel) if save: plt.savefig(f"data_plots/{prestr}dist_wait.pdf") fig2 = boxplot(apps_rand, wait_dict, title, ylabel) if save: plt.savefig(f"data_plots/{prestr}dist_wait_rand.pdf") # Distribution of inter-event, init and wait times. # Configuration dir = "final_low_load_n_1000" #dir = "final_high_load_n_1000" #dir = "final_batched_high_load_n_400" plot_init_times = True plot_wait_times = True save = False def compose_title(filename): def app_name(filename): app_name = filename[filename.find(":")+4:filename.find("_fetched_")] return app_name.replace("_", "\_") title = app_name(filename) if "_rand" in filename: title += "\_rand" if "_b_" in filename: title += " (30% cold starts)" return title apps = ["sequence", "parallel_small", "tree_small", "fanout_small", "parallel_large", "tree_large", "fanout_large", "sequence_rand", "parallel_small_rand", "tree_small_rand", "fanout_small_rand", "parallel_large_rand", "tree_large_rand", "fanout_large_rand"] files = Path(f"../data/{dir}").glob('*.pkl') fig_inter, ax_inter = plt.subplots(14, 1) fig_inter.set_size_inches(6.4, 18) fig_init, ax_init = plt.subplots(14, 1) fig_init.set_size_inches(6.4, 18) fig_wait, ax_wait = plt.subplots(14, 1) fig_wait.set_size_inches(6.4, 18) for i, f in enumerate(sorted(files)): with open(f, "rb") as stream: data = torch.load(stream) if "small" in str(f): numb_traces = 200 else: numb_traces = 100 inter_event_times = np.diff(attribute_to_matrix(data["sequences"], "arrival_times"), axis=-1)[:numb_traces].reshape(-1) init_times = attribute_to_matrix(data["sequences"], "init_times")[:numb_traces].reshape(-1) wait_times = attribute_to_matrix(data["sequences"], "wait_times")[:numb_traces].reshape(-1) / 1000 max_range = int(np.quantile(inter_event_times, 0.99)) ax_inter[get_index(apps, str(f))].hist(inter_event_times, bins=300, range=(0.0, max_range), log=True) ax_inter[get_index(apps, str(f))].set_xlabel("milliseconds") ax_inter[get_index(apps, str(f))].set_ylabel("count") ax_inter[get_index(apps, str(f))].title.set_text(f"Distribution over all inter-event times - {compose_title(str(f))}") if plot_init_times: max_range = int(np.quantile(init_times, 0.99)) ax_init[get_index(apps, str(f))].hist(init_times, bins=300, range=(0.0, max_range), log=True) ax_init[get_index(apps, str(f))].set_xlabel("milliseconds") ax_init[get_index(apps, str(f))].set_ylabel("count") ax_init[get_index(apps, str(f))].title.set_text(f"Distribution over all initTimes - {compose_title(str(f))}") if plot_wait_times: max_range = int(np.quantile(wait_times, 0.99)) ax_wait[get_index(apps, str(f))].hist(wait_times, bins=300, range=(0.0, max_range), log=True) ax_wait[get_index(apps, str(f))].set_xlabel("seconds") ax_wait[get_index(apps, str(f))].set_ylabel("count") ax_wait[get_index(apps, str(f))].title.set_text(f"Distribution over all waitTimes - {compose_title(str(f))}") #fig_inter.tight_layout() #fig_init.tight_layout() #fig_wait.tight_layout() if save: filename = f"{str(f)[str(f).rfind('/')+1:str(f).rfind('.pkl')]}.png" plt.savefig(filename) # Standard deviation of inter-event, init and wait time # Configuration dir = "final_low_load_n_1000" files = Path(f"../data/{dir}").glob('*.pkl') def app_name(filename): app_name = 
filename[filename.find(":")+4:filename.find("_fetched_")] if "rand" in filename: app_name += "_rand" return app_name for f in sorted(files): with open(f, "rb") as stream: data = torch.load(stream) inter_event_times = np.diff(attribute_to_matrix(data["sequences"], "arrival_times"), axis=-1) init_times = attribute_to_matrix(data["sequences"], "init_times") wait_times = attribute_to_matrix(data["sequences"], "wait_times") #/ 1000 print(f"app={app_name(str(f))}\n" f"std_inter_event={np.std(inter_event_times)}\n" f"std_init={np.std(init_times)}\n" f"std_wait={np.std(wait_times)}\n") ```
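As a quick sanity check of the cold-start ratio computation, the snippet below runs the `cold_starts()` helper defined at the top of this notebook on a small synthetic matrix of init times (the numbers are made up purely for illustration).

```
# Sanity check of cold_starts() on made-up init times; initTime > 0 marks a cold start.
import numpy as np

synthetic_init_times = np.array([
    [0.0, 12.5, 0.0],
    [0.0,  0.0, 48.1],
])

num_activations, num_cold_starts = cold_starts(synthetic_init_times)
print(num_activations, num_cold_starts)             # -> 6 2
print(round(num_cold_starts / num_activations, 4))  # -> 0.3333
```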
Neuroon cross-validation ------------------------ Neuroon and PSG recordings were simultaneously collected over the course of two nights. This analysis will show whether Neuroon is able to accurately classify sleep stages. The PSG classification will be a benchmark against which Neuroon performance will be tested. "The AASM Manual for the Scoring of Sleep and Associated Events" identifies 5 sleep stages: * Stage W (Wakefulness) * Stage N1 (NREM 1) * Stage N2 (NREM 2) * Stage N3 (NREM 3) * Stage R (REM) <img src="images/sleep_stages.png"> These stages can be identified following the guidelines in [1] either visually or digitally using combined information from EEG, EOG and EMG. Extensive research is being conducted on developing automated and simpler methods for sleep stage classification suitable for everyday home use (for a review see [2]). Automatic methods based on single-channel EEG, which is the Neuroon category, were shown to work accurately when compared to PSG scoring [3]. [1] Berry RB BR, Gamaldo CE, Harding SM, Lloyd RM, Marcus CL, Vaughn BV; for the American Academy of Sleep Medicine. The AASM Manual for the Scoring of Sleep and Associated Events: Rules, Terminology and Technical Specifications, Version 2.0.3. Darien, IL: American Academy of Sleep Medicine; 2014. [2] Van De Water, A. T. M., Holmes, A., & Hurley, D. A. (2011). Objective measurements of sleep for non-laboratory settings as alternatives to polysomnography - a systematic review. Journal of Sleep Research, 20, 183–200. [3] Berthomier, C., Drouot, X., Herman-Stoïca, M., Berthomier, P., Prado, J., Bokar-Thire, D., d'Ortho, M.P. (2007). Automatic analysis of single-channel sleep EEG: validation in healthy individuals. Sleep, 30(11), 1587–1595. Signal time-synchronization using cross-correlation -------------------------------------------------- Neuroon and PSG were recorded on devices with (probably) unsynchronized clocks. First, we will use a cross-correlation method [4] to find the time offset between the two recordings. [4] Fridman, L., Brown, D. E., Angell, W., Abdić, I., Reimer, B., & Noh, H. Y. (2016). Automated synchronization of driving data using vibration and steering events. Pattern Recognition Letters, 75, 9-15. Define the cross-correlation function - code from: (http://lexfridman.com/blogs/research/2015/09/18/fast-cross-correlation-and-time-series-synchronization-in-python/) for other examples see: (http://stackoverflow.com/questions/4688715/find-time-shift-between-two-similar-waveforms) ``` %matplotlib inline %load_ext autoreload %autoreload 2 import numpy as np import matplotlib.pyplot as plt from itertools import tee import pandas as pd import seaborn as sns from numpy.fft import fft, ifft, fft2, ifft2, fftshift from collections import OrderedDict from datetime import timedelta plt.rcParams['figure.figsize'] = (9.0, 5.0) from parse_signal import load_psg, load_neuroon # Cross-correlation function.
Equivalent to numpy.correlate(x,y mode = 'full') but faster for large arrays # This function was tested against other cross correlation methods in -- LINK TO OTHER NOTEBOOK def cross_correlation_using_fft(x, y): f1 = fft(x) f2 = fft(np.flipud(y)) cc = np.real(ifft(f1 * f2)) return fftshift(cc) # shift < 0 means that y starts 'shift' time steps before x # shift > 0 means that y starts 'shift' time steps after x def compute_shift(x, y): assert len(x) == len(y) c = cross_correlation_using_fft(x, y) assert len(c) == len(x) zero_index = int(len(x) / 2) - 1 shift = zero_index - np.argmax(c) return shift,c def cross_correlate(): # Load the signal from hdf database and parse it to pandas series with datetime index psg_signal = load_psg('F3-A2') neuroon_signal = load_neuroon() # Resample the signal to 100hz, to have the same length for cross correlation psg_10 = psg_signal.resample('10ms').mean() neuroon_10 = neuroon_signal.resample('10ms').mean() # Create ten minute intervals dates_range = pd.date_range(psg_signal.head(1).index.get_values()[0], neuroon_signal.tail(1).index.get_values()[0], freq="10min") # Convert datetime interval boundaries to string with only hours, minutes and seconds dates_range = [d.strftime('%H:%M:%S') for d in dates_range] all_coefs = [] # iterate over overlapping pairs of 10 minutes boundaries for start, end in pairwise(dates_range): # cut 10 minutes piece of signal neuroon_cut = neuroon_10.between_time(start, end) psg_cut = psg_10.between_time(start, end) # Compute the correlation using fft convolution shift, coeffs = compute_shift(neuroon_cut, psg_cut) #normalize the coefficients because they will be shown on the same heatmap and need a common color scale all_coefs.append((coeffs - coeffs.mean()) / coeffs.std()) #print('max corr at shift %s is at sample %i'%(start, shift)) all_coefs = np.array(all_coefs) return all_coefs, dates_range # This function is used to iterate over a list, taking two consecutive items at each iteration def pairwise(iterable): "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = tee(iterable) next(b, None) return zip(a, b) # Construct a matrix where each row represents a 10 minute window from the recording # and each column represent correlation coefficient between neuroon and psg signals offset by samples number. # 0 samples offset coefficient is stored at the middle column -1. Negative offset and positive offset span left and right from the center. # offset < 0 means that psg starts 'shift' time steps before neuroon # offset > 0 means that psg starts 'shift' time steps after neuroon coeffs_matrix, dates = cross_correlate() from plotting_collection import plot_crosscorrelation_heatmap #Plot part of the coefficients matrix centered around the max average correlation for all 10 minute windows plot_crosscorrelation_heatmap(coeffs_matrix, dates) ``` Hipnogram time-delay -------------------- From the crosscorrelation of the eeg signals we can see the two devices are off by 2 minutes 41 seconds. Now we'll see if there is a point in time where the hipnograms are most simmilar. The measure of hipnogram simmilarity will be the sum of times when two devices classified the same sleep stage. 
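Before running this on the real recordings, here is a toy illustration of the overlap measure defined above, using made-up epoch labels. It simply counts, for a few candidate shifts, how many epochs carry the same stage label in both hypnograms; `np.roll` wraps around at the edges, which is acceptable for a short synthetic example but not for the real analysis below.

```
# Toy illustration (made-up labels) of the stage-overlap measure at different time shifts.
import numpy as np

psg     = np.array(['W', 'N1', 'N2', 'N2', 'N3', 'N3', 'R', 'R', 'N2', 'W'])
neuroon = np.array(['W', 'W',  'N1', 'N2', 'N2', 'N3', 'N3', 'R', 'R', 'N2'])

for shift in range(-2, 3):
    matches = int(np.sum(psg == np.roll(neuroon, shift)))
    print(f"shift {shift:+d} epochs -> {matches}/10 epochs in the same stage")
# The maximum sits at shift = -1, i.e. the Neuroon labels lag the PSG labels by one epoch.
```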
``` import parse_hipnogram as ph def get_hipnogram_intersection(neuroon_hipnogram, psg_hipnogram, time_shift): neuroon_hipnogram.index = neuroon_hipnogram.index + timedelta(seconds = int(time_shift)) combined = psg_hipnogram.join(neuroon_hipnogram, how = 'outer', lsuffix = '_psg', rsuffix = '_neuro') combined.loc[:, ['stage_num_psg', 'stage_name_psg', 'stage_num_neuro', 'stage_name_neuro', 'event_number_psg', 'event_number_neuro']] = combined.loc[:, ['stage_num_psg', 'stage_name_psg', 'stage_num_neuro', 'stage_name_neuro', 'event_number_psg', 'event_number_neuro']].fillna( method = 'bfill') combined.loc[:, ['stage_shift_psg', 'stage_shift_neuro']] = combined.loc[:, ['stage_shift_psg', 'stage_shift_neuro']].fillna( value = 'inside') # From the occupied room number subtract the room occupied by another mouse. combined['overlap'] = combined['stage_num_psg'] - combined['stage_num_neuro'] same_stage = combined.loc[combined['overlap'] == 0] same_stage.loc[:, 'event_union'] = same_stage['event_number_psg'] + same_stage['event_number_neuro'] # common_window = np.array([neuroon_hipnogram.tail(1).index.get_values()[0] - psg_hipnogram.head(1).index.get_values()[0]],dtype='timedelta64[m]').astype(int)[0] all_durations = OrderedDict() for stage_name, intersection in same_stage.groupby('event_union'): # Subtract the first row timestamp from the last to get the duration. Store as the duration in milliseconds. duration = (intersection.index.to_series().iloc[-1]- intersection.index.to_series().iloc[0]).total_seconds() stage_id = intersection.iloc[0, intersection.columns.get_loc('stage_name_neuro')] # Keep appending results to a list stored in a dict. Check if the list exists, if not create it. if stage_id not in all_durations.keys(): all_durations[stage_id] = [duration] else: all_durations[stage_id].append(duration) means = OrderedDict() stds = OrderedDict() sums = OrderedDict() stages_sum = 0 #Adding it here so its first in ordered dict and leftmost on the plot sums['stages_sum'] = 0 for key, value in all_durations.items(): #if key != 'wake': means[key] = np.array(value).mean() stds[key] = np.array(value).std() sums[key] = np.array(value).sum() stages_sum += np.array(value).sum() sums['stages_sum'] = stages_sum # Divide total seconds by 60 to get minutes #return stages_sum return sums, means, stds def intersect_with_shift(): psg_hipnogram = ph.parse_psg_stages() neuroon_hipnogram = ph.parse_neuroon_stages() intersection = OrderedDict([('wake', []), ('rem',[]), ('N1',[]), ('N2',[]), ('N3', []), ('stages_sum', [])]) shift_range = np.arange(-500, 100, 10) for shift in shift_range: sums, _, _ = get_hipnogram_intersection(neuroon_hipnogram.copy(), psg_hipnogram.copy(), shift) for stage, intersect_dur in sums.items(): intersection[stage].append(intersect_dur) return intersection, shift_range def plot_intersection(intersection, shift_range): psg_hipnogram = ph.parse_psg_stages() neuroon_hipnogram = ph.parse_neuroon_stages() stage_color_dict = {'N1' : 'royalblue', 'N2' :'forestgreen', 'N3' : 'coral', 'rem' : 'plum', 'wake' : 'lightgrey', 'stages_sum': 'dodgerblue'} fig, axes = plt.subplots(2) zscore_ax = axes[0].twinx() for stage in ['rem', 'N2', 'N3', 'wake']: intersect_sum = np.array(intersection[stage]) z_scored = (intersect_sum - intersect_sum.mean()) / intersect_sum.std() zscore_ax.plot(shift_range, z_scored, color = stage_color_dict[stage], label = stage, alpha = 0.5, linestyle = '--') max_overlap = shift_range[np.argmax(intersection['stages_sum'])] fig.suptitle('max overlap at %i seconds 
offset'%max_overlap) axes[0].plot(shift_range, intersection['stages_sum'], label = 'stages sum', color = 'dodgerblue') axes[0].axvline(max_overlap, color='k', linestyle='--') axes[0].set_ylabel('time in the same sleep stage') axes[0].set_xlabel('offset in seconds') axes[0].legend(loc = 'center right') zscore_ax.grid(b=False) zscore_ax.legend() sums0, means0, stds0 = get_hipnogram_intersection(neuroon_hipnogram.copy(), psg_hipnogram.copy(), 0) # width = 0.35 ind = np.arange(5) colors_inorder = ['dodgerblue', 'lightgrey', 'forestgreen', 'coral', 'plum'] #Plot the non shifted overlaps axes[1].bar(left = ind, height = list(sums0.values()),width = width, alpha = 0.8, tick_label =list(sums0.keys()), edgecolor = 'black', color= colors_inorder) sumsMax, meansMax, stdsMax = get_hipnogram_intersection(neuroon_hipnogram.copy(), psg_hipnogram.copy(), max_overlap) # Plot the shifted overlaps axes[1].bar(left = ind +width, height = list(sumsMax.values()),width = width, alpha = 0.8, tick_label =list(sumsMax.keys()), edgecolor = 'black', color = colors_inorder) axes[1].set_xticks(ind + width) plt.tight_layout() intersection, shift_range = intersect_with_shift() plot_intersection(intersection, shift_range) ``` hipnogram analysis indicates the same direction of time delay - psg is 4 minutes 10 seconds before neuroon. The time delay is larger for the hipnograms than for the signals by 1 minute 30 seconds. Todo: * add second axis with percentages * see if the overlap increased in proportion with offset * plot parts of time corrected signals and hipnograms * add different correlation tests notebook * add spectral and pca analysis
# PI-ICR analysis Created on 17 July 2019 for the ISOLTRAP experiment - V1.1 (24 June 2020): Maximum likelihood estimation was simplified based on SciPy PDF's and the CERN-ROOT6 minimizer via the iminuit package (→ great performance) - V1.2 (20 February 2021): Preparations for scientific publication and iminuit v2 update integration @author: Jonas Karthein<br> @contact: [email protected]<br> @license: MIT license ### References [1]: https://doi.org/10.1007/s00340-013-5621-0 [2]: https://doi.org/10.1103/PhysRevLett.110.082501 [3]: https://doi.org/10.1007/s10751-019-1601-z [4]: https://doi.org/10.1103/PhysRevLett.124.092502 [1] S. Eliseev, _et al._ Appl. Phys. B (2014) 114: 107.<br> [2] S. Eliseev, _et al._ Phys. Rev. Lett. 110, 082501 (2013).<br> [3] J. Karthein, _et al._ Hyperfine Interact (2019) 240: 61.<br> ### Application The code was used to analyse data for the following publications: [3] J. Karthein, _et al._ Hyperfine Interact (2019) 240: 61.<br> [4] V. Manea and J. Karthein, _et al._ Phys. Rev. Lett. 124, 092502 (2020)<br> [5] M. Mougeot, _et al._ in preparation (2020)<br> ### Introduction The following code was written to reconstruct raw Phase-Imaging Ion-Cyclotron-Resonance (PI-ICR) data, to fit PI-ICR position information and calculate a frequency using the pattern 1/2 scheme described in Ref. [1], and to determine a frequency ratio between a measurement ion and a reference ion. Additionally, the code allows analyzing isomeric states separated in pattern 2. ### Required software and libraries The following code was written in Python 3.7. The required libraries are listed below with a rough description of their task in the code. It doesn't claim to be a full description of the libraries. * pandas (data storage and calculation) * numpy (calculation) * matplotlib (plotting) * scipy (PDFs, least squares estimation) * configparser (configuration file processing) * jupyter (Python notebook environment) * iminuit (CERN-ROOT6 minimizer) All packages can be fetched using pip: ``` !pip3 install --user pandas numpy matplotlib scipy configparser jupyter iminuit ``` Instead of the regular jupyter environment, one can also use CERN's SWAN service or Google Colab. ``` google_colab = False if google_colab: try: from google.colab import drive drive.mount('/content/drive') %cd /content/drive/My\ Drive/Colab/pi-icr/ except: %cd ~/cernbox/Documents/Colab/pi-icr/ ``` ### Data files Specify whether the analysis involves one or two states separated in pattern 2 by commenting out the case that does not apply in line 10 or 11. Then enter the file paths for all your data files without the `*.txt` extension. In the following, `ioi` represents the ion of interest, and `ref` the reference ion.
``` %config InlineBackend.figure_format ='retina' import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import pickle, os # analysis = {'ioi_g': {},'ref': {}} analysis = {'ioi_g': {},'ioi_m': {},'ref': {}} files_ioi_g = ['data/ioi_ground/85Rb_c_000', 'data/ioi_ground/85Rb_002', 'data/ioi_ground/85Rb_004', 'data/ioi_ground/85Rb_006'] # files_ioi_m = ['data/ioi_isomer/101In_c_000', # 'data/ioi_isomer/101In_005'] files_ref = ['data/ref/133Cs_c_000', 'data/ref/133Cs_003', 'data/ref/133Cs_005', 'data/ref/133Cs_007'] latex_ioi_g = '$^{88}$Rb' # latex_ioi_m = '$^{101}$In$^m$' latex_ref = '$^{133}$Cs' ``` ### Load pre-analyzed data from file or reconstruct raw data All files are loaded and reconstructed in one big dictionary of dictionaries. It contains besides the positions and timestamps also information about the measurement conditions (excitation frequencies, rounds etc). One can load a whole beamtime at once. Center files must be indicated by a `_c_` in the name (e.g. regular name: `101In_001.txt` $\rightarrow$ center name `101In_c_000.txt`). All the data is at later stages saved in a `pickle` file. This enables quick loading of the data dictionary without the need of re-reconstructing the data. The reconstruction code is parallelized and can be found in the subfolder `bin/reconstruction.py` ``` from bin.reconstruction import PIICR piicr = PIICR() if os.path.isfile('data/data-save.p'): analysis = pickle.load(open('data/data-save.p','rb')) print('\nLoading finished!') else: for file in files_ioi_g: analysis['ioi_g'].update({file: piicr.prepare(file)}) if analysis['ioi_m'] != {}: for file in files_ioi_m: analysis['ioi_m'].update({file: piicr.prepare(file)}) for file in files_ref: analysis['ref'].update({file: piicr.prepare(file)}) print('\nReconstruction finished!') ``` ### Individual file selection The analysis dictionary contains all files. The analysis however is intended to be performed on a file-by-file basis. Please select the individual files here in the variable `file_name`. ``` # load P1, P2 and C data in panda dataframes for selected file # file_name = files_ioi_g[1] # file_name = files_ioi_m[1] # file_name = files_ref[1] file_name = files_ioi_g[3] print('Selected file:',file_name) if 'ground' in file_name: df_p1 = pd.DataFrame(analysis['ioi_g'][file_name]['p1'], columns=['event','x','y','time']) df_p2 = pd.DataFrame(analysis['ioi_g'][file_name]['p2'], columns=['event','x','y','time']) df_c = pd.DataFrame(analysis['ioi_g'][file_name.split('_0', 1)[0]+'_c_000']['c'], columns=['event','x','y','time']) elif 'isomer' in file_name: df_p1 = pd.DataFrame(analysis['ioi_m'][file_name]['p1'], columns=['event','x','y','time']) df_p2 = pd.DataFrame(analysis['ioi_m'][file_name]['p2'], columns=['event','x','y','time']) df_c = pd.DataFrame(analysis['ioi_m'][file_name.split('_0', 1)[0]+'_c_000']['c'], columns=['event','x','y','time']) else: df_p1 = pd.DataFrame(analysis['ref'][file_name]['p1'], columns=['event','x','y','time']) df_p2 = pd.DataFrame(analysis['ref'][file_name]['p2'], columns=['event','x','y','time']) df_c = pd.DataFrame(analysis['ref'][file_name.split('_0', 1)[0]+'_c_000']['c'], columns=['event','x','y','time']) ``` ### Manual space and time cut Please perform a rough manual space cut for each file to improve results on the automatic space cutting tool. This is necessary if one deals with two states in pattern two or if there is a lot of background. This selection will be ellipsoidal. 
Additionally, please perform a rough time of flight (ToF) cut. ``` # manual_space_cut = [x_peak_pos, x_peak_spread, y_peak_pos, y_peak_spread] manual_space_cut = {'data/ioi_ground/85Rb_002': [150, 150, 100, 150], 'data/ioi_ground/85Rb_004': [150, 150, 100, 150], 'data/ioi_ground/85Rb_006': [150, 150, 100, 150], 'data/ref/133Cs_003': [120, 150, 80, 150], 'data/ref/133Cs_005': [120, 150, 80, 150], 'data/ref/133Cs_007': [120, 150, 80, 150]} # manual_tof_cut = [tof_min, tof_max] manual_tof_cut = [20, 50] # manual_z_cut <= number of ions in the trap manual_z_cut = 5 ``` ### Automatic time and space cuts based on Gaussian distribution This section contains all cuts in time and space in different steps. 1. In the time domain contaminants are removed by fitting a gaussian distribution via maximum likelihood estimation to the largest peak in the ToF spectrum and cutting +/- 5 $\sigma$ (change cut range in lines 70 & 71). The ToF distribution has to be binned first before the maximum can be found, but the fit is performed on the unbinned data set. 2. Manual space cut is applied for pattern 1 and pattern 2 (not for the center spot) 3. Outlyers/wrongly excited ions are removed +/- 3 $\sigma$ by measures of a simple mean in x and y after applying the manual cut (change cut range in lines). 4. Ejections with more than `manual_z_cut` number of ions in the trap (without taking into account the detector efficiency) are rejected (= z-class cut) ``` %config InlineBackend.figure_format ='retina' import matplotlib as mpl from scipy.stats import norm from iminuit import Minuit # Utopia LaTeX font with greek letters mpl.rc('font', family='serif', serif='Linguistics Pro') mpl.rc('text', usetex=False) mpl.rc('mathtext', fontset='custom', rm='Linguistics Pro', it='Linguistics Pro:italic', bf='Linguistics Pro:bold') mpl.rcParams.update({'font.size': 18}) col = ['#FFCC00', '#FF2D55', '#00A2FF', '#61D935', 'k', 'grey', 'pink'] # yellow, red, blue, green df_list = [df_p1, df_p2, df_c] pattern = ['p1', 'p2', 'c'] bin_time_df = [0,0,0] # [p1,p2,c] list of dataframes containing the time-binned data result_t = [0,0,0] # [p1,p2,c] list of MLE fit result dicts cut_df = [0,0,0] # [p1,p2,c] list of dataframes containing the time- and space-cut data excludes_df = [0,0,0] # [p1,p2,c] list of dataframes containing the time- and space-cut excluded data fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(12, 15)) for df_nr in range(len(df_list)): ############################## ### BINNING, FITTING TOF DISTR ############################## bin_time_df[df_nr] = pd.DataFrame(pd.value_counts(pd.cut(df_list[df_nr].time, bins=np.arange(manual_tof_cut[0], manual_tof_cut[1],0.02))).sort_index()).rename(index=str, columns={'time': 'counts'}).reset_index(drop=True) bin_time_df[df_nr]['time'] = np.arange(manual_tof_cut[0]+0.01,manual_tof_cut[1]-0.01,0.02) # fit gaussian to time distribution using unbinned maximum likelihood estimation def NLL_1D(mean, sig): '''Negative log likelihood function for (n=1)-dimensional Gaussian distribution.''' return( -np.sum(norm.logpdf(x=data_t, loc=mean, scale=sig)) ) def Start_Par(data): '''Starting parameter based on simple mean of 1D numpy array.''' return(np.array([data.mean(), # meanx data.std()])) #rho # minimize negative log likelihood function first for the symmetric case data_t = df_list[df_nr][(df_list[df_nr].time > bin_time_df[df_nr].time[bin_time_df[df_nr].counts.idxmax()] - 1.0) & (df_list[df_nr].time < bin_time_df[df_nr].time[bin_time_df[df_nr].counts.idxmax()] + 1.0)].time.to_numpy() 
result_t[df_nr] = Minuit(NLL_1D, mean=Start_Par(data_t)[0], sig=Start_Par(data_t)[1]) result_t[df_nr].errors = (0.1, 0.1) # initital step size result_t[df_nr].limits =[(None, None), (None, None)] # fit ranges result_t[df_nr].errordef = Minuit.LIKELIHOOD # MLE definition (instead of Minuit.LEAST_SQUARES) result_t[df_nr].migrad() # finds minimum of mle function result_t[df_nr].hesse() # computes errors for p in result_t[df_nr].parameters: print("{} = {:3.5f} +/- {:3.5f}".format(p, result_t[df_nr].values[p], result_t[df_nr].errors[p])) ############################## ### VISUALIZE TOF DISTRIBUTION # kind='bar' is VERY time consuming -> use kind='line' instead! ############################## # whole distribution bin_time_df[df_nr].plot(x='time', y='counts', kind='line', xticks=np.arange(manual_tof_cut[0],manual_tof_cut[1]+1,5), ax=axes[df_nr,0]) # reduced peak plus fit bin_time_df[df_nr][bin_time_df[df_nr].counts.idxmax()-50:bin_time_df[df_nr].counts.idxmax()+50].plot(x='time', y='counts', kind='line', ax=axes[df_nr,1]) pdf_x = np.arange(bin_time_df[df_nr].time[bin_time_df[df_nr].counts.idxmax()-50], bin_time_df[df_nr].time[bin_time_df[df_nr].counts.idxmax()+51], (bin_time_df[df_nr].time[bin_time_df[df_nr].counts.idxmax()+51] -bin_time_df[df_nr].time[bin_time_df[df_nr].counts.idxmax()-50])/100) pdf_y = norm.pdf(pdf_x, result_t[df_nr].values['mean'], result_t[df_nr].values['sig']) axes[df_nr,0].plot(pdf_x, pdf_y/pdf_y.max()*bin_time_df[df_nr].counts.max(), 'r', label='PDF') axes[df_nr,1].plot(pdf_x, pdf_y/pdf_y.max()*bin_time_df[df_nr].counts.max(), 'r', label='PDF') # mark events in t that will be cut away (+/- 3 sigma = 99.73% of data) bin_time_df[df_nr][(bin_time_df[df_nr].time < result_t[df_nr].values['mean'] - 3*result_t[df_nr].values['sig']) | (bin_time_df[df_nr].time > result_t[df_nr].values['mean'] + 3*result_t[df_nr].values['sig'])].plot(x='time', y='counts', kind='scatter', ax=axes[df_nr,0], c='y', marker='x', s=50, label='excluded') bin_time_df[df_nr][(bin_time_df[df_nr].time < result_t[df_nr].values['mean'] - 3*result_t[df_nr].values['sig']) | (bin_time_df[df_nr].time > result_t[df_nr].values['mean'] + 3*result_t[df_nr].values['sig'])].plot(x='time', y='counts', kind='scatter', ax=axes[df_nr,1], c='y', marker='x', s=50, label='excluded') # legend title shows total number of events and reduced number of events axes[df_nr,0].legend(title='total: {}'.format(bin_time_df[df_nr].counts.sum()),loc='upper right', fontsize=16) axes[df_nr,1].legend(title='considered: {}'.format(bin_time_df[df_nr].counts.sum()-bin_time_df[df_nr][(bin_time_df[df_nr].time < result_t[df_nr].values['mean'] - 3*result_t[df_nr].values['sig']) | (bin_time_df[df_nr].time > result_t[df_nr].values['mean'] + 3*result_t[df_nr].values['sig'])].counts.sum()),loc='upper left', fontsize=16) ############################## ### APPYING ALL CUTS ############################## # cutting in t: mean +/- 5 sigma cut_df[df_nr] = df_list[df_nr][(df_list[df_nr].time > (result_t[df_nr].values['mean'] - 5*result_t[df_nr].values['sig']))& (df_list[df_nr].time < (result_t[df_nr].values['mean'] + 5*result_t[df_nr].values['sig']))] len1 = cut_df[df_nr].shape[0] # applying manual cut in x and y: if df_nr < 2: # only for p1 and p2, not for c cut_df[df_nr] = cut_df[df_nr][((cut_df[df_nr].x-manual_space_cut[file_name][0])**2 + (cut_df[df_nr].y-manual_space_cut[file_name][2])**2) < manual_space_cut[file_name][1]*manual_space_cut[file_name][3]] len2 = cut_df[df_nr].shape[0] # applyig automatic cut in x and y: mean +/- 3 std in an ellipsoidal cut 
cut_df[df_nr] = cut_df[df_nr][((cut_df[df_nr].x-cut_df[df_nr].x.mean())**2 + (cut_df[df_nr].y-cut_df[df_nr].y.mean())**2) < 3*cut_df[df_nr].x.std()*3*cut_df[df_nr].y.std()] len3 = cut_df[df_nr].shape[0] # applying automatic z-class-cut (= cut by number of ions per event) for z>5 ions per event to reduce space-charge effects: cut_df[df_nr] = cut_df[df_nr][cut_df[df_nr].event.isin(cut_df[df_nr].event.value_counts()[cut_df[df_nr].event.value_counts() <= 6].index)] # printing the reduction of the number of ions per file in each of the cut steps print('\n{}: data size: {} -> time cut: {} -> manual space cut: {} -> automatic space cut: {} -> z-class-cut: {}\n'.format(pattern[df_nr], df_list[df_nr].shape[0], len1, len2, len3, cut_df[df_nr].shape[0])) # saves excluded data (allows visual checking later) excludes_df[df_nr] = pd.concat([df_list[df_nr], cut_df[df_nr]]).drop_duplicates(keep=False).reset_index(drop=True) plt.savefig('{}-tof.pdf'.format(file_name)) plt.show() ``` ### Spot fitting 2D multivariate Gaussian maximum likelihood estimations of the cleaned pattern 1, pattern 2 and center spot positions are performed using SciPy PDFs and ROOT's MINUIT minimizer (via iminuit). All uncut data are displayed as semi-transparent blue points. This conveys the point density through the shade of blue without the need of binning the data (= reducing the information; also: binning is much more time-consuming). The cut data are marked with a black "x" at the position of the blue point. These points are not considered in the fit (represented by the red 6-$\sigma$ band) but allow for an additional check of the cutting functions. The scale of the MCP-position plots is given in the time unit of the position-sensitive MCP data. There is no need to convert it into a mm unit since only the angle is of interest. ``` %config InlineBackend.figure_format ='retina' # activate interactive matplotlib plot -> uncomment line below!
# %matplotlib notebook import pickle, os from scipy.stats import multivariate_normal, linregress, pearsonr from scipy.optimize import minimize import numpy as np from iminuit import Minuit # open preanalyzed dataset if existing if os.path.isfile('data/data-save.p'): analysis = pickle.load(open('data/data-save.p','rb')) df_list = [df_p1, df_p2, df_c] result = [{},{},{}] root_res = [0,0,0] parameters = ['meanx', 'meany', 'sigx', 'sigy', 'theta'] fig2, axes2 = plt.subplots(nrows=3, ncols=1, figsize=(7.5, 20)) piicr_scheme_names = ['p1','p2','c'] ############################## ### Prepare maximum likelihood estimation ############################## def Rot(theta): '''Rotation (matrix) of angle theta to cartesian coordinates.''' return np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) def NLL_2D(meanx, meany, sigx, sigy, theta): '''Negative log likelihood function for (n=2)-dimensional Gaussian distribution for Minuit.''' cov = Rot(theta) @ np.array([[np.power(sigx,2),0],[0,np.power(sigy,2)]]) @ Rot(theta).T return( -np.sum(multivariate_normal.logpdf(x=data, mean=np.array([meanx, meany]), cov=cov, allow_singular=True)) ) def NLL_2D_scipy(param): '''Negative log likelihood function for (n=2)-dimensional Gaussian distribution for SciPy.''' meanx, meany, sigx, sigy, theta = param cov = Rot(theta) @ np.array([[np.power(sigx,2),0],[0,np.power(sigy,2)]]) @ Rot(theta).T return( -np.sum(multivariate_normal.logpdf(x=data, mean=np.array([meanx, meany]), cov=cov, allow_singular=True)) ) def Start_Par(data): '''Starting parameter based on simple linear regression and 2D numpy array.''' # simple linear regression to guess the rotation angle based on slope slope, intercept, r_value, p_value, std_err = linregress(data[:, 0], data[:, 1]) theta_guess = -np.arctan(slope) # data rotated based on theta guess data_rotated_guess = np.dot(Rot(theta_guess), [data[:,0], data[:,1]]) first_guess = np.array([data[:,0].mean()+0.2, # meanx data[:,1].mean()+0.2, # meany data_rotated_guess[1].std(), # sigma-x data_rotated_guess[0].std(), # sigma-y theta_guess]) # rot. angle based on slope of lin. reg. 
# based on a first guess, a minimization based on a robust simplex is performed start_par = minimize(NLL_2D_scipy, first_guess, method='Nelder-Mead') return(start_par['x']) ############################## ### Fitting and visualization of P1, P2, C ############################## for df_nr in range(len(df_list)): # minimize negative log likelihood function first for the symmetric case data = cut_df[df_nr][['x', 'y']].to_numpy() root_res[df_nr] = Minuit(NLL_2D, meanx=Start_Par(data)[0], meany=Start_Par(data)[1], sigx=Start_Par(data)[2], sigy=Start_Par(data)[3], theta=Start_Par(data)[4]) root_res[df_nr].errors = (0.1, 0.1, 0.1, 0.1, 0.1) # initital step size root_res[df_nr].limits =[(None, None), (None, None), (None, None), (None, None), (None, None)] # fit ranges root_res[df_nr].errordef = Minuit.LIKELIHOOD # MLE definition (instead of Minuit.LEAST_SQUARES) root_res[df_nr].migrad() # finds minimum of mle function root_res[df_nr].hesse() # computes errors # plotting of data, excluded data, reference MCP circle, and fit results axes2[df_nr].plot(df_list[df_nr].x.to_numpy(),df_list[df_nr].y.to_numpy(),'o',alpha=0.15,label='data',zorder=0) axes2[df_nr].plot(excludes_df[df_nr].x.to_numpy(), excludes_df[df_nr].y.to_numpy(), 'x k', label='excluded data',zorder=1) mcp_circ = mpl.patches.Ellipse((0,0), 1500, 1500, edgecolor='k', fc='None', lw=2) axes2[df_nr].add_patch(mcp_circ) axes2[df_nr].scatter(root_res[df_nr].values['meanx'], root_res[df_nr].values['meany'], marker='o', color=col[1], linewidth=0, zorder=2) sig = mpl.patches.Ellipse((root_res[df_nr].values['meanx'], root_res[df_nr].values['meany']), 3*root_res[df_nr].values['sigx'], 3*root_res[df_nr].values['sigy'], np.degrees(root_res[df_nr].values['theta']), edgecolor=col[1], fc='None', lw=2, label='6-$\sigma$ band (fit)', zorder=2) axes2[df_nr].add_patch(sig) axes2[df_nr].legend(title='fit(x) = {:1.0f}({:1.0f})\nfit(y) = {:1.0f}({:1.0f})'.format(root_res[df_nr].values['meanx'],root_res[df_nr].errors['meanx'], root_res[df_nr].values['meany'],root_res[df_nr].errors['meany']), loc='lower left', fontsize=14) axes2[df_nr].axis([-750,750,-750,750]) axes2[df_nr].grid(True) axes2[df_nr].text(-730, 660, '{}: {}'.format(file_name.split('/',1)[-1], piicr_scheme_names[df_nr])) plt.tight_layout() # save fit information for each parameter: # 'parameter': [fitresult, fiterror, Hesse-covariance matrix] for i in range(len(parameters)): result[df_nr].update({'{}'.format(parameters[i]): [np.array(root_res[df_nr].values)[i], np.array(root_res[df_nr].errors)[i], root_res[df_nr].covariance]}) if 'ground' in file_name: analysis['ioi_g'][file_name]['fit-{}'.format(piicr_scheme_names[df_nr])] = result[df_nr] elif 'isomer' in file_name: analysis['ioi_m'][file_name]['fit-{}'.format(piicr_scheme_names[df_nr])] = result[df_nr] else: analysis['ref'][file_name]['fit-{}'.format(piicr_scheme_names[df_nr])] = result[df_nr] plt.savefig('{}-fit.pdf'.format(file_name)) plt.show() # save all data using pickle pickle.dump(analysis, open('data/data-save.p','wb')) ``` --- # !!! <font color='red'>REPEAT</font> CODE ABOVE FOR ALL INDIVIDUAL FILES !!! --- <br> <br> ### Save fit data to dataframe and *.csv file <br>Continue here after analyzing all files individually. The following command saves all necessary data and fit information in a `*.csv` file. 
``` calc_df = pd.DataFrame() for key in analysis.keys(): for subkey in analysis[key].keys(): if '_c_' not in subkey: calc_df = calc_df.append(pd.DataFrame({'file': subkey, 'p1_x': analysis[key][subkey]['fit-p1']['meanx'][0], 'p1_y': analysis[key][subkey]['fit-p1']['meany'][0], 'p2_x': analysis[key][subkey]['fit-p2']['meanx'][0], 'p2_y': analysis[key][subkey]['fit-p2']['meany'][0], 'c_x': analysis[key][subkey]['fit-c']['meanx'][0], 'c_y': analysis[key][subkey]['fit-c']['meany'][0], 'p1_x_unc': analysis[key][subkey]['fit-p1']['meanx'][1], 'p1_y_unc': analysis[key][subkey]['fit-p1']['meany'][1], 'p2_x_unc': analysis[key][subkey]['fit-p2']['meanx'][1], 'p2_y_unc': analysis[key][subkey]['fit-p2']['meany'][1], 'c_x_unc': analysis[key][subkey]['fit-c']['meanx'][1], 'c_y_unc': analysis[key][subkey]['fit-c']['meany'][1], 'cyc_freq_guess': analysis[key][subkey]['cyc_freq'], 'red_cyc_freq': analysis[key][subkey]['red_cyc_freq'], 'mag_freq': analysis[key][subkey]['mag_freq'], 'cyc_acc_time': analysis[key][subkey]['cyc_acc_time'], 'n_acc': analysis[key][subkey]['n_acc'], 'time_start': pd.to_datetime('{} {}'.format(analysis[key][subkey]['time-info'][0], analysis[key][subkey]['time-info'][1]), format='%m/%d/%Y %H:%M:%S', errors='ignore'), 'time_end': pd.to_datetime('{} {}'.format(analysis[key][subkey]['time-info'][2], analysis[key][subkey]['time-info'][3]), format='%m/%d/%Y %H:%M:%S', errors='ignore')}, index=[0]), ignore_index=True) calc_df.to_csv('data/analysis-summary.csv') calc_df ``` ### Calculate $\nu_c$ from position fits [1]: https://doi.org/10.1007/s00340-013-5621-0 [2]: https://doi.org/10.1103/PhysRevLett.110.082501 [3]: https://doi.org/10.1007/s10751-019-1601-z Can be run independently from everything above by loading the `analysis-summary.csv` file!<br> A detailed description of the $\nu_c$ calculation can be found in Ref. [1], [2] and [3]. 
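For orientation, the relation implemented in the cell below can be written as follows (notation mine, read directly off the code; the accumulation time `cyc_acc_time` is stored in µs, hence the factor $10^{-6}$ in the code):

$$\nu_c = \frac{\alpha_{P1P2} + 2\pi\, n_{\mathrm{acc}}}{2\pi\, t_{\mathrm{acc}}}, \qquad \sigma_{\nu_c} = \frac{\sigma_{\alpha_{P1P2}}}{2\pi\, t_{\mathrm{acc}}},$$

where $\alpha_{P1P2}$ is the angle between the P1 and P2 vectors around the center spot C and $n_{\mathrm{acc}}$ is the number of full revolutions accumulated during $t_{\mathrm{acc}}$.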
``` import pandas as pd import numpy as np # load fit-data file, datetime has to be converted calc_df = pd.read_csv('data/analysis-summary.csv', header=0, index_col=0) # calculate angle between the P1-vector (P1_x/y - C_x/y) and the P2-vector (P2_x/y - C_x/y) calc_df['p1p2_angle'] = np.arctan2(calc_df.p1_y - calc_df.c_y, calc_df.p1_x - calc_df.c_x) \ - np.arctan2(calc_df.p2_y - calc_df.c_y, calc_df.p2_x - calc_df.c_x) # calculate the uncertainty on the angle between the P1/P2 vectors # see https://en.wikipedia.org/wiki/Atan2 calc_df['p1p2_angle_unc'] = np.sqrt( ( calc_df.p1_x_unc * (calc_df.c_y - calc_df.p1_y) / ( (calc_df.p1_x - calc_df.c_x)**2 + (calc_df.p1_y - calc_df.c_y)**2 ) )**2 + ( calc_df.p1_y_unc * (calc_df.p1_x - calc_df.c_x) / ( (calc_df.p1_x - calc_df.c_x)**2 + (calc_df.p1_y - calc_df.c_y)**2 ) )**2 + ( calc_df.p2_x_unc * (calc_df.c_y - calc_df.p2_y) / ( (calc_df.p2_x - calc_df.c_x)**2 + (calc_df.p2_y - calc_df.c_y)**2 ) )**2 + ( calc_df.p2_y_unc * (calc_df.p2_x - calc_df.c_x) / ( (calc_df.p2_x - calc_df.c_x)**2 + (calc_df.p2_y - calc_df.c_y)**2 ) )**2 + ( calc_df.c_x_unc * ( -(calc_df.c_y - calc_df.p1_y) / ( (calc_df.p1_x - calc_df.c_x)**2 + (calc_df.p1_y - calc_df.c_y)**2 ) -(calc_df.c_y - calc_df.p2_y) / ( (calc_df.p2_x - calc_df.c_x)**2 + (calc_df.p2_y - calc_df.c_y)**2 ) ) )**2 + ( calc_df.c_y_unc * ( (calc_df.p1_x - calc_df.c_x) / ( (calc_df.p1_x - calc_df.c_x)**2 + (calc_df.p1_y - calc_df.c_y)**2 ) +(calc_df.p2_x - calc_df.c_x) / ( (calc_df.p2_x - calc_df.c_x)**2 + (calc_df.p2_y - calc_df.c_y)**2 ) ) )**2 ) # calculate cyc freq: total phase devided by total time calc_df['cyc_freq'] = (calc_df.p1p2_angle + 2*np.pi * calc_df.n_acc) / (2*np.pi * calc_df.cyc_acc_time * 0.000001) calc_df['cyc_freq_unc'] = calc_df.p1p2_angle_unc / (2*np.pi * calc_df.cyc_acc_time * 0.000001) calc_df.to_csv('data/analysis-summary.csv') calc_df.head() ``` ### Frequency-ratio calculation [1]: https://doi.org/10.1007/s00340-013-5621-0 [2]: https://doi.org/10.1103/PhysRevLett.110.082501 [3]: https://doi.org/10.1007/s10751-019-1601-z In order to determine the frequency ratio between the ioi and the ref, simultaneous fits of all for the data set possible polynomial degrees are performed. The code calculates the reduced $\chi^2_{red}$ for each fit and returns only the one with a $\chi^2_{red}$ closest to 1. A detailed description of the procedure can be found in Ref. [3]. 
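The actual simultaneous fit lives in `bin/freq_ratio.py`; as a rough illustration of the selection criterion described above (not the `Freq_ratio` implementation itself), picking the polynomial degree whose weighted reduced $\chi^2$ is closest to 1 for a single data set could look like this:

```
import numpy as np

def best_poly_degree(t, nu, nu_unc, max_degree=4):
    '''Fit polynomials of increasing degree to one (t, nu) data set and
    return the degree whose reduced chi-square is closest to 1.'''
    best = None
    for deg in range(1, max_degree + 1):
        if len(t) <= deg + 1:  # need more points than free parameters
            break
        coeffs = np.polyfit(t, nu, deg, w=1.0 / np.asarray(nu_unc))
        resid = (np.asarray(nu) - np.polyval(coeffs, t)) / np.asarray(nu_unc)
        chi2_red = np.sum(resid**2) / (len(t) - (deg + 1))
        if best is None or abs(chi2_red - 1.0) < abs(best[2] - 1.0):
            best = (deg, coeffs, chi2_red)
    return best  # (degree, coefficients, reduced chi-square)
```

In the real procedure both species are fitted simultaneously with the frequency ratio as a shared parameter, which appears to be what `ratio_sim_fit` returns below (two polynomials, one ratio, one $\chi^2_{red}$).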
If problems in the fitting occur, please try to vary the starting parameter section in lines 125-135 of `~/bin/freq_ratio.py` ``` import pandas as pd import numpy as np from bin.freq_ratio import Freq_ratio freq = Freq_ratio() # load fit-data file calc_df = pd.read_csv('data/analysis-summary.csv', header=0, index_col=0) # save average time of measurement: t_start+(t_end-t_start)/2 calc_df.time_start = pd.to_datetime(calc_df.time_start) calc_df.time_end = pd.to_datetime(calc_df.time_end) calc_df['time'] = calc_df.time_start + (calc_df.time_end - calc_df.time_start)/2 calc_df.to_csv('data/analysis-summary.csv') # convert avg.time to difference in minutes from first measurement -> allows fitting with small number as x value calc_df['time_delta'] = ((calc_df['time']-calc_df['time'].min())/np.timedelta64(1, 's')/60) # selecting data for isotopes df_ioi_g = calc_df[calc_df.file.str.contains('ground')][['time_delta','cyc_freq','cyc_freq_unc','time','file']] df_ioi_m = calc_df[calc_df.file.str.contains('isomer')][['time_delta','cyc_freq','cyc_freq_unc','time','file']] # allows to define a subset of reference frequencies for ground and isomer df_ref_g = calc_df[calc_df.file.str.contains('ref')][['time_delta','cyc_freq','cyc_freq_unc','time','file']] df_ref_m = calc_df[calc_df.file.str.contains('ref')][['time_delta','cyc_freq','cyc_freq_unc','time','file']] # simultaneous polynomial fit, see https://doi.org/10.1007/s10751-019-1601-z fit1, fit2, ratio1, ratio_unc1, chi_sq1 = freq.ratio_sim_fit(['ref', 'ioi_g'], df_ref_g.time_delta.tolist(), df_ref_g.cyc_freq.tolist(), df_ref_g.cyc_freq_unc.tolist(), df_ioi_g.time_delta.tolist(), df_ioi_g.cyc_freq.tolist(), df_ioi_g.cyc_freq_unc.tolist()) if len(df_ioi_m) > 0: fit3, fit4, ratio2, ratio_unc2, chi_sq2 = freq.ratio_sim_fit(['ref', 'ioi_m'], df_ref_m.time_delta.tolist(), df_ref_m.cyc_freq.tolist(), df_ref_m.cyc_freq_unc.tolist(), df_ioi_m.time_delta.tolist(), df_ioi_m.cyc_freq.tolist(), df_ioi_m.cyc_freq_unc.tolist()) ``` ### Frequency-ratio plotting ``` %config InlineBackend.figure_format ='retina' import matplotlib.pyplot as plt import matplotlib as mpl import pandas as pd import numpy as np mpl.rc('font', family='serif', serif='Linguistics Pro') # open source Utopia LaTeX font with greek letters mpl.rc('text', usetex=False) mpl.rc('mathtext', fontset='custom', rm='Linguistics Pro', it='Linguistics Pro:italic', bf='Linguistics Pro:bold') mpl.rcParams.update({'font.size': 18}) # prepare fit data x1 = np.linspace(min([df_ioi_g.time_delta.min(),df_ref_g.time_delta.min()]),max([df_ioi_g.time_delta.max(),df_ref_g.time_delta.max()]),500) t1 = pd.date_range(pd.Series([df_ioi_g.time.min(),df_ref_g.time.min()]).min(),pd.Series([df_ioi_g.time.max(),df_ref_g.time.max()]).max(),periods=500) if len(df_ioi_m) > 0: x2 = np.linspace(min([df_ioi_m.time_delta.min(),df_ref_m.time_delta.min()]),max([df_ioi_m.time_delta.max(),df_ref_m.time_delta.max()]),500) t2 = pd.date_range(pd.Series([df_ioi_m.time.min(),df_ref_m.time.min()]).min(),pd.Series([df_ioi_m.time.max(),df_ref_m.time.max()]).max(),periods=500) fit1_y = [np.polyval(fit1, i) for i in x1] fit2_y = [np.polyval(fit2, i) for i in x1] if len(df_ioi_m) > 0: fit3_y = [np.polyval(fit3, i) for i in x2] fit4_y = [np.polyval(fit4, i) for i in x2] ######################### ### PLOTTING ground state ######################### if len(df_ioi_m) > 0: fig, (ax1, ax3) = plt.subplots(figsize=(9,12),nrows=2, ncols=1) else: fig, ax1 = plt.subplots(figsize=(9,6),nrows=1, ncols=1) ax1.errorbar(df_ref_g.time, df_ref_g.cyc_freq, 
yerr=df_ref_g.cyc_freq_unc, fmt='o', label='{}'.format(latex_ref), marker='d', c='#1E77B4', ms=10, elinewidth=2.5) ax1.set_xlabel('Time', fontsize=24, fontweight='bold') # Make the y-axis label, ticks and tick labels match the line color. ax1.set_ylabel('Frequency (Hz)', fontsize=24, fontweight='bold') ax1.tick_params('y', colors='#1E77B4') ax1.plot(t1, fit1_y, ls=(5.5, (5, 1, 1, 1, 1, 1, 1, 1)),c='#1E77B4', label='poly-fit') # Allowing two axes in one subplot ax2 = ax1.twinx() ax2.errorbar(df_ioi_g.time, df_ioi_g.cyc_freq, yerr=df_ioi_g.cyc_freq_unc, fmt='o', color='#D62728', label='{}'.format(latex_ioi_g), fillstyle='none', ms=10, elinewidth=2.5) # green: #2ca02c ax2.tick_params('y', colors='#D62728') ax2.plot(t1, fit2_y, ls=(0, (5, 3, 1, 3)),c='#D62728', label='poly-fit') # adjust the y axes to be the same height middle_y1 = df_ref_g.cyc_freq.min() + (df_ref_g.cyc_freq.max() - df_ref_g.cyc_freq.min())/2 middle_y2 = df_ioi_g.cyc_freq.min() + (df_ioi_g.cyc_freq.max() - df_ioi_g.cyc_freq.min())/2 range_y1 = df_ref_g.cyc_freq.max() - df_ref_g.cyc_freq.min() + 2 * df_ref_g.cyc_freq_unc.max() range_y2 = df_ioi_g.cyc_freq.max() - df_ioi_g.cyc_freq.min() + 2 * df_ioi_g.cyc_freq_unc.max() ax1.set_ylim(middle_y1 - 1.3 * max([range_y1, middle_y1*range_y2/middle_y2])/2, middle_y1 + 1.1 * max([range_y1, middle_y1*range_y2/middle_y2])/2) # outliers only ax2.set_ylim(middle_y2 - 1.1 * max([middle_y2*range_y1/middle_y1, range_y2])/2, middle_y2 + 1.3 * max([middle_y2*range_y1/middle_y1, range_y2])/2) # most of the data # plotting only hours without the date ax2.xaxis.set_major_formatter(mpl.dates.DateFormatter('%H:%M')) ax2.xaxis.set_minor_locator(mpl.dates.HourLocator()) handles1, labels1 = ax1.get_legend_handles_labels() handles2, labels2 = ax2.get_legend_handles_labels() handles_g = [handles1[1], handles2[1], (handles1[0], handles2[0])] labels_g = [labels1[1], labels2[1], labels1[0]] plt.legend(handles=handles_g, labels=labels_g,fontsize=18,title='Ratio: {:1.10f}\n $\\pm${:1.10f}'.format(ratio1, ratio_unc1), loc='upper right') plt.text(0.03,0.03,'poly-{}: $\chi^2_{{red}}$ {:3.2f}'.format(len(fit1)-1, chi_sq1),transform=ax1.transAxes) ########################### ### PLOTTING isomeric state ########################### if len(df_ioi_m) > 0: ax3.errorbar(df_ref_m.time, df_ref_m.cyc_freq, yerr=df_ref_m.cyc_freq_unc, fmt='o', label='{}'.format(latex_ref), marker='d', c='#1E77B4', ms=10, elinewidth=2.5) ax3.set_xlabel('Time', fontsize=24, fontweight='bold') # Make the y-axis label, ticks and tick labels match the line color. 
ax3.set_ylabel('Frequency (Hz)', fontsize=24, fontweight='bold') ax3.tick_params('y', colors='#1E77B4') ax3.plot(t2, fit3_y, ls=(5.5, (5, 1, 1, 1, 1, 1, 1, 1)),c='#1E77B4', label='poly-fit') # Allowing two axes in one subplot ax4 = ax3.twinx() ax4.errorbar(df_ioi_m.time, df_ioi_m.cyc_freq, yerr=df_ioi_m.cyc_freq_unc, fmt='o', color='#D62728', label='{}'.format(latex_ioi_m), fillstyle='none', ms=10, elinewidth=2.5) # green: #2ca02c ax4.tick_params('y', colors='#D62728') ax4.plot(t2, fit4_y, ls=(0, (5, 3, 1, 3)),c='#D62728', label='poly-fit') # adjust the y axes to be the same height middle_y3 = df_ref_m.cyc_freq.min() + (df_ref_m.cyc_freq.max() - df_ref_m.cyc_freq.min())/2 middle_y4 = df_ioi_m.cyc_freq.min() + (df_ioi_m.cyc_freq.max() - df_ioi_m.cyc_freq.min())/2 range_y3 = df_ref_m.cyc_freq.max() - df_ref_m.cyc_freq.min() + 2 * df_ref_m.cyc_freq_unc.max() range_y4 = df_ioi_m.cyc_freq.max() - df_ioi_m.cyc_freq.min() + 2 * df_ioi_m.cyc_freq_unc.max() ax3.set_ylim(middle_y3 - 1.3 * max([range_y3, middle_y3*range_y4/middle_y4])/2, middle_y3 + 1.1 * max([range_y3, middle_y3*range_y4/middle_y4])/2) # outliers only ax4.set_ylim(middle_y4 - 1.1 * max([middle_y4*range_y3/middle_y3, range_y4])/2, middle_y4 + 1.3 * max([middle_y4*range_y3/middle_y3, range_y4])/2) # most of the data # plotting only hours without the date ax4.xaxis.set_major_formatter(mpl.dates.DateFormatter('%H:%M')) ax4.xaxis.set_minor_locator(mpl.dates.HourLocator()) handles3, labels3 = ax3.get_legend_handles_labels() handles4, labels4 = ax4.get_legend_handles_labels() handles_m = [handles3[1], handles4[1], (handles3[0], handles4[0])] labels_m = [labels3[1], labels4[1], labels3[0]] plt.legend(handles=handles_m, labels=labels_m, fontsize=18,title='Ratio: {:1.10f}\n $\\pm${:1.10f}'.format(ratio2, ratio_unc2), loc='upper right') plt.text(0.03,0.03,'poly-{}: $\chi^2_{{red}}$ {:3.2f}'.format(len(fit3)-1, chi_sq2),transform=ax3.transAxes) plt.tight_layout() plt.savefig('data/freq-ratios.pdf') plt.show() ```
github_jupyter
# Introduction to Jupyter Notebooks and Text Processing in Python This 'document' is a Jupyter notebook. It allows you to combine explanatory **text** and **code** that executes to produce results you can see on the same page. ## Notebook Basics ### Text cells The box this text is written in is called a *cell*. It is a *text cell* written in a very simple markup language called 'Markdown'. Here is a useful [Markdown cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet). You can edit and then run cells to produce a result. Running this text cell produces formatted text. ### Code cells The other main kind of cell is a *code cell*. The cell immediately below this one is a code cell. Running a code cell runs the code in the cell and produces a result. ``` # This is a comment in a code cell. Comments start with a # symbol. They are ignored and do not do anything. # This box is a code cell. When this cell is run, the code below will execute and produce a result 3 + 4 ``` ## Simple String Manipulation in Python This section introduces some very basic things you can do in Python to create and manipulate *strings*. A string is a simple sequence of characters, like `flabbergast`. This introduction is limited to those things that may be useful to know in order to understand the *Bughunt!* data mining in the following two notebooks. ### Creating and Storing Strings in Variables Strings are simple to create in Python. You can simply write some characters in quote marks. ``` 'Butterflies are important as pollinators.' ``` In order to do something useful with this string, other than print it out, we need to store it in a *variable* by using the assignment operator `=` (equals sign). Whatever is on the right-hand side of the `=` is stored into a variable with the name on the left-hand side. ``` # my_variable is the variable on the left # 'Butterflies are important as pollinators.' is the string on the right that is stored in the variable my_variable my_variable = 'Butterflies are important as pollinators.' ``` Notice that nothing is printed to the screen. That's because the string is stored in the variable `my_variable`. In order to see what is inside the variable `my_variable` we can simply write `my_variable` in a code cell, run it, and the interpreter will print it out for us. ``` my_variable ``` ### Manipulating Bits of Strings #### Accessing Individual Characters A string is just a sequence (or list) of characters. You can access **individual characters** in a string by specifying which ones you want in square brackets. If you want the first character you specify `1`. ``` my_variable[1] ``` Hang on a minute! Why did it give us `u` instead of `B`? In programming, everything tends to be *zero indexed*, which means that things are counted from 0 rather than 1. Thus, in the example above, `1` gives us the *second* character in the string. If you want the first character in the string, you need to specify the index `0`! ``` my_variable[0] ``` #### Accessing a Range of Characters You can also pick out a **range of characters** from within a string, by giving the *start index* followed by the *end index* with a colon (`:`) in between. The example below gives us the character at index `0` all the way up to, *but not including*, the character at index `20`. ``` my_variable[0:20] ``` ### Changing Whole Strings with Functions Python has some built-in *functions* that allow you to change a whole string at once. 
You can change all characters to lowercase or uppercase: ``` my_variable.lower() my_variable.upper() ``` NB: These functions do not change the original string but create a new one. Our original string is still the same as it was before: ``` my_variable ``` ### Testing Strings You can also test a string to see if it is passes some test, e.g. is the string all alphabetic characters only? ``` my_variable.isalpha() ``` Does the string have the letter `p` in it? ``` 'p' in my_variable ``` ### Lists of Strings Another important thing we can do with strings is creating a list of strings by listing them inside square brackets `[]`: ``` my_list = ['Butterflies are important as pollinators', 'Butterflies feed primarily on nectar from flowers', 'Butterflies are widely used in objects of art'] my_list ``` ### Manipulating Lists of Strings Just like with strings, we can access individual items inside a list by index number: ``` my_list[0] ``` And we can access a range of items inside a list by *slicing*: ``` my_list[0:2] ``` ### Advanced: Creating Lists of Strings with List Comprehensions We can create new lists in an elegant way by combining some of the things we have covered above. Here is an example where we have taken our original list `my_list` and created a new list `new_list` by going over each string in the list: ``` new_list = [string for string in my_list] new_list ``` Why do this? If we combine it with a test, we can have a list that only contains strings with the letter `p` in them: ``` new_list_p = [string for string in my_list if 'p' in string] new_list_p ``` This is a very powerful way to quickly create lists. We can even change all the strings to uppercase at the same time! ``` new_list_p_upper = [string.upper() for string in my_list if 'p' in string] new_list_p_upper ```
github_jupyter
<font size="+1">This notebook will illustrate how to access DeepLabCut(DLC) results for IBL sessions and how to create short videos with DLC labels printed onto, as well as wheel angle, starting by downloading data from the IBL flatiron server. It requires ibllib, a ONE account and the following script: https://github.com/int-brain-lab/iblapps/blob/master/DLC_labeled_video.py</font> ``` run '/home/mic/Dropbox/scripts/IBL/DLC_labeled_video.py' one = ONE() ``` Let's first find IBL ephys sessions with DLC results: ``` eids= one.search(task_protocol='ephysChoiceworld', dataset_types=['camera.dlc'], details=False) len(eids) ``` For a particular session, we can create a short labeled video by calling the function Viewer, specifying the eid of the desired session, the video type (there's 'left', 'right' and 'body' videos), and a range of trials for which the video should be created. Most sesions have around 700 trials. In the following, this is illustrated with session '3663d82b-f197-4e8b-b299-7b803a155b84', video type 'left', trials range [10,13] and without a zoom for the eye, such that nose, paw and tongue tracking is visible. The eye-zoom option shows only the four points delineating the pupil edges, which are too small to be visible in the normal view. Note that this automatically starts the download of the video from flatiron (in case it is not locally stored already), which may take a while since these videos are about 8 GB large. ``` eid = eids[6] Viewer(eid, 'left', [10,13], save_video=True, eye_zoom=False) ``` As usual when downloading IBL data from flatiron, the dimensions are listed. Below is one frame of the video for illustration. One can see one point for each paw, two points for the edges of the tongue, one point for the nose and there are 4 points close together around the pupil edges. All points for which the DLC network had a confidence probability of below 0.9 are hidden. For instance when the mouse is not licking, there is no tongue and so the network cannot detect it, and no points are shown. The script will display and save the short video in your local folder. ![alt text](video_frame.png "Example frame of video with DLC labels") Sections of the script <code>DLC_labeled_video.py</code> can be recycled to analyse DLC traces. For example let's plot the x coordinate for the right paw in a <code>'left'</code> cam video for a given trial. ``` one = ONE() dataset_types = ['camera.times','trials.intervals','camera.dlc'] video_type = 'left' # get paths to load in data D = one.load('3663d82b-f197-4e8b-b299-7b803a155b84',dataset_types=dataset_types, dclass_output=True) alf_path = Path(D.local_path[0]).parent.parent / 'alf' video_data = alf_path.parent / 'raw_video_data' # get trials start and end times, camera time stamps (one for each frame, synced with DLC trace) trials = alf.io.load_object(alf_path, '_ibl_trials') cam0 = alf.io.load_object(alf_path, '_ibl_%sCamera' % video_type) cam1 = alf.io.load_object(video_data, '_ibl_%sCamera' % video_type) cam = {**cam0,**cam1} # for each tracked point there's x,y in [px] in the frame and a likelihood that indicates the network's confidence cam.keys() ``` There is also <code>'times'</code> in this dictionary, the time stamps for each frame that we'll use to sync it with other events in the experiment. Let's get rid of it briefly to have only DLC points and set coordinates to nan when the likelihood is below 0.9. 
``` Times = cam['times'] del cam['times'] points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()]) cam['times'] = Times # A helper function to find closest time stamps def find_nearest(array, value): array = np.asarray(array) idx = (np.abs(array - value)).argmin() return idx ``` Let's pick say the 5th trial and find all DLC traces for it. ``` frame_start = find_nearest(cam['times'], trials['intervals'][4][0]) frame_stop = find_nearest(cam['times'], trials['intervals'][4][1]) XYs = {} for point in points: x = np.ma.masked_where( cam[point + '_likelihood'] < 0.9, cam[point + '_x']) x = x.filled(np.nan) y = np.ma.masked_where( cam[point + '_likelihood'] < 0.9, cam[point + '_y']) y = y.filled(np.nan) XYs[point] = np.array( [x[frame_start:frame_stop], y[frame_start:frame_stop]]) import matplotlib.pyplot as plt plt.plot(cam['times'][frame_start:frame_stop],XYs['paw_r'][0]) plt.xlabel('time [sec]') plt.ylabel('x location of right paw [px]') ```
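As a small add-on (a sketch that reuses the `points`, `cam`, `frame_start` and `frame_stop` variables defined above), one can check how often each tracked point actually falls below the 0.9-likelihood threshold and is therefore masked in this trial:

```
import numpy as np

# fraction of frames in the selected trial where each DLC point falls
# below the 0.9 likelihood threshold and is therefore set to nan above
for point in points:
    lik = np.asarray(cam[point + '_likelihood'][frame_start:frame_stop])
    low_fraction = np.mean(lik < 0.9)
    print('{}: {:.1f}% of frames below 0.9 likelihood'.format(point, 100 * low_fraction))
```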
github_jupyter
# Week 3 - Functions The real power in any programming language is the **Function**. A function is: * a little block of script (one line or many) that performs specific task or a series of tasks. * reusable and helps us make our code DRY. * triggered when something "invokes" or "calls" it. * ideally modular – it performs a narrow task and you call several functions to perform more complex tasks. ### What we'll cover today: * Simple function * Return statements * ``` ## Build a function called myFunction that adds 2 numbers together ## it should print "The total is (whatever the number is)!" ## build it here ## Call myFunction using 4 and 5 as the arguments ## Call myFunction using 10 and 2 as the arguments ## you might forget what arguments are needed for the function to work. ## you can add notes that appear on shift-tab as you call the function. ## write it here ## test it on 3 and 4 ``` ### To use or not use functions? Let's compare the two options with a simple example: ``` ## You have a list of numbers. mylist1 = [1, -5, 22, -44.2, 33, -45] ## Turn each number into an absolute number. ## a for loop works perfectly fine here. ## The problem is that your project keeps generating more lists. ## Each list of numbers has to be turned into absolute numbers mylist2 = [-56, -34, -75, -111, -22] mylist3 = [-100, -200, 100, -300, -100] mylist4 = [-23, -89, -11, -45, -27] mylist5 = [0, 1, 2, 3, 4, 5] ``` ## DRY ### Do you keep writing for loops for each list? ### No, that's a lot of repetition! ### DRY stands for "Don't Repeat Yourself" ``` ## Instead we write a function that takes a list, ## converts each list item to an absolute number, ## and prints out the number ## Try swapping out different lists into the function: ``` ## Timesaver ### Imagine for a moment that your editor tells you that the calculation needs to be updated. Instead of needing the absolute number, you need the absolute number minus 5. ### Having used multiple for loops, you'd have to change each one. What if you miss one or two? Either way, it's a chore. ### With functions, you just revise the function and the update runs everywhere. ``` ## So if an editor says to actually multiply the absolute number by 1_000_000, ## Try swapping out different lists into the function: ``` ## Return Statements ### So far we have only printed out values processed by a function. ### But we really want to retain the value the function creates. ### We can then pass that value to other parts of our calculations and code. ``` ## Simple example ## A function that adds two numbers together and prints the value: ## call the function with the numbers 2 and 4 ## let's try to save it in a variable called myCalc ## Print myCalc. What does it hold? ``` ### The return Statement ``` ## Tweak our function by adding return statement ## instead of printing a value we want to return a value(or values). ## call the function add_numbers_ret ## and store in variable called myCalc ## print myCalc ## What type is myCalc? 
``` ## Return multiple values ``` ## demo function name,age,country = getPerson("David", 35, "France") ``` ### Let's revise our earlier absolute values converter with a return statement #### Here is the earlier version: <img src="https://github.com/sandeepmj/fall20-student-practical-python/blob/master/support_files/abs-function.png?raw=true" style="width: 100%;"> ``` ## Here it is revised with a return statement ## Let's actually make that a list comprehension version of the function: ## Let's test it by storing the return value in variable x ## What type of data object is it? ``` # Make a function more flexible and universal * Currently, we have a function that takes ONLY a list as an argument. * We'd have to write another one for a single number argument. ``` ## try using return_absolutes_lc on a single number like -10 ## it will break ``` # Universalize our absolute numbers function ``` ## call the function make_abs ## try it on -10 ## Try it on mylist3 - it will break! ``` ## We can use the ```map()``` function to tackle this problem. ```map()``` takes 2 arguments: a ```function``` and ```iterable like a list```. ``` ## try it on make_abs and mylist3 ## save it into a list ``` ## ```map()``` also works for multiple iterables #### remember our ```add_numbers_ret``` function. ``` ## here it is again: def add_numbers_ret(number1, number2): return (number1 + number2) ## two lists a_even = [2, 4, 6, 8] a_odd = [1, 3, 5, 7, 9] ## note this has one more item in the list. ## run map on a_even and a_odd b = list(map(add_numbers_ret, a_even, a_odd)) b ``` ## Functions that call other funcions ``` ## let's create a function that returns the square of a number ## what is 9 squared? ``` ### Making a point here with a simple example Let's say we want to add 2 numbers together and then square that result. Instead of writing one "complex" function, we can call on our modular functions. ``` ## a function that calls our modular functions ## call make_point() on 2 and 5 make_point(2,5) ```
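For reference, one possible way to fill in the exercise cells above could look like the sketch below (the function names follow the notebook's prompts; `square_number` is my own choice of name for the squaring helper, and this is not the instructor's official solution):

```
def myFunction(number1, number2):
    '''Adds two numbers together and prints the total.'''
    total = number1 + number2
    print(f"The total is {total}!")

def return_absolutes_lc(a_list):
    '''Returns a new list with the absolute value of every item (list comprehension).'''
    return [abs(number) for number in a_list]

def make_abs(number):
    '''Returns the absolute value of a single number.'''
    return abs(number)

def square_number(number):
    '''Returns the square of a number.'''
    return number ** 2

def make_point(number1, number2):
    '''Adds two numbers, then squares the result, by calling the modular helpers.
    add_numbers_ret is already defined in the cell above.'''
    return square_number(add_numbers_ret(number1, number2))

myFunction(4, 5)                                    # The total is 9!
print(return_absolutes_lc([1, -5, 22, -44.2, 33, -45]))
print(list(map(make_abs, [-100, -200, 100, -300, -100])))
print(make_point(2, 5))                             # 49
```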
github_jupyter
# piston example with explicit Euler scheme ``` %matplotlib inline import matplotlib import matplotlib.pyplot as plt import matplotlib.animation as anim import numpy as np import sys sys.path.insert(0, './code') import ideal_gas ``` ### physical parameters ``` # length of cylinder l = 0.1 # radius of cylinder r = 0.05 # thickness of wall w = 0.006 # derived geometrical data r2 = 2 * r # diameter of cylinder w2 = w / 2 # halved thickness of wall l2 = l - w2 A = r**2 * np.pi # cross-sectional area def get_v_1(q): """first volume""" return A * (q - w2) def get_v_2(q): """second volume""" return A * (l2 - q) # density of aluminium m_Al = 2700.0 m_Cu = 8960.0 # mass of piston m = m_Cu * A * w # thermal conductivity of aluminium κ_Al = 237.0 κ_Cu = 401.0 # thermal conduction coefficient α = κ_Cu * A / w m_inv = 1 / m ``` ### initial conditions determine $n_1$, $n_2$, $s_1$, $s_2$ ``` # wanted conditions v_1 = v_2 = get_v_1(l/2) θ_1 = 273.15 + 25.0 π_1 = 1.5 * 1e5 θ_2 = 273.15 + 20.0 π_2 = 1.0 * 1e5 from scipy.optimize import fsolve n_1 = fsolve(lambda n : ideal_gas.S_π(ideal_gas.U2(θ_1, n), v_1, n) - π_1, x0=2e22)[0] s_1 = ideal_gas.S(ideal_gas.U2(θ_1, n_1), v_1, n_1) # check temperature ideal_gas.U_θ(s_1, v_1, n_1) - 273.15 # check pressure ideal_gas.U_π(s_1, v_1, n_1) * 1e-5 n_2 = fsolve(lambda n : ideal_gas.S_π(ideal_gas.U2(θ_2, n), v_2, n) - π_2, x0=2e22)[0] s_2 = ideal_gas.S(ideal_gas.U2(θ_2, n_2), v_2, n_2) # check temperature ideal_gas.U_θ(s_2, v_2, n_2) - 273.15 # check pressure ideal_gas.U_π(s_2, v_2, n_2) * 1e-5 x_0 = l/2, 0, s_1, s_2 ``` ### simulation ``` def set_state(data, i, x): q, p, s_1, s_2 = x data[i, 0] = q data[i, 1] = p data[i, 2] = v = m_inv * p data[i, 3] = v_1 = get_v_1(q) data[i, 4] = π_1 = ideal_gas.U_π(s_1, v_1, n_1) data[i, 5] = s_1 data[i, 6] = θ_1 = ideal_gas.U_θ(s_1, v_1, n_1) data[i, 7] = v_2 = get_v_2(q) data[i, 8] = π_2 = ideal_gas.U_π(s_2, v_2, n_2) data[i, 9] = s_2 data[i, 10] = θ_2 = ideal_gas.U_θ(s_2, v_2, n_2) data[i, 11] = E_kin = 0.5 * m_inv * p**2 data[i, 12] = u_1 = ideal_gas.U(s_1, v_1, n_1) data[i, 13] = u_2 = ideal_gas.U(s_2, v_2, n_2) data[i, 14] = E = E_kin + u_1 + u_2 data[i, 15] = S = s_1 + s_2 def get_state(data, i): return data[i, (0, 1, 5, 9)] def rhs(x): """right hand side of the explicit system of differential equations """ q, p, s_1, s_2 = x v_1 = get_v_1(q) v_2 = get_v_2(q) π_1 = ideal_gas.U_π(s_1, v_1, n_1) π_2 = ideal_gas.U_π(s_2, v_2, n_2) θ_1 = ideal_gas.U_θ(s_1, v_1, n_1) θ_2 = ideal_gas.U_θ(s_2, v_2, n_2) return np.array((m_inv*p, A*(π_1-π_2), α*(θ_2-θ_1)/θ_1, α*(θ_1-θ_2)/θ_2)) t_f = 1.0 dt = 1e-4 steps = int(t_f // dt) print(f'steps={steps}') t = np.linspace(0, t_f, num=steps) dt = t[1] - t[0] data = np.empty((steps, 16), dtype=float) set_state(data, 0, x_0) x_old = get_state(data, 0) for i in range(1, steps): x_new = x_old + dt * rhs(x_old) set_state(data, i, x_new) x_old = x_new θ_min = np.min(data[:, (6,10)]) θ_max = np.max(data[:, (6,10)]) # plot transient fig, ax = plt.subplots(dpi=200) ax.set_title("piston position q") ax.plot(t, data[:, 0]); fig, ax = plt.subplots(dpi=200) ax.set_title("total entropy S") ax.plot(t, data[:, 15]); fig, ax = plt.subplots(dpi=200) ax.set_title("total energy E") ax.plot(t, data[:, 14]); ``` the total energy is not conserved well
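A quick way to quantify this, reusing the `data` array and time vector `t` from the run above (column 14 holds the total energy `E`, as filled in by `set_state`):

```
# relative drift of the total energy caused by the explicit Euler step
E = data[:, 14]
rel_drift = (E - E[0]) / E[0]
print(f"relative energy drift after {t_f} s: {rel_drift[-1]:.3e}")

fig, ax = plt.subplots(dpi=200)
ax.set_title("relative energy drift (E - E$_0$) / E$_0$")
ax.plot(t, rel_drift);
```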
github_jupyter
Installation ``` !pip install git+https://github.com/gbolmier/funk-svd from funk_svd.dataset import fetch_ml_ratings from funk_svd import SVD from sklearn.metrics import mean_absolute_error import pandas as pd ds_ratings = pd.read_csv("../ml-latest-small/ratings.csv") ds_movies = pd.read_csv("../ml-latest-small/movies.csv") # funk-svd expects the user column to be named u_id and the item column i_id, so rename them df = ds_ratings.rename( { "userId": "u_id", "movieId": "i_id" }, axis=1 ) df # use 80% for training, 10% as the validation set and 10% as the test set train = df.sample(frac=0.8, random_state=7) val = df.drop(train.index.tolist()).sample(frac=0.5, random_state=8) test = df.drop(train.index.tolist()).drop(val.index.tolist()) # train the SVD model; n_factors corresponds to the k of the SVD svd = SVD(lr=0.001, reg=0.005, n_epochs=100, n_factors=15, early_stopping=True, shuffle=False, min_rating=1, max_rating=5) svd.fit(X=train, X_val=val) # evaluate the training result on the test set pred = svd.predict(test) mae = mean_absolute_error(test['rating'], pred) print(f'Test MAE: {mae:.2f}') user_ratings = ds_ratings.pivot(index="userId", columns="movieId", values="rating") def get_user_real_score(user_id, item_id): return user_ratings.loc[user_id, item_id] def get_user_unseen_ranks(user_id, max_rank=100): # use the predict_pair method to # compute the predicted rating of every movie for this user movie_ids = df.i_id.unique() rec = pd.DataFrame( [{ "id": id, "recommendation_score": svd.predict_pair(user_id, id), "real_score": get_user_real_score(user_id, id) } for id in movie_ids ] ) # exclude movies the user has already seen user_seen_movies = train[train.u_id == user_id] rec = rec[~rec.id.isin(user_seen_movies.i_id)] rec.sort_values("recommendation_score", ascending=False, inplace=True) # keep only the top max_rank entries if max_rank is not None: rec = rec.head(max_rank) # add the rank as a column rec["rank"] = range(1, len(rec) + 1) # collect the IDs of movies the user actually rated but that were not in the training set # then filter the recommendations down to those movies to see at which rank they were recommended user_unseen_movies = pd.concat([val, test], axis=0) user_unseen_movies = user_unseen_movies[user_unseen_movies.u_id == user_id].i_id rec = rec[rec.id.isin(user_unseen_movies)] rec.index = rec.id del rec["id"] # join with the actual movie information for the recommended items rec = ds_movies.merge(rec, left_on="movieId", right_index=True) rec.sort_values("rank", inplace=True) top_k_accuracy = len(rec) / len(user_unseen_movies) return rec, top_k_accuracy from IPython.display import display user_ids = df.u_id.unique()[:10] total_acc = 0 for uid in user_ids: top100, acc = get_user_unseen_ranks(uid) total_acc += acc print("User: ", uid, "TOP 100 accuracy: ", round(acc, 2)) display(top100) total_acc / len(user_ids) ```
github_jupyter
``` # Import libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import sqlite3 from sklearn.pipeline import Pipeline # used for train/test splits from sklearn.cross_validation import train_test_split # used to impute mean for data from sklearn.preprocessing import Imputer # logistic regression is our model of choice from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LogisticRegressionCV # used to calculate AUROC/accuracy from sklearn import metrics # used to create confusion matrix from sklearn.metrics import confusion_matrix from sklearn.cross_validation import cross_val_score %matplotlib inline # Connect to MIMIC # be sure to add the password as appropriate! con = psycopg2.connect(dbname='MIMIC', user='workshop', password='' , host='<xxxxx>.amazonaws.com' , port=5432) cur = con.cursor() cur.execute('SET search_path to ''mimiciii_workshop''') query = """ with ce as ( select icustay_id, charttime, itemid, valuenum from chartevents -- specify what data we want from chartevents where itemid in ( 211, -- Heart Rate 618, -- Respiratory Rate 615 -- Resp Rate (Total) ) -- how did we know heart rates were stored using ITEMID 211? Simple, we looked in D_ITEMS! -- Try it for yourself: select * from d_items where lower(label) like '%heart rate%' ) select -- ICUSTAY_ID identifies each unique patient ICU stay -- note that if the same person stays in the ICU more than once, each stay would have a *different* ICUSTAY_ID -- however, since it's the same person, all those stays would have the same SUBJECT_ID ie.icustay_id -- this is the outcome of interest: in-hospital mortality , max(adm.HOSPITAL_EXPIRE_FLAG) as OUTCOME -- this is a case statement - essentially an "if, else" clause , min( case -- if the itemid is 211 when itemid = 211 -- then return the actual value stored in VALUENUM then valuenum -- otherwise, return 'null', which is SQL standard for an empty value else null -- end the case statement end ) as HeartRate_Min -- note we wrapped the above in "min()" -- this takes the minimum of all values inside, and *ignores* nulls -- by calling this on our case statement, we are ignoring all values except those with ITEMID = 211 -- since ITEMID 211 are heart rates, we take the minimum of only heart rates , max(case when itemid = 211 then valuenum else null end) as HeartRate_Max , min(case when itemid in (615,618) then valuenum else null end) as RespRate_Min , max(case when itemid in (615,618) then valuenum else null end) as RespRate_Max from icustays ie -- join to the admissions table to get hospital outcome inner join admissions adm on ie.hadm_id = adm.hadm_id -- join to the chartevents table to get the observations left join ce -- match the tables on the patient identifier on ie.icustay_id = ce.icustay_id -- and require that the observation be made after the patient is admitted to the ICU and ce.charttime >= ie.intime -- and *before* their admission time + 1 day, i.e. 
the observation must be made on their first day in the ICU and ce.charttime <= ie.intime + interval '1' day group by ie.icustay_id order by ie.icustay_id """ conn = sqlite3.connect('/Users/danielcphelps/Boxcryptor/Google Drive/daniel.c.phelps/Research/HealthcareAnalytics/mimic-workshop/data/mimicdata.sqlite') data = pd.read_sql_query(query,conn) print(data.head()) # close the connection as we are done loading data from server cur.close() con.close() # move from a data frame into a numpy array X = data.values y = X[:,1] # delete first 2 columns: the ID and the outcome X = np.delete(X,0,axis=1) X = np.delete(X,0,axis=1) # evaluate a logistic regression model using an 80%-20% training/test split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # impute mean for missing values imp = Imputer(missing_values='NaN', strategy='mean', axis=0) imp.fit(X_train) X_train = imp.transform(X_train) X_test = imp.transform(X_test) model = LogisticRegression(fit_intercept=True) model = model.fit(X_train, y_train) # predict class labels for the test set y_pred = model.predict(X_test) # generate class probabilities y_prob = model.predict_proba(X_test) # generate evaluation metrics print('Accuracy = {}'.format(metrics.accuracy_score(y_test, y_pred))) print('AUROC = {}'.format(metrics.roc_auc_score(y_test, y_prob[:, 1]))) print('\nConfusion matrix') print(metrics.confusion_matrix(y_test, y_pred)) print('\nClassification report') print(metrics.classification_report(y_test, y_pred)) # evaluate a logistic regression with L1 regularization # evaluate the model using 5-fold cross-validation # see: http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter # for list of scoring parameters estimator = Pipeline([("imputer", Imputer(missing_values='NaN', strategy="mean", axis=0)), ("regression", LogisticRegressionCV(penalty='l1', cv=5, scoring='roc_auc', solver='liblinear'))]) scores = cross_val_score(estimator , X, y , scoring='roc_auc', cv=5) print('AUROC for all folds:') print(scores) print('Average AUROC across folds:') print(scores.mean()) ```
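As an optional visual check (a sketch reusing `y_test`, `y_prob` and the `metrics` module from the train/test split section above), the reported AUROC can also be inspected as a ROC curve:

```
# plot the ROC curve behind the AUROC value reported above
fpr, tpr, _ = metrics.roc_curve(y_test, y_prob[:, 1])
plt.plot(fpr, tpr, label='AUROC = {:.3f}'.format(metrics.roc_auc_score(y_test, y_prob[:, 1])))
plt.plot([0, 1], [0, 1], 'k--', label='chance')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend(loc='lower right')
plt.show()
```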
github_jupyter
``` #Using our synthetic data library for today's exercise #pip install ydata #Loading the census dataset from kaggle import logging import os import requests import pandas as pd import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder #import ydata.synthetic.regular as synthetic #Dataset URL from kaggle data_url = 'https://www.kaggle.com/uciml/adult-census-income/downloads/adult.csv' # The local path where the data set is saved. local_filename = "adult.csv" # Kaggle Username and Password kaggle_info = {'UserName': "myusername", 'Password': "mypassword"} # Attempts to download the CSV file. Gets rejected because we are not logged in. r = requests.get(data_url) # Login to Kaggle and retrieve the data. r = requests.post(r.url, data = kaggle_info) # Writes the data to a local file one chunk at a time. f = open(local_filename, 'wb') for chunk in r.iter_content(chunk_size = 512 * 1024): # Reads 512KB at a time into memory if chunk: # filter out keep-alive new chunks f.write(chunk) f.close() adult_census = pd.read_csv('adult.csv') #For the purpose of this exercise we will filter information regarding only black and white individuals. adult_census = adult_census[(adult_census['race']=='White') | (adult_census['race']=='Black')] income = adult_census['income'] adult_census = adult_census.drop('education.num', axis=1) adult_census = adult_census.drop('income', axis=1) train_adult, test_adult, income_train, income_test = train_test_split(adult_census, income, test_size=0.33, random_state=42) train_adult['income'] = income_train train_adult.head(10) sns.set(style="dark", rc={'figure.figsize':(11.7,8.27)}) sns.countplot(x="race", palette="Paired", edgecolor=".6", data=train_adult) #Let's tackling the bias present in the dataset. #For that purpose we will need to filter the records belonging to only the black individuals. 
def filter_fn(row): if row['race'] == 'Black': return True else: return False filt = train_adult.apply(filter_fn, axis=1) train_adult_black = train_adult[filt] ``` ``` print("Number of records belonging to black individuals: {}".format(train_adult_black.shape[0])) sns.set(style="dark", rc={'figure.figsize':(11.7,8.27)}) sns.countplot(x="sex", palette="Paired", edgecolor=".6", data=train_adult_black) #In what concerns sex, we have an equal representation of women and man for the black population of the dataset #Using the YData synthetic data lib to generate new 3000 individuals for the black population synth_model = synthetic.SynthTabular() synth_model.fit(adult_black) synth_data = synth_model.sample(n_samples=3000) synth_data = pd.read_csv('synth_data.csv', index_col=[0]) synth_data = synth_data.drop('education.num', axis=1) synth_data = pd.concat([synth_data[synth_data['income']=='>50K'],synth_data[synth_data['income']=='<=50K'][:1000]]) synth_data.describe() #Now combining both the datasets test_adult['income'] = income_test adult_combined = synth_data.append(test_adult).sample(frac=1) #Let's check again how are we regarding the balancing of our classes for the race variable sns.set(style="dark", rc={'figure.figsize':(11.7,8.27)}) sns.countplot(x="race", palette="Paired", edgecolor=".6", data=adult_combined) #Auxiliar function to encode the categorical variables import numpy as np from sklearn.preprocessing import OneHotEncoder from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score, accuracy_score, average_precision_score def numerical_encoding(df, cat_cols=[], ord_cols=[]): try: assert isinstance(df, pd.DataFrame) except AssertionError as e: logging.error('The df input object must a Pandas dataframe. 
This action will not be executed.') return ord_cols_val = None cat_cols_val = None dummies = None cont_cols = list(set(df.columns) - set(cat_cols+ord_cols)) cont_vals = df[cont_cols].values if len(ord_cols) > 0: ord_cols_val = df[ord_cols].values label_encoder = LabelEncoder ord_encoded = label_encoder.fit_transform(ord_cols_val) if len(cat_cols) > 0: cat_cols_val = df[cat_cols].values hot_encoder = OneHotEncoder() cat_encoded = hot_encoder.fit_transform(cat_cols_val).toarray() dummies = [] for i, cat in enumerate(hot_encoder.categories_): for j in cat: dummies.append(cat_cols[i]+'_'+str(j)) if ord_cols_val is not None and cat_cols_val is not None: encoded = np.hstack([cont_vals, ord_encoded, cat_encoded]) columns = cont_cols+ord_cols+dummies elif cat_cols_val is not None: encoded = np.hstack([cont_vals, cat_encoded]) columns = cont_cols+ord_cols+dummies else: encoded = cont_vals columns = cont_cols return pd.DataFrame(encoded, columns=columns), dummies #validation functions def score_estimators(estimators, x_test, y_test): #f1_score average='micro' scores = {type(clf).__name__: f1_score(y_test, clf.predict(x_test), average='micro') for clf in estimators} return scores def fit_estimators(estimators, data_train, y_train): estimators_fit = [] for i, estimator in enumerate(estimators): estimators_fit.append(estimator.fit(data_train, y_train)) return estimators_fit def estimator_eval(data, y, cat_cols=[]): def order_cols(df): cols = sorted(df.columns.tolist()) return df[cols] data,_ = numerical_encoding(data, cat_cols=cat_cols) y, uniques = pd.factorize(y) data = order_cols(data) x_train, x_test, y_train, y_test = train_test_split(data, y, test_size=0.33, random_state=42) # Prepare train and test datasets estimators = [ LogisticRegression(multi_class='auto', solver='lbfgs', max_iter=500, random_state=42), RandomForestClassifier(n_estimators=10, random_state=42), DecisionTreeClassifier(random_state=42), SVC(gamma='auto'), KNeighborsClassifier(n_neighbors=5) ] estimators_names = [type(clf).__name__ for clf in estimators] for estimator in estimators: assert hasattr(estimator, 'fit') assert hasattr(estimator, 'score') estimators = fit_estimators(estimators, x_train, y_train) scores = score_estimators(estimators, x_test, y_test) return scores real_scores = estimator_eval(data=test_adult.drop('income', axis=1), y=test_adult['income'], cat_cols=['workclass', 'education', 'marital.status', 'occupation', 'relationship','race', 'sex', 'native.country']) synth_scores = estimator_eval(data=adult_combined.drop('income', axis=1), y=adult_combined['income'], cat_cols=['workclass', 'education', 'marital.status', 'occupation', 'relationship','race', 'sex', 'native.country']) dict_results = {'original': real_scores, 'synthetic': synth_scores} results = pd.DataFrame(dict_results).reset_index() print("Mean average accuracy improvement: {}".format((results['synthetic'] - results['original']).mean())) results_graph = results.melt('index', var_name='data_source', value_name='accuracy') pd.DataFrame(dict_results).transpose() #Final results comparision sns.barplot(x="index", y="accuracy", hue="data_source", data=results_graph, palette="Paired", edgecolor=".6") ```
github_jupyter
# LAB 5b: Deploy and predict with Keras model on Cloud AI Platform. **Learning Objectives** 1. Setup up the environment 1. Deploy trained Keras model to Cloud AI Platform 1. Online predict from model on Cloud AI Platform 1. Batch predict from model on Cloud AI Platform ## Introduction In this notebook, we'll deploying our Keras model to Cloud AI Platform and creating predictions. We will set up the environment, deploy a trained Keras model to Cloud AI Platform, online predict from deployed model on Cloud AI Platform, and batch predict from deployed model on Cloud AI Platform. Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/5b_deploy_keras_ai_platform_babyweight.ipynb). ## Set up environment variables and load necessary libraries Import necessary libraries. ``` import os ``` ### Lab Task #1: Set environment variables. Set environment variables so that we can use them throughout the entire lab. We will be using our project name for our bucket, so you only need to change your project and region. ``` %%bash PROJECT=$(gcloud config list project --format "value(core.project)") echo "Your current GCP Project Name is: "$PROJECT # Change these to try this notebook out PROJECT = "cloud-training-demos" # TODO: Replace with your PROJECT BUCKET = PROJECT # defaults to PROJECT REGION = "us-central1" # TODO: Replace with your REGION os.environ["BUCKET"] = BUCKET os.environ["REGION"] = REGION os.environ["TFVERSION"] = "2.1" %%bash gcloud config set compute/region $REGION gcloud config set ai_platform/region global ``` ## Check our trained model files Let's check the directory structure of our outputs of our trained model in folder we exported the model to in our last [lab](../solutions/10_train_keras_ai_platform_babyweight.ipynb). We'll want to deploy the saved_model.pb within the timestamped directory as well as the variable values in the variables folder. Therefore, we need the path of the timestamped directory so that everything within it can be found by Cloud AI Platform's model deployment service. ``` %%bash gsutil ls gs://${BUCKET}/babyweight/trained_model %%bash MODEL_LOCATION=$(gsutil ls -ld -- gs://${BUCKET}/babyweight/trained_model/2* \ | tail -1) gsutil ls ${MODEL_LOCATION} ``` ## Lab Task #2: Deploy trained model. Deploying the trained model to act as a REST web service is a simple gcloud call. Complete __#TODO__ by providing location of saved_model.pb file to Cloud AI Platoform model deployment service. The deployment will take a few minutes. ``` %%bash MODEL_NAME="babyweight" MODEL_VERSION="ml_on_gcp" MODEL_LOCATION=# TODO: Add GCS path to saved_model.pb file. echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION" # gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME} # gcloud ai-platform models delete ${MODEL_NAME} gcloud ai-platform models create ${MODEL_NAME} --regions ${REGION} gcloud ai-platform versions create ${MODEL_VERSION} \ --model=${MODEL_NAME} \ --origin=${MODEL_LOCATION} \ --runtime-version=2.1 \ --python-version=3.7 ``` ## Lab Task #3: Use model to make online prediction. Complete __#TODO__s for both the Python and gcloud Shell API methods of calling our deployed model on Cloud AI Platform for online prediction. ### Python API We can use the Python API to send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses are the order of the instances. 
``` from oauth2client.client import GoogleCredentials import requests import json MODEL_NAME = # TODO: Add model name MODEL_VERSION = # TODO: Add model version token = GoogleCredentials.get_application_default().get_access_token().access_token api = "https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict" \ .format(PROJECT, MODEL_NAME, MODEL_VERSION) headers = {"Authorization": "Bearer " + token } data = { "instances": [ { "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39 }, { "is_male": "False", "mother_age": 29.0, "plurality": "Single(1)", "gestation_weeks": 38 }, { "is_male": "True", "mother_age": 26.0, "plurality": "Triplets(3)", "gestation_weeks": 39 }, # TODO: Create another instance ] } response = requests.post(api, json=data, headers=headers) print(response.content) ``` The predictions for the four instances were: 5.33, 6.09, 2.50, and 5.86 pounds respectively when I ran it (your results might be different). ### gcloud shell API Instead we could use the gcloud shell API. Create a newline delimited JSON file with one instance per line and submit using gcloud. ``` %%writefile inputs.json {"is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39} {"is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39} ``` Now call `gcloud ai-platform predict` using the JSON we just created and point to our deployed `model` and `version`. ``` %%bash gcloud ai-platform predict \ --model=babyweight \ --json-instances=inputs.json \ --version=# TODO: Add model version ``` ## Lab Task #4: Use model to make batch prediction. Batch prediction is commonly used when you have thousands to millions of predictions. It will create an actual Cloud AI Platform job for prediction. Complete __#TODO__s so we can call our deployed model on Cloud AI Platform for batch prediction. ``` %%bash INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs gsutil cp inputs.json $INPUT gsutil -m rm -rf $OUTPUT gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \ --data-format=TEXT \ --region ${REGION} \ --input-paths=$INPUT \ --output-path=$OUTPUT \ --model=babyweight \ --version=# TODO: Add model version ``` ## Lab Summary: In this lab, we set up the environment, deployed a trained Keras model to Cloud AI Platform, online predicted from deployed model on Cloud AI Platform, and batch predicted from deployed model on Cloud AI Platform. Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
``` #!/usr/bin/python3 import sys sys.path.insert(0, '../src/') from ntree import * from tree_vis import * import numpy as np import matplotlib.pyplot as plt ``` ### Summary We are given: * a positive integer $n$ which is the number of dimensions of the space. * a positive integer $k$ which is the total number of discrete points we need to place inside that space. * each continuous point $x$ queried in that space follows a probability density function $PDF$. And we are asked for: * the $k$ discrete points that are placed in a way that minimizes the average distance (or mean error $ME$) from those continuous points $x$. # Proposed approach We propose a method that: * is an almost optimal solution that provides a decent decrease of the $ME$. * works for almost any number $k$ of points. * works for almost any number $n$ of dimensions. * adapts very fast (at a rate close to $\log k$). * is always better than the $ME$ provided by a uniform discretization, even before it is adapted. * is "model" free: the $PDF$ of the appearance of the $x$'s does not have to be known. ## How it works The core idea behind this method is the $2^n$-tree. Like quadtrees and octrees, which have $n$ equal to 2 and 3 respectively, n-trees can have any positive integer $n$ as their spatial subdivision factor, also known as the branching factor. $n$ is the number of dimensions of the space that these trees span. The $2^n$ child branches of each parent node expand in all the basic directions in order to fill up the space as uniformly as possible. Each node is a point in space that covers a specified region. This region is the corresponding fraction of its parent's region. In other words, the parent splits the region that is assigned to it into $2^n$ equal sub-regions, each with a volume $2^n$ times smaller than its parent's. The sub-regions that are produced are n-dimensional cubes too and have no overlap except for their touching surfaces. Of course they fully overlap with their parent, because they are actually its subsets. As the height of the tree grows, the nodes that are created have smaller and smaller volumes. Each point is located in the middle of its cube, so there is no way for two or more points to be in the same location. So a tree with $k$ nodes will have $k$ different points inside the given space. We assign to the root node the whole space we want to use, in our case the unit cube between the points $[0, 0, ..., 0]$ and $[1, 1, ..., 1]$. For example, let's take $n=2$. Root, the node in level zero, will have the point $[0.5,0.5]$ and is determined by the vertices $[0,0],[1,1]$. Its $2^n=4$ branches will split this area into 4 equal 2d-cubes (aka rectangles) with vertices and points: * cube 1 -> vertices $[0,0],[0.5,0.5]$ , point $[0.25,0.25]$ * cube 2 -> vertices $[0.5,0],[1,0.5]$ , point $[0.75,0.25]$ * cube 3 -> vertices $[0,0.5],[0.5,1]$ , point $[0.25,0.75]$ * cube 4 -> vertices $[0.5,0.5],[1,1]$ , point $[0.75,0.75]$ So now we have 5 points to fill up this space. Those 4 branches can be extended to create 16 new sub-branches (4 more branches each), adding up to a total of 21 points.
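To make the subdivision rule just described concrete, here is a minimal, self-contained sketch (independent of the `ntree` library imported above; `child_cell` is a hypothetical helper name) that computes which child sub-cube of a cell a query point falls into, together with that child's vertices and centre:

```
import numpy as np

def child_cell(low, high, point):
    """For a cubic cell with opposite vertices `low` and `high`, return the index of
    the child sub-cube containing `point`, plus that child's vertices and centre."""
    low, high = np.asarray(low, float), np.asarray(high, float)
    mid = (low + high) / 2.0
    # One bit per dimension: 1 if the point lies in the upper half along that axis.
    bits = (np.asarray(point, float) >= mid).astype(int)
    index = int(sum(int(b) << d for d, b in enumerate(bits)))
    child_low = np.where(bits == 1, mid, low)
    child_high = np.where(bits == 1, high, mid)
    return index, child_low, child_high, (child_low + child_high) / 2.0

# Reproducing the n=2 example above, with the root cell spanning [0,0]-[1,1]:
idx, lo, hi, centre = child_cell([0, 0], [1, 1], [0.4, 0.4])
print(idx, lo, hi, centre)   # -> 0 [0. 0.] [0.5 0.5] [0.25 0.25]
```

Descending with a rule like this from the root is exactly the search used later in this write-up: for $x=[0.4, 0.4]$ the first step already selects the sub-cube centred at $[0.25, 0.25]$. The cell below uses the `Tree` class from `ntree` to build and plot the 21-point example just described.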
``` tree = Tree(2, 21) tree.plot() points = tree.get_points() plt.figure() for p in points: plt.plot(p[0], p[1], 'bo', label='Points') plt.plot([0, 0], [0, 1], 'g--', label='Space') plt.plot([0, 1], [0, 0], 'g--') plt.plot([1, 1], [0, 1], 'g--') plt.plot([1, 0], [1, 1], 'g--') plt.xticks(np.linspace(0, 1, 9)) plt.yticks(np.linspace(0, 1, 9)) plt.title('Points spread into space') plt.grid(True) plt.legend() plt.show() ``` Additionally, nodes keep some useful information that helps **evaluate** them. It's very important to know what $ME$ each node produces and what it would have produced if it wasn't there. When the user queries a point $x$, we find the corresponding node by starting from the root and recursively following the sub-branches until we reach the one leaf that is responsible for the region this point belongs to. This search will not necessarily give us the nearest point to $x$, but it is very useful for two other things. * First of all, the result may not be the nearest point, but it will be the node that is __responsible for this specific area__. This means that if we want to increase the resolution of points near that particular $x$, we have to expand the node that is returned by that search. Using our previous example, for $x=[0.4, 0.4]$ the result of the search will be the node with the point $[0.25, 0.25]$ even if the root's point $[0.5, 0.5]$ is closer. But expanding this node will create a branch with point $[0.375, 0.375]$. * The second benefit is that while doing this search, __we can easily collect the information we need for the evaluation__, like the distance from the responsible point and the distance from its parent's point. Using again our previous example, searching for $[0.4, 0.4]$ will traverse the root and its branch with the point $[0.25, 0.25]$. In the process we can find the Euclidean distance of $x$ to each node point, which is $dist([0.5, 0.5], [0.4, 0.4])=0.141421$ and $dist([0.25, 0.25], [0.4, 0.4])=0.212132$ respectively. This gives us useful information about each node's utility. ## Adaptation of the tree We said before that the points of the nodes have fixed locations, which makes them unable to move and adapt by themselves. __By creating and deleting points we try to find the set of points that matches the PDF the best__. So the adaptation is actually a fitting, and we have all the required components to do that. We can expand and prune nodes to increase and decrease resolution, and we have a way of evaluating them to choose what is best for each one of them. Also, in order to ensure that the adaptation will produce stable results, and will always decrease the total $ME$, we make all the changes only to the outer level. The maximum difference between the old tree and the updated one is restricted to 1 level for each region. ### Increase resolution In order to decrease $ME$ we need to increase the resolution of points in the regions where it is needed. Obviously, the way to increase resolution in the desired regions is to expand the corresponding nodes. But there are two types of nodes: the expandable ones, which have unexpanded sub-branches, and the non-expandable ones, which are already fully expanded. To expand an expandable node we just create all its sub-branches. New points are spread in all directions, trying to fill up the space around the parent point uniformly. The goal is actually to search this region for a place of interest.
Points that fall into this place will be marked as useful by the evaluation process and will "survive", while the others will be deleted in the next update. Expanding a non-expandable node is trickier than expanding an expandable one. As we said before, this node is already fully expanded. But if our evaluation system says that we need to do that, we can't ignore it. Fully expanding its closest expandable children would be a solution, but it would create too many useless new nodes. We need a more targeted expansion. We can partly expand all the $2^n$ expandable nodes that are closest to this node. By partly I mean creating only the sub-branch that is towards the desired point. To generalize into a more compact solution, we create all the nodes that will approach the point we want, independently of the current state of the corresponding node. This means that direct (if there are any) and more distant sub-branches will be created. So expansion always creates $2^n$ new nodes, but not necessarily at the same level. ### Decrease resolution Decreasing resolution almost always increases $ME$, but it is necessary to maintain the right number of points. So we delete a node to free space for another node to be created. The goal of each prune is to increase the total $ME$ by less than the amount by which it will be decreased by the following expansion. Pruning is done by simply deleting/cutting a leaf node from its parent. Because leaf nodes have no sub-branches, the total number of points is decreased by 1. ### Decide which node to expand and which to cut Our goal is to decrease the $ME$ as much as possible so that we get as close as possible to the optimal solution. The only criterion we can use is the evaluation of each node, which depends only on the distances collected while searching. We have available, for each discrete point, the sum of distances of the continuous points searched near it. Also, we store in every parent node the total distance that would be collected if each of its children wasn't available. In the example we used earlier, for $x=[0.2, 0.2]$, we will add to the total distance counter of node $[0.25, 0.25]$ (which is the nearest neighbor) the number $dist([0.25, 0.25], [0.2, 0.2])=0.070711$. Also we will store on its parent node, $[0.5, 0.5]$, the information about the increase of the total distance that we would get if node $[0.25, 0.25]$ wasn't there. So the corresponding counter of node $[0.5, 0.5]$ for the node $[0.25, 0.25]$ will be increased by $dist([0.5, 0.5], [0.2, 0.2])=0.424264$. With that information, we can predict how much the total $ME$ will increase if we cut node $[0.25, 0.25]$. Now that we have the information we need available, we make an update to our tree. Each update has 2 steps: one pass for pruning first, to free some space, and one for expanding. * On pruning, we take all the leaf nodes and we cut the ones that do not produce more $ME$ than the average $ME$ of all nodes. * On expanding, we expand the nodes with the highest $ME$ until the tree reaches the desired size again. In case there are not enough nodes to expand, the tree will stay incomplete until the next update. ## Comparison with other approaches This solution contains many ideas and methods used to solve similar problems. However, none of them has the same goals as this one. The most similar approaches to this one are quadtrees and octrees used for spatial subdivision ([AMR](https://en.wikipedia.org/wiki/Adaptive_mesh_refinement)) and collision detection.
Apart from the fixed dimension $n$ of these methods, there are differences in the adaptation procedure too. First of all, almost none of them has a fixed number of points: they all increase resolution in the regions where it is needed, but never decrease it in other regions to maintain an upper limit. The increase of the resolution is also different, because these methods use only the leaves of the tree as points and discard all the parent nodes inside it. This means that each single expansion results in an increase of $2^n-1$ points (1 for the deleted parent), instead of $2^n$ as in our approach. This seems like a disadvantage but it is not. When discarding the parent, you become unable (or at least it becomes more difficult) to delete individual leaf nodes, and you can only merge the whole branch to recreate the parent. In the case that you need only one of the children, you are forced to keep all the other $2^n-1$ of them, and this limits the maximum level that your tree can reach. In our method, the overhead is 1 node, which means that the maximum level the tree can reach is the total number of points $k$. Of course this is the worst case scenario for their method and the best for ours, but in any case our method gives more freedom to the tree and reduces the overhead to a minimum, as even the overhead nodes are points in space that are often required too.
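To make the evaluation bookkeeping described above concrete, here is a minimal sketch (independent of the `ntree` library used earlier; the `Node` class and the `child_index` callback are simplified stand-ins of my own) of how a single query updates the counters that drive pruning and expansion:

```
import math

class Node:
    def __init__(self, centre):
        self.centre = centre        # the discrete point of this node
        self.children = {}          # child index -> Node
        self.total_dist = 0.0       # distance accumulated by queries served by this node
        self.child_penalty = {}     # child index -> distance this node would take over

def dist(a, b):
    return math.sqrt(sum((p - q) ** 2 for p, q in zip(a, b)))

def search_and_evaluate(root, x, child_index):
    """Descend to the leaf responsible for x, updating the evaluation counters."""
    node = root
    while True:
        idx = child_index(node, x)
        child = node.children.get(idx)
        if child is None:
            node.total_dist += dist(node.centre, x)   # this leaf serves x
            return node
        # If `child` were pruned, `node` would serve x instead; record that cost.
        node.child_penalty[idx] = node.child_penalty.get(idx, 0.0) + dist(node.centre, x)
        node = child

# Reproducing the earlier example: root at [0.5, 0.5] with one child at [0.25, 0.25].
root = Node((0.5, 0.5))
root.children[0] = Node((0.25, 0.25))
leaf = search_and_evaluate(root, (0.2, 0.2), lambda node, x: 0)
print(round(leaf.total_dist, 6), round(root.child_penalty[0], 6))   # 0.070711 0.424264
```

With these two counters available, the gap between a parent's penalty entry and the child's own accumulated distance estimates how much the total error would grow if that child were cut, which is the kind of quantity the prune-and-expand update described above relies on.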
## XYZ Pro Features This notebook demonstrates some of the pro features for XYZ Hub API. XYZ paid features can be found here: [xyz pro features](https://www.here.xyz/xyz_pro/). XYZ plans can be found here: [xyz plans](https://developer.here.com/pricing). ### Virtual Space A virtual space is described by definition which references other existing spaces(the upstream spaces). Queries being done to a virtual space will return the features of its upstream spaces combined. Below are different predefined operations of how to combine the features of the upstream spaces. - [group](#group_cell) - [merge](#merge_cell) - [override](#override_cell) - [custom](#custom_cell) ``` # Make necessary imports. import os import json import warnings from xyzspaces.datasets import get_chicago_parks_data, get_countries_data from xyzspaces.exceptions import ApiError import xyzspaces ``` <div class="alert alert-block alert-warning"> <b>Warning:</b> Before running below cells please make sure you have XYZ Token to interact with xyzspaces. Please see README.md in notebooks folder for more info on XYZ_TOKEN </div> ``` os.environ["XYZ_TOKEN"] = "MY-XYZ-TOKEN" # Replace your token here. xyz = xyzspaces.XYZ() # create two spaces which will act as upstream spaces for virtual space created later. title1 = "Testing xyzspaces" description1 = "Temporary space containing countries data." space1 = xyz.spaces.new(title=title1, description=description1) # Add some data to it space1 gj_countries = get_countries_data() space1.add_features(features=gj_countries) space_id1 = space1.info["id"] title2 = "Testing xyzspaces" description2 = "Temporary space containing Chicago parks data." space2 = xyz.spaces.new(title=title2, description=description2) # Add some data to space2 with open("./data/chicago_parks.geo.json", encoding="utf-8-sig") as json_file: gj_chicago = json.load(json_file) space2.add_features(features=gj_chicago) space_id2 = space2.info["id"] ``` <a id='group_cell'></a> #### Group Group means to combine the content of the specified spaces. All objects of each space will be part of the response when the virtual space is queried by the user. The information about which object came from which space can be found in the XYZ-namespace in the properties of each feature. When writing back these objects to the virtual space they'll be written back to the upstream space from which they were actually coming. ``` # Create a new virtual space by grouping two spaces created above. title = "Virtual Space for coutries and Chicago parks data" description = "Test group functionality of virtual space" upstream_spaces = [space_id1, space_id2] kwargs = {"virtualspace": dict(group=upstream_spaces)} vspace = xyz.spaces.virtual(title=title, description=description, **kwargs) print(json.dumps(vspace.info, indent=2)) # Reading a particular feature from space1 via virtual space. vfeature1 = vspace.get_feature(feature_id="FRA") feature1 = space1.get_feature(feature_id="FRA") assert vfeature1 == feature1 # Reading a particular feature from space2 via virtual space. vfeature2 = vspace.get_feature(feature_id="LP") feature2 = space2.get_feature(feature_id="LP") assert vfeature2 == feature2 # Deleting a feature from virtual space deletes corresponding feature from upstream space. vspace.delete_feature(feature_id="FRA") try: space1.get_feature("FRA") except ApiError as err: print(err) # Delete temporary spaces created. 
vspace.delete() space1.delete() space2.delete() ``` <a id='merge_cell'></a> #### Merge Merge means that objects with the same ID will be merged together. If there are duplicate feature-IDs in the various data of the upstream spaces, the duplicates will be merged to build a single feature. The result will be a response that is guaranteed to have no features with duplicate IDs. The merge will happen in the order of the space-references in the specified array. That means objects coming from the second space will overwrite potentially existing property values of objects coming from the first space. The information about which object came from which space(s) can be found in the XYZ-namespace in the properties of each feature. When writing back these objects to the virtual space they'll be written back to the upstream space from which they were actually coming, or the last one in the list if none was specified. When deleting features from the virtual space a new pseudo-deleted feature is written to the last space in the list. Trying to read the feature with that ID from the virtual space is not possible afterward. ``` # create two spaces with duplicate data title1 = "Testing xyzspaces" description1 = "Temporary space containing Chicago parks data." space1 = xyz.spaces.new(title=title1, description=description1) with open("./data/chicago_parks.geo.json", encoding="utf-8-sig") as json_file: gj_chicago = json.load(json_file) # Add some data to space1 space1.add_features(features=gj_chicago) space_id1 = space1.info["id"] title2 = "Testing xyzspaces duplicate" description2 = "Temporary space containing Chicago parks data duplicate" space2 = xyz.spaces.new(title=title2, description=description2) # Add some data to space2 space2.add_features(features=gj_chicago) space_id2 = space2.info["id"] # update a particular feature of the second space so that after the merge the virtual space will have this feature merged lp = space2.get_feature("LP") space2.update_feature(feature_id="LP", data=lp, add_tags=["foo", "bar"]) # Create a new virtual space by merging the two spaces created above. title = "Virtual Space for countries and Chicago parks data" description = "Test merge functionality of virtual space" upstream_spaces = [space_id1, space_id2] kwargs = {"virtualspace": dict(merge=upstream_spaces)} vspace = xyz.spaces.virtual(title=title, description=description, **kwargs) print(vspace.info) vfeature1 = vspace.get_feature(feature_id="LP") assert vfeature1["properties"]["@ns:com:here:xyz"]["tags"] == ["foo", "bar"] bp = space2.get_feature("BP") space2.update_feature(feature_id="BP", data=bp, add_tags=["foo1", "bar1"]) vfeature2 = vspace.get_feature(feature_id="BP") assert vfeature2["properties"]["@ns:com:here:xyz"]["tags"] == ["foo1", "bar1"] space1.delete() space2.delete() vspace.delete() ``` <a id='override_cell'></a> #### Override Override means that objects with the same ID will be overridden completely. If there are duplicate feature-IDs in the various data of the upstream spaces, the duplicates will be overridden to result in a single feature. The result will be a response that is guaranteed to have no features with duplicate IDs. The override will happen in the order of the space-references in the specified array. That means objects coming from the second space will override potentially existing features coming from the first space. The information about which object came from which space can be found in the XYZ-namespace in the properties of each feature.
When writing back these objects to the virtual space they'll be written back to the upstream space from which they were actually coming. When deleting features from the virtual space, the same rules as for merge apply. ``` # create two spaces with duplicate data title1 = "Testing xyzspaces" description1 = "Temporary space containing Chicago parks data." space1 = xyz.spaces.new(title=title1, description=description1) with open("./data/chicago_parks.geo.json", encoding="utf-8-sig") as json_file: gj_chicago = json.load(json_file) # Add some data to space1 space1.add_features(features=gj_chicago) space_id1 = space1.info["id"] title2 = "Testing xyzspaces duplicate" description2 = "Temporary space containing Chicago parks data duplicate" space2 = xyz.spaces.new(title=title2, description=description2) # Add some data to space2 space2.add_features(features=gj_chicago) space_id2 = space2.info["id"] # Create a new virtual space with the override operation. title = "Virtual Space for countries and Chicago parks data" description = "Test override functionality of virtual space" upstream_spaces = [space_id1, space_id2] kwargs = {"virtualspace": dict(override=upstream_spaces)} vspace = xyz.spaces.virtual(title=title, description=description, **kwargs) print(vspace.info) bp = space2.get_feature("BP") space2.update_feature(feature_id="BP", data=bp, add_tags=["foo1", "bar1"]) vfeature2 = vspace.get_feature(feature_id="BP") assert vfeature2["properties"]["@ns:com:here:xyz"]["tags"] == ["foo1", "bar1"] space1.delete() space2.delete() vspace.delete() ``` ### Applying clustering in space ``` # create a space which will hold the data to be clustered. title1 = "Testing xyzspaces" description1 = "Temporary space containing countries data." space1 = xyz.spaces.new(title=title1, description=description1) # Add some data to space1 gj_countries = get_countries_data() space1.add_features(features=gj_countries) space_id1 = space1.info["id"] # Generate clustering for the space space1.cluster(clustering="hexbin") # Delete created space space1.delete() ``` ### Rule based Tagging Rule based tagging lets you tag multiple features in a space with a particular tag, based on rules given as JSON-path expressions. Users can update a space with a map of rules, where the key is the tag to be applied to all features matching the JSON-path expression given as the value. If multiple rules match, multiple tags will be applied to the corresponding matched sets of features. It could even happen that a feature is matched by multiple rules and thus multiple tags will get added to it. ``` # Create a new space title = "Testing xyzspaces" description = "Temporary space containing Chicago parks data." space = xyz.spaces.new(title=title, description=description) # Add data to the space. with open("./data/chicago_parks.geo.json", encoding="utf-8-sig") as json_file: gj_chicago = json.load(json_file) _ = space.add_features(features=gj_chicago) # update the space to add tagging rules to it. tagging_rules = { "large": "$.features[?(@.properties.area>=500)]", "small": "$.features[?(@.properties.area<500)]", } _ = space.update(tagging_rules=tagging_rules) # verify that features are tagged correctly based on rules.
large_parks = space.search(tags=["large"]) for park in large_parks: assert park["id"] in ["LP", "BP", "JP"] small_parks = space.search(tags=["small"]) for park in small_parks: assert park["id"] in ["MP", "GP", "HP", "DP", "CP", "COP"] # Delete created space space.delete() ``` ### Activity Log The Activity log will enable tracking of changes in your space. To activate it, just create a space with the listener added and enable_uuid set to True. More information on the activity log can be found [here](https://www.here.xyz/api/devguide/activitylogguide/). ``` title = "Activity-Log Test" description = "Activity-Log Test" listeners = { "id": "activity-log", "params": {"states": 5, "storageMode": "DIFF_ONLY", "writeInvalidatedAt": "true"}, "eventTypes": ["ModifySpaceEvent.request"], } space = xyz.spaces.new( title=title, description=description, enable_uuid=True, listeners=listeners, ) from time import sleep # As activity log is async operation adding sleep to get info sleep(5) print(json.dumps(space.info, indent=2)) space.delete() ```
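As a plain-Python aside (not using the XYZ API; the property values below are made up for illustration), the difference between the merge and override strategies described earlier can be sketched with dictionaries standing in for two upstream copies of the same feature:

```
# Two upstream copies of a feature with the same ID, in upstream-space order.
feature_space1 = {"id": "BP", "properties": {"area": 600, "zip": "60614"}}
feature_space2 = {"id": "BP", "properties": {"area": 650}}

# merge: property values are combined; the later space wins on conflicts,
# but properties present only in the earlier space survive.
merged = {"id": "BP",
          "properties": {**feature_space1["properties"], **feature_space2["properties"]}}
print(merged["properties"])       # {'area': 650, 'zip': '60614'}

# override: the feature from the later space replaces the earlier one completely.
overridden = feature_space2
print(overridden["properties"])   # {'area': 650} -- 'zip' from space1 is gone
```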
# Pyber Ride Sharing 4 observations from the data: * Urban drivers typically drive more frequently yet charge less on average (i.e., under $30) than rural drivers. * Roughly two-thirds of all rides occur in Urban cities; however, roughly 80% of all drivers work in Urban areas. * While fewer rides occur in rural cities, there are on average fewer drivers to manage the load, creating a more favorable driver to ride ratio. * Rural drivers have the greatest fare distribution (i.e., roughly 40 dollars/driver) among drivers of all 3 city types. ``` import pandas as pd import matplotlib.pyplot as plt import numpy as np # Read in City Data csv file city_df = pd.read_csv('city_data.csv') # Read in Ride Data csv file ride_df = pd.read_csv('ride_data.csv') # Combine the 2 dataframes pyber_df = pd.merge(city_df, ride_df, on="city", how='left') pyber_df.head() # Find the total fare per city city_fare_total = pyber_df.groupby('city')['fare'].sum().to_frame() # Find the average fare ($) per city city_fare_avg = pyber_df.groupby('city')['fare'].mean().to_frame() # Find the total number of rides per city city_total_rides = pyber_df.groupby('city')['ride_id'].count().to_frame() # Find the total number of drivers per city city_driver_count = pyber_df.groupby('city')['driver_count'].unique().to_frame() city_driver_count['driver_count'] = city_driver_count['driver_count'].str.get(0) # Find the city type (urban, suburban, rural) city_type = pyber_df.groupby('city')['type'].unique().to_frame() city_type['type'] = city_type['type'].str.get(0) # Combine each dataframe city_fare_avg.columns=["city"] join_one = city_fare_avg.join(city_total_rides, how="left") join_one.columns=["Average Fare", "Total Rides"] join_two = join_one.join(city_fare_total, how="inner") join_two.columns=["Average Fare", "Total Rides", "City Fare Total"] join_three = join_two.join(city_driver_count, how="inner") join_three.columns=["Average Fare", "Total Rides", "City Fare Total", "Driver Count"] city_agg = join_three.join(city_type, how='inner') city_agg.columns=["Average Fare", "Total Rides", "City Fare Total", "Driver Count", "City Type"] city_agg.head() # Separate data by City Type urban_data = city_agg.loc[(city_agg['City Type']=='Urban'), :] suburban_data = city_agg.loc[(city_agg['City Type']=='Suburban'), :] rural_data = city_agg.loc[(city_agg['City Type']=='Rural'), :] ``` ## Bubble Plot ``` ## Bubble Plot Data all_urban_rides = urban_data.groupby('city')['Total Rides'].sum() avg_urban_fare = urban_data.groupby('city')['Average Fare'].mean() all_suburban_rides = suburban_data.groupby('city')['Total Rides'].sum() avg_suburban_fare = suburban_data.groupby('city')['Average Fare'].mean() all_rural_rides = rural_data.groupby('city')['Total Rides'].sum() avg_rural_fare = rural_data.groupby('city')['Average Fare'].mean() ## Bubble Plot # Scale each city's driver count for use as marker sizes (one size per city in each subset) urban_sizes = urban_data['Driver Count'] * 3 suburban_sizes = suburban_data['Driver Count'] * 3 rural_sizes = rural_data['Driver Count'] * 3 # Add chart note textstr = 'Note: Circle size corresponds to driver count/city' urban = plt.scatter(all_urban_rides, avg_urban_fare, s=urban_sizes, color='lightskyblue', alpha=0.65, edgecolors='none') suburban = plt.scatter(all_suburban_rides, avg_suburban_fare, s=suburban_sizes, color='gold', alpha=0.65, edgecolors='none') rural = plt.scatter(all_rural_rides, avg_rural_fare, s=rural_sizes, color='lightcoral', alpha=0.65, edgecolors='none') plt.grid(linestyle='dotted') plt.xlabel('Total Number of Rides (Per City)') plt.ylabel('Average Fare ($)')
plt.title('Pyber Ride Sharing Data (2016)') plt.gcf().text(0.95, 0.50, textstr, fontsize=8) plt.legend((urban, suburban, rural),('Urban', 'Suburban', 'Rural'),scatterpoints=1,loc='upper right',ncol=1,\ fontsize=8, markerscale=0.75,title='City Type', edgecolor='none',framealpha=0.25) plt.show() ``` ## Pie Charts ### Total Fares by City Type ``` ## Find Total Fares By City Type urban_fare_total = urban_data['City Fare Total'].sum() suburban_fare_total = suburban_data['City Fare Total'].sum() rural_fare_total = rural_data['City Fare Total'].sum() # Create a Pie Chart to Express the Above Date driver_type = ["Urban", "Suburban", "Rural"] driver_count = [urban_fare_total, suburban_fare_total, rural_fare_total] colors = ["lightskyblue", "gold","lightcoral"] explode = (0.1,0,0) plt.pie(driver_count, explode=explode, labels=driver_type, colors=colors, autopct="%1.1f%%", shadow=True, startangle=68) plt.title("% of Total Fares by City Type") plt.axis("equal") plt.show() ``` ### Total Rides by City Type ``` ## Find Total Rides By City Type urban_rides_count = urban_data['Total Rides'].sum() suburban_rides_count = suburban_data['Total Rides'].sum() rural_rides_count = rural_data['Total Rides'].sum() # Create a Pie Chart to Express the Above Date ride_type = ["Urban", "Suburban", "Rural"] ride_count = [urban_rides_count, suburban_rides_count, rural_rides_count] colors = ["lightskyblue", "gold","lightcoral"] explode = (0.1,0,0) plt.pie(ride_count, explode=explode, labels=ride_type, colors=colors, autopct="%1.1f%%", shadow=True, startangle=60) plt.title("% of Total Rides by City Type") plt.axis("equal") plt.show() ``` ### Total Drivers by City Type ``` ## Find Total Drivers By City Type urban_driver_count = urban_data['Driver Count'].sum() suburban_driver_count = suburban_data['Driver Count'].sum() rural_driver_count = rural_data['Driver Count'].sum() # Create a Pie Chart to Express the Above Date driver_type = ["Urban", "Suburban", "Rural"] driver_count = [urban_driver_count, suburban_driver_count, rural_driver_count] colors = ["lightskyblue", "gold","lightcoral"] explode = (0.1,0,0) plt.pie(driver_count, explode=explode, labels=driver_type, colors=colors, autopct="%1.1f%%", shadow=True, startangle=40) plt.title("% of Total Drivers by City Type") plt.axis("equal") plt.show() ``` ## Average Ride Value Per Driver (by City Type) ``` # Identify the average fare for drivers in each city type urban_avg_driver_pay = urban_fare_total / urban_rides_count suburban_avg_driver_pay = suburban_fare_total / suburban_rides_count rural_avg_driver_pay = rural_fare_total / rural_rides_count # Create a Bar Chart to Express the Above Date driver_type = ["Urban", "Suburban", "Rural"] avg_driver_pay = [urban_avg_driver_pay, suburban_avg_driver_pay, rural_avg_driver_pay] x_axis = np.arange(len(avg_driver_pay)) colors = ["lightskyblue", "gold","lightcoral"] plt.bar(x_axis, avg_driver_pay, color=colors, align='edge') tick_locations = [value+0.4 for value in x_axis] plt.xticks(tick_locations, ["Urban", "Suburban", "Rural"]) plt.ylim(0, max(avg_driver_pay)+1) plt.xlim(-0.25, len(driver_type)) plt.title("Average Per Ride Value for Drivers") plt.show() ``` ## Average Fare Distribution Across All Drivers (by City Type) ``` urban_fare_dist = urban_fare_total / urban_driver_count suburban_fare_dist = suburban_fare_total / suburban_driver_count rural_fare_dist = rural_fare_total / rural_driver_count # Create a Bar Chart to Express the Above Date driver_type = ["Urban", "Suburban", "Rural"] avg_fare_dist = [urban_fare_dist, 
suburban_fare_dist, rural_fare_dist] x_axis = np.arange(len(avg_fare_dist)) colors = ["lightskyblue", "gold","lightcoral"] plt.bar(x_axis, avg_fare_dist, color=colors, align='edge') tick_locations = [value+0.4 for value in x_axis] plt.xticks(tick_locations, ["Urban", "Suburban", "Rural"]) plt.ylim(0, max(avg_fare_dist)+1) plt.xlim(-0.25, len(driver_type)) plt.title("Average Fare Distribution Across All Drivers") plt.show() ```
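As an aside, the per-city aggregation assembled with the chained joins at the top of this notebook can also be expressed with a single `groupby().agg()` call. A sketch, assuming the same merged `pyber_df` and a pandas version that supports named aggregation (0.25+):

```
city_summary = (
    pyber_df.groupby(["city", "type"])
            .agg(average_fare=("fare", "mean"),
                 total_rides=("ride_id", "count"),
                 city_fare_total=("fare", "sum"),
                 driver_count=("driver_count", "first"))
            .reset_index()
)
city_summary.head()
```

The per-type splits then become simple boolean filters on the `type` column instead of separate joins.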
# torchserve.ipynb This notebook contains code for the portions of the benchmark in [the benchmark notebook](./benchmark.ipynb) that use [TorchServe](https://github.com/pytorch/serve). ``` # Imports go here import json import os import requests import scipy.special import transformers # Fix silly warning messages about parallel tokenizers os.environ['TOKENIZERS_PARALLELISM'] = 'False' # Constants go here INTENT_MODEL_NAME = 'mrm8488/t5-base-finetuned-e2m-intent' SENTIMENT_MODEL_NAME = 'cardiffnlp/twitter-roberta-base-sentiment' QA_MODEL_NAME = 'deepset/roberta-base-squad2' GENERATE_MODEL_NAME = 'gpt2' INTENT_INPUT = { 'context': ("I came here to eat chips and beat you up, " "and I'm all out of chips.") } SENTIMENT_INPUT = { 'context': "We're not happy unless you're not happy." } QA_INPUT = { 'question': 'What is 1 + 1?', 'context': """Addition (usually signified by the plus symbol +) is one of the four basic operations of arithmetic, the other three being subtraction, multiplication and division. The addition of two whole numbers results in the total amount or sum of those values combined. The example in the adjacent image shows a combination of three apples and two apples, making a total of five apples. This observation is equivalent to the mathematical expression "3 + 2 = 5" (that is, "3 plus 2 is equal to 5"). """ } GENERATE_INPUT = { 'prompt_text': 'All your base are' } ``` ## Model Packaging TorchServe requires models to be packaged up as model archive files. Documentation for this process (such as it is) is [here](https://github.com/pytorch/serve/blob/master/README.md#serve-a-model) and [here](https://github.com/pytorch/serve/blob/master/model-archiver/README.md). ### Intent Model The intent model requires the caller to call the pre- and post-processing code manually. Only the model and tokenizer are provided on the model zoo. ``` # First we need to dump the model into a local directory. intent_model = transformers.AutoModelForSeq2SeqLM.from_pretrained( INTENT_MODEL_NAME) intent_tokenizer = transformers.AutoTokenizer.from_pretrained('t5-base') intent_model.save_pretrained('torchserve/intent') intent_tokenizer.save_pretrained('torchserve/intent') ``` Next we wrapped the model in a handler class, located at `./torchserve/handler_intent.py`, which needs to be in its own separate Python file in order for the `torch-model-archiver` utility to work. The following command turns this Python file, plus the data files created by the previous cell, into a model archive (`.mar`) file at `torchserve/model_store/intent.mar`. ``` %%time !mkdir -p torchserve/model_store !torch-model-archiver --model-name intent --version 1.0 \ --serialized-file torchserve/intent/pytorch_model.bin \ --handler torchserve/handler_intent.py \ --extra-files "torchserve/intent/config.json,torchserve/intent/special_tokens_map.json,torchserve/intent/tokenizer_config.json,torchserve/intent/tokenizer.json" \ --export-path torchserve/model_store \ --force ``` ### Sentiment Model The sentiment model operates similarly to the intent model. 
``` sentiment_tokenizer = transformers.AutoTokenizer.from_pretrained( SENTIMENT_MODEL_NAME) sentiment_model = ( transformers.AutoModelForSequenceClassification .from_pretrained(SENTIMENT_MODEL_NAME)) sentiment_model.save_pretrained('torchserve/sentiment') sentiment_tokenizer.save_pretrained('torchserve/sentiment') contexts = ['hello', 'world'] input_batch = sentiment_tokenizer(contexts, padding=True, return_tensors='pt') inference_output = sentiment_model(**input_batch) scores = inference_output.logits.detach().numpy() scores = scipy.special.softmax(scores, axis=1).tolist() scores = [{k: v for k, v in zip(['positive', 'neutral', 'negative'], row)} for row in scores] # return scores scores ``` As with the intent model, we created a handler class (located at `torchserve/handler_sentiment.py`), then pass that class and the serialized model from two cells ago through the `torch-model-archiver` utility. ``` %%time !torch-model-archiver --model-name sentiment --version 1.0 \ --serialized-file torchserve/sentiment/pytorch_model.bin \ --handler torchserve/handler_sentiment.py \ --extra-files "torchserve/sentiment/config.json,torchserve/sentiment/special_tokens_map.json,torchserve/sentiment/tokenizer_config.json,torchserve/sentiment/tokenizer.json" \ --export-path torchserve/model_store \ --force ``` ### Question Answering Model The QA model uses a `transformers` pipeline. We squeeze this model into the TorchServe APIs by telling the pipeline to serialize all of its parts to a single directory, then passing the parts that aren't `pytorch_model.bin` in as extra files. At runtime, our custom handler uses the model loading code from `transformers` on the reconstituted model directory. ``` qa_pipeline = transformers.pipeline('question-answering', model=QA_MODEL_NAME) qa_pipeline.save_pretrained('torchserve/qa') ``` As with the previous models, we wrote a class (located at `torchserve/handler_qa.py`), then pass that wrapper class and the serialized model through the `torch-model-archiver` utility. ``` %%time !torch-model-archiver --model-name qa --version 1.0 \ --serialized-file torchserve/qa/pytorch_model.bin \ --handler torchserve/handler_qa.py \ --extra-files "torchserve/qa/config.json,torchserve/qa/merges.txt,torchserve/qa/special_tokens_map.json,torchserve/qa/tokenizer_config.json,torchserve/qa/tokenizer.json,torchserve/qa/vocab.json" \ --export-path torchserve/model_store \ --force data = [QA_INPUT, QA_INPUT] # Preprocessing samples = [qa_pipeline.create_sample(**r) for r in data] generators = [qa_pipeline.preprocess(s) for s in samples] # Inference inference_outputs = ((qa_pipeline.forward(example) for example in batch) for batch in generators) post_results = [qa_pipeline.postprocess(o) for o in inference_outputs] post_results ``` ### Natural Language Generation Model The text generation model is roughly similar to the QA model, albeit with important differences in how the three stages of the pipeline operate. At least model loading is the same. ``` generate_pipeline = transformers.pipeline( 'text-generation', model=GENERATE_MODEL_NAME) generate_pipeline.save_pretrained('torchserve/generate') data = [GENERATE_INPUT, GENERATE_INPUT] pad_token_id = generate_pipeline.tokenizer.eos_token_id json_records = data # preprocess() takes a single input at a time, but we need to do # a batch at a time. input_batch = [generate_pipeline.preprocess(**r) for r in json_records] # forward() takes a single input at a time, but we need to run a # batch at a time. 
inference_output = [ generate_pipeline.forward(r, pad_token_id=pad_token_id) for r in input_batch] # postprocess() takes a single generation result at a time, but we # need to run a batch at a time. generate_result = [generate_pipeline.postprocess(i) for i in inference_output] generate_result ``` Once again, we wrote a class (located at `torchserve/handler_generate.py`), then pass that wrapper class and the serialized model through the `torch-model-archiver` utility. ``` %%time !torch-model-archiver --model-name generate --version 1.0 \ --serialized-file torchserve/generate/pytorch_model.bin \ --handler torchserve/handler_generate.py \ --extra-files "torchserve/generate/config.json,torchserve/generate/merges.txt,torchserve/generate/special_tokens_map.json,torchserve/generate/tokenizer_config.json,torchserve/generate/tokenizer.json,torchserve/generate/vocab.json" \ --export-path torchserve/model_store \ --force ``` ## Testing Now we can fire up TorchServe and test our models. For some reason, starting TorchServe needs to be done in a proper terminal window. Running the command from this notebook has no effect. The commands to run (from the root of the repository) are: ``` > conda activate ./env > cd notebooks/benchmark/torchserve > torchserve --start --ncs --model-store model_store --ts-config torchserve.properties ``` Then pick up a cup of coffee and a book and wait a while. The startup process is like cold-starting a gas turbine and takes about 10 minutes. Once the server has started, we can test our deployed models by making POST requests. ``` # Probe the management API to verify that TorchServe is running. requests.get('http://127.0.0.1:8081/models').json() port = 8080 intent_result = requests.put( f'http://127.0.0.1:{port}/predictions/intent_en', json.dumps(INTENT_INPUT)).json() print(f'Intent result: {intent_result}') sentiment_result = requests.put( f'http://127.0.0.1:{port}/predictions/sentiment_en', json.dumps(SENTIMENT_INPUT)).json() print(f'Sentiment result: {sentiment_result}') qa_result = requests.put( f'http://127.0.0.1:{port}/predictions/qa_en', json.dumps(QA_INPUT)).json() print(f'Question answering result: {qa_result}') generate_result = requests.put( f'http://127.0.0.1:{port}/predictions/generate_en', json.dumps(GENERATE_INPUT)).json() print(f'Natural language generation result: {generate_result}') ``` ## Cleanup TorchServe consumes many resources even when it isn't doing anything. When you're done running the baseline portion of the benchmark, be sure to shut down the server by running: ``` > torchserve --stop ```
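If you only want to free the resources of a single model rather than stopping the whole server, TorchServe's management API (port 8081 in the `torchserve.properties` configuration used above) can also describe and unregister individual models. A sketch; the model name and version below are the ones used in this notebook, and the exact endpoints should be double-checked against your TorchServe version:

```
import requests

# Describe a single registered model via the management API.
print(requests.get('http://127.0.0.1:8081/models/intent_en').json())

# Unregister one model version without stopping the server.
resp = requests.delete('http://127.0.0.1:8081/models/intent_en/1.0')
print(resp.status_code, resp.text)
```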
# Lab 1: Markov Decision Processes - Problem 3 ## Lab Instructions All your answers should be written in this notebook. You shouldn't need to write or modify any other files. **You should execute every block of code to not miss any dependency.** *This project was developed by Peter Chen, Rocky Duan, Pieter Abbeel for the Berkeley Deep RL Bootcamp, August 2017. Bootcamp website with slides and lecture videos: https://sites.google.com/view/deep-rl-bootcamp/. It is adapted from CS188 project materials: http://ai.berkeley.edu/project_overview.html.* -------------------------- ``` import numpy as np, numpy.random as nr, gym import matplotlib.pyplot as plt %matplotlib inline np.set_printoptions(precision=3) ``` ### Problem 3: Sampling-based Tabular Q-Learning So far we have implemented Value Iteration and Policy Iteration, both of which require access to an MDP's dynamics model. This requirement can sometimes be restrictive - for example, if the environment is given as a blackbox physics simulator, then we won't be able to read off the whole transition model. We can however use sampling-based Q-Learning to learn from this type of environments. For this exercise, we will learn to control a Crawler robot. Let's first try some completely random actions to see how the robot moves and familiarize ourselves with Gym environment interface again. ``` from crawler_env import CrawlingRobotEnv env = CrawlingRobotEnv() print("We can inspect the observation space and action space of this Gym Environment") print("-----------------------------------------------------------------------------") print("Action space:", env.action_space) print("It's a discrete space with %i actions to take" % env.action_space.n) print("Each action corresponds to increasing/decreasing the angle of one of the joints") print("We can also sample from this action space:", env.action_space.sample()) print("Another action sample:", env.action_space.sample()) print("Another action sample:", env.action_space.sample()) print("Observation space:", env.observation_space, ", which means it's a 9x13 grid.") print("It's the discretized version of the robot's two joint angles") env = CrawlingRobotEnv( render=True, # turn render mode on to visualize random motion ) # standard procedure for interfacing with a Gym environment cur_state = env.reset() # reset environment and get initial state ret = 0. done = False i = 0 while not done: action = env.action_space.sample() # sample an action randomly next_state, reward, done, info = env.step(action) ret += reward cur_state = next_state i += 1 if i == 1500: break # for the purpose of this visualization, let's only run for 1500 steps # also note the GUI won't close automatically # you can close the visualization GUI with the following method env.close_gui() ``` You will see the random controller can sometimes make progress but it won't get very far. Let's implement Tabular Q-Learning with $\epsilon$-greedy exploration to find a better policy piece by piece. ``` from collections import defaultdict import random # dictionary that maps from state, s, to a numpy array of Q values [Q(s, a_1), Q(s, a_2) ... Q(s, a_n)] # and everything is initialized to 0. q_vals = defaultdict(lambda: np.array([0. for _ in range(env.action_space.n)])) print("Q-values for state (0, 0): %s" % q_vals[(0, 0)], "which is a list of Q values for each action") print("As such, the Q value of taking action 3 in state (1,2), i.e. 
Q((1,2), 3), can be accessed by q_vals[(1,2)][3]:", q_vals[(1,2)][3]) def eps_greedy(q_vals, eps, state): """ Inputs: q_vals: q value tables eps: epsilon state: current state Outputs: random action with probability of eps; argmax Q(s, .) with probability of (1-eps) """ # you might want to use random.random() to implement random exploration # number of actions can be read off from len(q_vals[state]) import random # YOUR CODE HERE # MY CODE ------------------------------------------------------------------- if random.random() <= eps: return np.random.randint(0, len(q_vals[state])) return np.argmax(q_vals[state]) #---------------------------------------------------------------------------- # test 1 dummy_q = defaultdict(lambda: np.array([0. for _ in range(env.action_space.n)])) test_state = (0, 0) dummy_q[test_state][0] = 10. trials = 100000 sampled_actions = [ int(eps_greedy(dummy_q, 0.3, test_state)) for _ in range(trials) ] freq = np.sum(np.array(sampled_actions) == 0) / trials tgt_freq = 0.3 / env.action_space.n + 0.7 if np.isclose(freq, tgt_freq, atol=1e-2): print("Test1 passed") else: print("Test1: Expected to select 0 with frequency %.2f but got %.2f" % (tgt_freq, freq)) # test 2 dummy_q = defaultdict(lambda: np.array([0. for _ in range(env.action_space.n)])) test_state = (0, 0) dummy_q[test_state][2] = 10. trials = 100000 sampled_actions = [ int(eps_greedy(dummy_q, 0.5, test_state)) for _ in range(trials) ] freq = np.sum(np.array(sampled_actions) == 2) / trials tgt_freq = 0.5 / env.action_space.n + 0.5 if np.isclose(freq, tgt_freq, atol=1e-2): print("Test2 passed") else: print("Test2: Expected to select 2 with frequency %.2f but got %.2f" % (tgt_freq, freq)) ``` Next we will implement Q learning update. After we observe a transition $s, a, s', r$, $$\textrm{target}(s') = R(s,a,s') + \gamma \max_{a'} Q_{\theta_k}(s',a')$$ $$Q_{k+1}(s,a) \leftarrow (1-\alpha) Q_k(s,a) + \alpha \left[ \textrm{target}(s') \right]$$ ``` def q_learning_update(gamma, alpha, q_vals, cur_state, action, next_state, reward): """ Inputs: gamma: discount factor alpha: learning rate q_vals: q value table cur_state: current state action: action taken in current state next_state: next state results from taking `action` in `cur_state` reward: reward received from this transition Performs in-place update of q_vals table to implement one step of Q-learning """ # YOUR CODE HERE # MY CODE ------------------------------------------------------------------- target = reward + gamma*np.max(q_vals[next_state]) q_vals[cur_state][action] -= alpha*(q_vals[cur_state][action] - target) #---------------------------------------------------------------------------- # testing your q_learning_update implementation dummy_q = q_vals.copy() test_state = (0, 0) test_next_state = (0, 1) dummy_q[test_state][0] = 10. dummy_q[test_next_state][1] = 10. q_learning_update(0.9, 0.1, dummy_q, test_state, 0, test_next_state, 1.1) tgt = 10.01 if np.isclose(dummy_q[test_state][0], tgt,): print("Test passed") else: print("Q(test_state, 0) is expected to be %.2f but got %.2f" % (tgt, dummy_q[test_state][0])) # now with the main components tested, we can put everything together to create a complete q learning agent env = CrawlingRobotEnv() q_vals = defaultdict(lambda: np.array([0. for _ in range(env.action_space.n)])) gamma = 0.9 alpha = 0.1 eps = 0.5 cur_state = env.reset() def greedy_eval(): """evaluate greedy policy w.r.t current q_vals""" test_env = CrawlingRobotEnv(horizon=np.inf) prev_state = test_env.reset() ret = 0. 
done = False H = 100 for i in range(H): action = np.argmax(q_vals[prev_state]) state, reward, done, info = test_env.step(action) ret += reward prev_state = state return ret / H for itr in range(300000): # YOUR CODE HERE # Hint: use eps_greedy & q_learning_update # MY CODE -------------------------------------------------------------------- action = eps_greedy(q_vals, eps, cur_state) next_state, reward, done, info = env.step(action) q_learning_update(gamma, alpha, q_vals, cur_state, action, next_state, reward) cur_state = next_state #----------------------------------------------------------------------------- if itr % 50000 == 0: # evaluation print("Itr %i # Average speed: %.2f" % (itr, greedy_eval())) # at the end of learning your crawler should reach a speed of >= 3 ``` After the learning is successful, we can visualize the learned robot controller. Remember we learn this just from interacting with the environment instead of peeking into the dynamics model! ``` env = CrawlingRobotEnv(render=True, horizon=500) prev_state = env.reset() ret = 0. done = False while not done: action = np.argmax(q_vals[prev_state]) state, reward, done, info = env.step(action) ret += reward prev_state = state # you can close the visualization GUI with the following method env.close_gui() ```
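A common refinement, not required by the lab, is to decay the exploration rate epsilon over training, so the agent explores a lot early on and exploits more once its Q-values have improved. A minimal sketch that continues training the `q_vals` table with the functions defined above (the linear schedule and its endpoints are my own choice):

```
# Linearly anneal epsilon from eps_start down to eps_end over training.
eps_start, eps_end = 0.5, 0.05
total_itrs = 300000

env = CrawlingRobotEnv()
cur_state = env.reset()
for itr in range(total_itrs):
    eps = eps_start + (eps_end - eps_start) * (itr / total_itrs)
    action = eps_greedy(q_vals, eps, cur_state)
    next_state, reward, done, info = env.step(action)
    q_learning_update(gamma, alpha, q_vals, cur_state, action, next_state, reward)
    cur_state = next_state
    if itr % 50000 == 0:
        print("Itr %i # Average speed: %.2f" % (itr, greedy_eval()))
```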
# Exercise 6-3 ## LSTM The following two cells will create an LSTM cell with one neuron. We scale the output of the LSTM linearly and add a bias. Then the output is wrapped in a sigmoid activation. The goal is to predict a time series where every $n^{th}$ ($5^{th}$ in the current example) element is 1 and all others are 0. a) Please read and understand the source code below. b) Inspect the output of the predictions. What do you observe? How does the LSTM manage to predict the next element in the sequence? ``` import tensorflow as tf import numpy as np from matplotlib import pyplot as plt tf.reset_default_graph() tf.set_random_seed(12314) epochs=50 zero_steps = 5 learning_rate = 0.01 lstm_neurons = 1 out_dim = 1 num_features = 1 batch_size = zero_steps window_size = zero_steps*2 time_steps = 5 x = tf.placeholder(tf.float32, [None, window_size, num_features], 'x') y = tf.placeholder(tf.float32, [None, out_dim], 'y') lstm = tf.nn.rnn_cell.LSTMCell(lstm_neurons) state = lstm.zero_state(batch_size, dtype=tf.float32) regression_w = tf.Variable(tf.random_normal([lstm_neurons])) regression_b = tf.Variable(tf.random_normal([out_dim])) outputs, state = tf.contrib.rnn.static_rnn(lstm, tf.unstack(x, window_size, 1), state) output = outputs[-1] predicted = tf.nn.sigmoid(output * regression_w + regression_b) cost = tf.reduce_mean(tf.losses.mean_squared_error(y, predicted)) optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost) forget_gate = output.op.inputs[1].op.inputs[0].op.inputs[0].op.inputs[0] input_gate = output.op.inputs[1].op.inputs[0].op.inputs[1].op.inputs[0] cell_candidates = output.op.inputs[1].op.inputs[0].op.inputs[1].op.inputs[1] output_gate_sig = output.op.inputs[0] output_gate_tanh = output.op.inputs[1] X = [ [[ (shift-n) % zero_steps == 0 ] for n in range(window_size) ] for shift in range(batch_size) ] Y = [[ shift % zero_steps == 0 ] for shift in range(batch_size) ] with tf.Session() as sess: sess.run(tf.initializers.global_variables()) loss = 1 epoch = 0 while loss >= 1e-5: epoch += 1 _, loss = sess.run([optimizer, cost], {x:X, y:Y}) if epoch % (epochs//10) == 0: print("loss %.5f" % (loss), end='\t\t\r') print() outs, stat, pred, fg, inpg, cell_cands, outg_sig, outg_tanh = sess.run([outputs, state, predicted, forget_gate, input_gate, cell_candidates, output_gate_sig, output_gate_tanh], {x:X, y:Y}) outs = np.asarray(outs) for batch in reversed(range(batch_size)): print("input:") print(np.asarray(X)[batch].astype(int).reshape(-1)) print("forget\t\t%.4f\ninput gate\t%.4f\ncell cands\t%.4f\nout gate sig\t%.4f\nout gate tanh\t%.4f\nhidden state\t%.4f\ncell state\t%.4f\npred\t\t%.4f\n\n" % ( fg[batch,0], inpg[batch,0], cell_cands[batch,0], outg_sig[batch,0], outg_tanh[batch,0], stat.h[batch,0], stat.c[batch,0], pred[batch,0])) ``` LSTM gates: ![grafik.png](attachment:grafik.png) (image source: https://www.stratio.com/wp-content/uploads/2017/10/6-1.jpg) ### Answers * When the current element is 1, the forget gate says "forget" (its value is close to 0) $\Rightarrow$ the cell state is reset. * The cell state (long term memory) decreases until it reaches a certain point. Then the hidden state is activated and thus the prediction is close to 1. * The sigmoid output gate ($o_t$) is always close to 1 $\Rightarrow$ the hidden state is directly dependent on the cell state (no short term memory is used). * The input gate ($i_t$) is always close to 1, thus the cell candidates ($c_t$) will always be accepted. * The cell candidates ($c_t$) are mainly dependent on $x_t$.
It is close to 1 when $x_t$ is one (resetting the counter) and negative if $x_t$ is 0 (decreasing the counter). Note that with other initial values (different seed) it may result in a different local minimum (the counter could increase, $h_t$ could be negative and be scaled negative, ...)
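For reference, the quantities printed by the cell above correspond to the standard LSTM update equations (written in the common textbook notation, which TensorFlow's `LSTMCell` follows up to weight ordering; the answers above write the cell candidates as $c_t$, which are $\tilde{c}_t$ below): $$f_t = \sigma(W_f [h_{t-1}, x_t] + b_f), \qquad i_t = \sigma(W_i [h_{t-1}, x_t] + b_i), \qquad \tilde{c}_t = \tanh(W_c [h_{t-1}, x_t] + b_c)$$ $$c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c}_t, \qquad o_t = \sigma(W_o [h_{t-1}, x_t] + b_o), \qquad h_t = o_t \odot \tanh(c_t)$$ In the printout, "forget", "input gate" and "out gate sig" are $f_t$, $i_t$ and $o_t$, "cell cands" is $\tilde{c}_t$, and "out gate tanh" corresponds to $\tanh(c_t)$, the second factor of the hidden state.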
# 2020L-WUM Homework 2 Code: **Bartłomiej Eljasiak** ## Loading the libraries We will use these libraries in many places; some code fragments contain additional imports, but in those cases the use of the loaded library is limited to the chunk in which it was loaded. ``` import pandas as pd import seaborn as sns import numpy as np import sklearn ``` ## Loading the data ``` # local version _data=pd.read_csv('allegro-api-transactions.csv') # online version #_data = pd.read_csv('https://www.dropbox.com/s/360xhh2d9lnaek3/allegro-api-transactions.csv?dl=1') cdata=_data.copy() ``` ### A first look at the data ``` cdata.head() ``` # Data processing ``` len(cdata.it_location.unique()) cdata.info() ``` At first glance we have no missing values in the data, which makes our work much easier. # Encoding categorical variables We know that our target will be `price`, so in this section we want to convert all the categorical variables that we will use later in the code into numerical ones. An effective conversion using appropriate methods such as **target encoding** and **one-hot encoding** will let us transform the current information so that we can use it in mathematical operations. We will, however, skip the `categories` column in our transformations. ## Target encoding for `it_location` ``` import category_encoders y=cdata.price te = category_encoders.target_encoder.TargetEncoder(cdata.it_location, smoothing=100) encoded = te.fit_transform(cdata.it_location,y) encoded ``` ## Different ways of encoding the `main_category` column ## One-hot Coding ``` from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder # integer encode le = LabelEncoder() integer_encoded = le.fit_transform(cdata.main_category) print(integer_encoded) # binary encode onehot_encoder = OneHotEncoder(categories='auto',sparse=False) integer_encoded = integer_encoded.reshape(len(integer_encoded), 1) onehot_encoded = onehot_encoder.fit_transform(integer_encoded) print(onehot_encoded) ``` This way we converted the categorical column into 26 columns with values 0 or 1. It is not a bad solution, but it definitely increases the size of our data frame and lengthens the model training time. A better solution probably exists. ## Helmert Coding Documentation: [scikit-learn](http://contrib.scikit-learn.org/categorical-encoding/helmert.html) ``` # We take it from category_encoders helmert_encoder = category_encoders.HelmertEncoder() helmert_encoded = helmert_encoder.fit_transform(cdata.main_category) # showing only first 5 encoded rows print(helmert_encoded.loc[1:5,:].transpose()) ``` ## Backward Difference Coding Documentation: [scikit-learn](http://contrib.scikit-learn.org/categorical-encoding/backward_difference.html#backward-difference-coding) ``` # We take it from category_encoders back_diff_encoder = category_encoders.BackwardDifferenceEncoder() back_diff_encoded = back_diff_encoder.fit_transform(cdata.main_category) # showing only first 5 encoded rows print(back_diff_encoded.loc[1:5,:].transpose()) ``` # Imputing missing values ### Selecting data from the frame From here on we will work on only 3 columns of the data, so I will restrict the frame to them for clarity. ``` data_selected= cdata.loc[:,['price','it_seller_rating','it_quantity']] data_selected.head() ``` ### Removing data from a column We will remove data from a column with the `df.column.sample(frac)` function, where `frac` denotes the fraction of the data we want to keep.
This guarantees a reasonably random removal of the data, which should be sufficient for our further work. ``` cdata.price.sample(frac=0.9) ``` ### Evaluating the quality of the imputation To evaluate the effectiveness of the imputation algorithms used, we have to choose some way of measuring it. Following the instructor's suggestion, I will use [RMSE](https://en.wikipedia.org/wiki/Root_mean_square), i.e. the root mean square error. The name should give an idea of how RMSE is computed; I encourage the curious to follow the link. ## Data imputation Let's write a function that will let us test a chosen imputation method. ``` from sklearn.experimental import enable_iterative_imputer from sklearn.impute import IterativeImputer from sklearn.metrics import mean_squared_error def test_imputation(imputer,iterations=10): _resoults=[] # we always use the same data, so it's taken globally for i in range(iterations): test_data = data_selected.copy() test_data.it_seller_rating = test_data.it_seller_rating.sample(frac = 0.9) data_imputed = pd.DataFrame(imputer.fit_transform(test_data)) _resoults.append(np.sqrt(mean_squared_error(data_selected,data_imputed))) return _resoults ``` And let this be an example of how such a function works. ``` imputer = IterativeImputer(max_iter=10,random_state=0) RMSE_list = test_imputation(imputer,20) print("The mean RMSE is", round(np.mean(RMSE_list))) print('The standard deviation of the RMSE is: ', round(np.std(RMSE_list))) RMSE_list ``` The standard deviation is quite small, so I consider the imputation method effective. I encourage interested readers to test this function with other imputation types and a varying number of iterations. ### Removing data from multiple columns Let's repeat the previous example with a small modification. This time we will remove data from both `it_seller_rating` and `it_quantity`. Let's write an appropriate function for this and look at the results. ``` def test_imputation2(imputer,iterations=10): _resoults=[] # we always use the same data, so it's taken globally for i in range(iterations): test_data = data_selected.copy() test_data.it_seller_rating = test_data.it_seller_rating.sample(frac = 0.9) test_data.it_quantity = test_data.it_quantity.sample(frac = 0.9) data_imputed = pd.DataFrame(imputer.fit_transform(test_data)) _resoults.append(np.sqrt(mean_squared_error(data_selected,data_imputed))) return _resoults imputer = IterativeImputer(max_iter=10,random_state=0) RMSE_list = test_imputation2(imputer,20) print("The mean RMSE is", round(np.mean(RMSE_list))) print('The standard deviation of the RMSE is: ', round(np.std(RMSE_list))) RMSE_list ``` As we might expect, the average error is larger when we removed more data. Again, I encourage you to repeat the computations and check the results. ### A closer look at the `IterativeImputer` type of imputation I used a particular imputation method, namely `IterativeImputer`, about which I did not say much while using it. I would like to present it in more detail here and see how the number of iterations affects the quality of the imputation. I will test our imputer over a whole range of `max_iter` values, which is exactly what I do in the loop below.
I will test our imputer over a whole range of `max_iter` values, which is exactly what the loop below does.

**Note: the code below takes quite a long time to run.**

```
upper_iter_limit = 30
lower_iter_limit = 5
imputation_iterations = 10

mean_RMSE = {
    "single": [],
    "multi": [],
}

for imputer_iterations in range(lower_iter_limit, upper_iter_limit, 2):
    _results_single = []
    _results_multi = []
    imputer = IterativeImputer(max_iter=imputer_iterations, random_state=0)
    print("max_iter: ", imputer_iterations, "/", upper_iter_limit)

    # Data missing from a single column
    _results_single.append(test_imputation(imputer, imputation_iterations))
    # Data missing from multiple columns
    _results_multi.append(test_imputation2(imputer, imputation_iterations))

    mean_RMSE['single'].append(np.mean(_results_single))
    mean_RMSE['multi'].append(np.mean(_results_multi))
```

Let us look at the results.

```
mean_RMSE
```

### Comment

Interestingly, there is no big difference in the imputation error for the different iteration limits. What is more, there is no correlation of any kind, so it does not pay off to use a large number of iterations, since it does not give better results. I want to limit these conclusions to this data set, however, because at this point I do not have enough information to claim that this is a general phenomenon. Let this example serve as a starting point for further discussion of the topic.
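As suggested earlier, the same helper can also be used to compare `IterativeImputer` against a simpler baseline. A minimal sketch, assuming the cells above (in particular `test_imputation` and its imports) have already been run; the choice of `SimpleImputer` with the mean strategy is mine, not part of the original analysis:

```
from sklearn.impute import SimpleImputer

# Baseline: fill missing values with the column mean.
mean_imputer = SimpleImputer(strategy="mean")
baseline_RMSE = test_imputation(mean_imputer, 20)

print("Mean imputation, average RMSE:", round(np.mean(baseline_RMSE)))
print("Mean imputation, RMSE standard deviation:", round(np.std(baseline_RMSE)))
```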
``` import requests import pandas as pd from bs4 import BeautifulSoup import string import re import nltk import json import matplotlib.pyplot as plt import numpy as np def get_text(url): response = requests.get(url) content = response.content parser = BeautifulSoup(content,'html.parser') return(parser.text) def clean_text(script): script_clean=script.strip() script_clean=script_clean.replace("\n","") script_clean=script_clean.replace("\r"," ") script_clean=script_clean.replace("\r\n","") script_clean=re.sub("([\(\[]).*?([\)\]])", "", script_clean) script_clean=re.sub(r'\.([a-zA-Z])', r'. \1', script_clean) #remove missing whitespace between character lines. script_clean=re.sub(r'\!([a-zA-Z])', r'! \1', script_clean) script_clean=re.sub(r'\?([a-zA-Z])', r'? \1', script_clean) return(script_clean) def get_cast(script_clean): tokens=nltk.word_tokenize(script_clean) cast=[] for word in tokens: if re.search("\\b[A-Z]{3,}\\b", word) is not None: cast.append(word) return(list(set(cast))) script=get_text('http://www.chakoteya.net/DS9/575.htm') script_clean=clean_text(script) def get_lines(script_clean, cast): split_script=script_clean.split(':') lines_dict=dict.fromkeys(cast) for cast_member in cast: lines=[] for i in range(len(split_script)-1): if cast_member in split_script[i].strip().split(" "): line=split_script[i+1].strip().split(" ") line=[word for word in line if word != ''] for member in cast: if member in line: line.remove(member) line=' '.join(line) lines.append(line) lines_dict[cast_member]=lines return(lines_dict) def get_page_links(): top_links=["http://www.chakoteya.net/DS9/episodes.htm", "http://www.chakoteya.net/StarTrek/episodes.htm", "http://www.chakoteya.net/NextGen/episodes.htm", "http://www.chakoteya.net/Voyager/episode_listing.htm", "http://www.chakoteya.net/Enterprise/episodes.htm"] short_links=["http://www.chakoteya.net/DS9/", "http://www.chakoteya.net/StarTrek/", "http://www.chakoteya.net/NextGen/", "http://www.chakoteya.net/Voyager/", "http://www.chakoteya.net/Enterprise/"] links_list=[] names_list=[] for i, link in enumerate(top_links): response = requests.get(link) content = response.content parser = BeautifulSoup(content,'html.parser') urls = parser.find_all('a') for page in urls: links_list.append(short_links[i]+str(page.get('href'))) name=page.text name=name.replace('\r\n',' ') names_list.append(name) links_to_remove=['http://www.chakoteya.net/Voyager/fortyseven.htm', 'http://www.chakoteya.net/Voyager/LineCountS1-S3.htm', 'http://www.chakoteya.net/Voyager/LineCountS4-S7.htm', 'http://www.chakoteya.net/Enterprise/fortyseven.htm', ] links_list=[link for link in links_list if (link.endswith('.htm')) & (link not in links_to_remove)] return(links_list) # links_list page_links=get_page_links() len(page_links) DS9_links = page_links[0:173] TOS_links = page_links[173:253] TAS_links = page_links[253:275] TNG_links = page_links[275:451] VOY_links = page_links[451:611] ENT_links = page_links[611:708] links=[DS9_links, TOS_links, TAS_links, TNG_links, VOY_links, ENT_links] links_names=['DS9', 'TOS', 'TAS', 'TNG', 'VOY', 'ENT'] links=[DS9_links, TOS_links, TAS_links, TNG_links, VOY_links, ENT_links] all_series_scripts={} for i,series in enumerate(links): series_name=str(links_names[i]) print(series_name) all_series_scripts[series_name]={} episode_script={} all_cast=[] for j,link in enumerate(series): episode="episode "+str(j) text=get_text(link) episode_script[episode]=text all_series_scripts[series_name]=episode_script print(all_series_scripts) with open('all_scripts_raw.json', 
'w') as data: json.dump(all_series_scripts, data) with open('all_scripts_raw.json', 'r') as data: all_scripts_raw = json.load(data) links_names=['DS9', 'TOS', 'TAS', 'TNG', 'VOY', 'ENT'] all_series_lines={} for i,series in enumerate(links_names): print(series) series_name=str(links_names[i]) all_series_lines[series_name]={} all_lines_dict={} all_cast=[] #for j,episode in enumerate(all_series_scripts[series]): for j,episode in enumerate(all_scripts_raw[series]): #script=all_series_scripts[series][episode] script=all_scripts_raw[series][episode] cleaned_script=clean_text(script) cast=get_cast(cleaned_script) for member in cast: if member not in all_cast: all_cast.append(member) lines_dict=get_lines(cleaned_script,all_cast) all_lines_dict[episode]=lines_dict all_series_lines[series]=all_lines_dict print(all_series_lines) with open('all_series_lines.json', 'w') as data: json.dump(all_series_lines, data) with open('all_series_lines.json', 'r') as data: all_series_lines = json.load(data) #checking against source to make sure the character lines #appear in the correct episode all_series_lines['TNG']['episode 30']['LAFORGE'] #writing the corrected df # all_series_lines = pd.DataFrame(data=all_series_lines) # all_series_lines.to_csv(r'C:\Users\Eric\Desktop\Star_Trek_Scripts-master\Star_Trek_Scripts-master\data\all_series_lines.csv') #when I wrote it to a df spock ended up getting lines??????????????? episodes=all_series_lines['TNG'].keys() total_lines_counts={} line_counts_by_episode={} for i,ep in enumerate(episodes): if i == 0: episode="Episode 1 & 2" else: episode="Episode "+str(i+2) line_counts_by_episode[episode]={} if all_series_lines['TNG'][ep] is not np.NaN: for member in list(all_series_lines['TNG'][ep].keys()): line_counts_by_episode[episode][member]=len(all_series_lines['TNG'][ep][member]) if member in total_lines_counts.keys(): total_lines_counts[member]=total_lines_counts[member]+len(all_series_lines['TNG'][ep][member]) else: total_lines_counts[member]=len(all_series_lines['TNG'][ep][member]) #checking to make sure Spock doesn't appear, since that was an issue before TNG_df_byep = pd.DataFrame(line_counts_by_episode) # TNG_df_byep.loc['SPOCK'] TNG_df=pd.DataFrame(list(total_lines_counts.items()), columns=['Character','No. of Lines']) Top20=TNG_df.sort_values(by='No. of Lines', ascending=False).head(20) Top20.plot.bar(x='Character',y='No. of Lines') plt.show() Top20['Character'] export_TOP20 = Top20.to_csv(r'C:\Users\Eric\startrek-dash-app\assets\top20') export_vis_TNG = TNG_df_byep.to_csv(r'C:\Users\Eric\startrek-dash-app\assets\bar_chart_TNG') ```
# Classifying Fashion-MNIST

Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial for neural networks; you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.

<img src='assets/fashion-mnist-sprite.png' width=500px>

In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this.

First off, let's load the dataset through torchvision.

```
import torch
from torchvision import datasets, transforms
import helper

# Define a transform to normalize the data (Fashion-MNIST images have a single channel)
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])

# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
```

Here we can see one of the images.

```
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
```

## Building the network

Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.

```
# TODO: Define your network architecture here
from torch import nn

n_hidden1 = 1024
n_hidden2 = 128
n_hidden3 = 64

model = nn.Sequential(nn.Linear(784, n_hidden1),
                      nn.ReLU(),
                      nn.Linear(n_hidden1, n_hidden2),
                      nn.ReLU(),
                      nn.Linear(n_hidden2, n_hidden3),
                      nn.ReLU(),
                      nn.Linear(n_hidden3, 10),
                      nn.LogSoftmax(dim=1))
```

# Train the network

Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) (something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).

Then write the training code. Remember the training pass is a fairly straightforward process:

* Make a forward pass through the network to get the logits
* Use the logits to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights

By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.
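One detail worth keeping straight before you start: the criterion you pick determines what your forward pass should return. The model above ends in `nn.LogSoftmax`, which pairs with `nn.NLLLoss`; if you prefer `nn.CrossEntropyLoss`, the network should return raw logits instead. A quick sketch of that variant (the layer sizes here are illustrative, not a recommendation):

```
from torch import nn, optim

# CrossEntropyLoss applies log-softmax internally, so the model returns raw logits
logit_model = nn.Sequential(nn.Linear(784, 256),
                            nn.ReLU(),
                            nn.Linear(256, 10))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(logit_model.parameters(), lr=0.003)
```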
```
# TODO: Create the network, define the criterion and optimizer
from torch import optim

criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.007)

# TODO: Train the network here
epochs = 10

for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # Flatten each batch of images into vectors of 784 pixels
        inputs = images.view(images.shape[0], 784)

        optimizer.zero_grad()
        output = model(inputs)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
    else:
        print("Epoch:", e, "Training loss:", running_loss / len(trainloader))

%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import helper

# Test out your network!
dataiter = iter(testloader)
images, labels = next(dataiter)
img = images[0]

# Convert 2D image to 1D vector
img = img.resize_(1, 784)

# TODO: Calculate the class probabilities (softmax) for img
with torch.no_grad():
    logprobs = model(img)
ps = torch.exp(logprobs)

# Plot the image and probabilities
helper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion')
```
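The cell above only inspects the prediction for a single test image. For a rough overall number, a quick pass over the test set can be added; a small sketch, reusing `model` and `testloader` from the cells above:

```
# Rough test-set accuracy, reusing `model` and `testloader` defined above
correct, total = 0, 0
with torch.no_grad():
    for images, labels in testloader:
        logps = model(images.view(images.shape[0], 784))
        preds = logps.argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += labels.shape[0]

print("Test accuracy:", correct / total)
```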
<a href="https://colab.research.google.com/github/reihaneh-torkzadehmahani/MyDPGAN/blob/master/AdvancedDPCGAN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## differential_privacy.analysis.rdp_accountant ``` # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RDP analysis of the Sampled Gaussian Mechanism. Functionality for computing Renyi differential privacy (RDP) of an additive Sampled Gaussian Mechanism (SGM). Its public interface consists of two methods: compute_rdp(q, noise_multiplier, T, orders) computes RDP for SGM iterated T times. get_privacy_spent(orders, rdp, target_eps, target_delta) computes delta (or eps) given RDP at multiple orders and a target value for eps (or delta). Example use: Suppose that we have run an SGM applied to a function with l2-sensitivity 1. Its parameters are given as a list of tuples (q1, sigma1, T1), ..., (qk, sigma_k, Tk), and we wish to compute eps for a given delta. The example code would be: max_order = 32 orders = range(2, max_order + 1) rdp = np.zeros_like(orders, dtype=float) for q, sigma, T in parameters: rdp += rdp_accountant.compute_rdp(q, sigma, T, orders) eps, _, opt_order = rdp_accountant.get_privacy_spent(rdp, target_delta=delta) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import sys import numpy as np from scipy import special import six ######################## # LOG-SPACE ARITHMETIC # ######################## def _log_add(logx, logy): """Add two numbers in the log space.""" a, b = min(logx, logy), max(logx, logy) if a == -np.inf: # adding 0 return b # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b) return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1) def _log_sub(logx, logy): """Subtract two numbers in the log space. Answer must be non-negative.""" if logx < logy: raise ValueError("The result of subtraction must be non-negative.") if logy == -np.inf: # subtracting 0 return logx if logx == logy: return -np.inf # 0 is represented as -np.inf in the log space. try: # Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y). return math.log( math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1 except OverflowError: return logx def _log_print(logx): """Pretty print.""" if logx < math.log(sys.float_info.max): return "{}".format(math.exp(logx)) else: return "exp({})".format(logx) def _compute_log_a_int(q, sigma, alpha): """Compute log(A_alpha) for integer alpha. 0 < q < 1.""" assert isinstance(alpha, six.integer_types) # Initialize with 0 in the log space. 
log_a = -np.inf for i in range(alpha + 1): log_coef_i = (math.log(special.binom(alpha, i)) + i * math.log(q) + (alpha - i) * math.log(1 - q)) s = log_coef_i + (i * i - i) / (2 * (sigma**2)) log_a = _log_add(log_a, s) return float(log_a) def _compute_log_a_frac(q, sigma, alpha): """Compute log(A_alpha) for fractional alpha. 0 < q < 1.""" # The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are # initialized to 0 in the log space: log_a0, log_a1 = -np.inf, -np.inf i = 0 z0 = sigma**2 * math.log(1 / q - 1) + .5 while True: # do ... until loop coef = special.binom(alpha, i) log_coef = math.log(abs(coef)) j = alpha - i log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q) log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q) log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma)) log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma)) log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0 log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1 if coef > 0: log_a0 = _log_add(log_a0, log_s0) log_a1 = _log_add(log_a1, log_s1) else: log_a0 = _log_sub(log_a0, log_s0) log_a1 = _log_sub(log_a1, log_s1) i += 1 if max(log_s0, log_s1) < -30: break return _log_add(log_a0, log_a1) def _compute_log_a(q, sigma, alpha): """Compute log(A_alpha) for any positive finite alpha.""" if float(alpha).is_integer(): return _compute_log_a_int(q, sigma, int(alpha)) else: return _compute_log_a_frac(q, sigma, alpha) def _log_erfc(x): """Compute log(erfc(x)) with high accuracy for large x.""" try: return math.log(2) + special.log_ndtr(-x * 2**.5) except NameError: # If log_ndtr is not available, approximate as follows: r = special.erfc(x) if r == 0.0: # Using the Laurent series at infinity for the tail of the erfc function: # erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5) # To verify in Mathematica: # Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}] return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 + .625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8) else: return math.log(r) def _compute_delta(orders, rdp, eps): """Compute delta given a list of RDP values and target epsilon. Args: orders: An array (or a scalar) of orders. rdp: A list (or a scalar) of RDP guarantees. eps: The target epsilon. Returns: Pair of (delta, optimal_order). Raises: ValueError: If input is malformed. """ orders_vec = np.atleast_1d(orders) rdp_vec = np.atleast_1d(rdp) if len(orders_vec) != len(rdp_vec): raise ValueError("Input lists must have the same length.") deltas = np.exp((rdp_vec - eps) * (orders_vec - 1)) idx_opt = np.argmin(deltas) return min(deltas[idx_opt], 1.), orders_vec[idx_opt] def _compute_eps(orders, rdp, delta): """Compute epsilon given a list of RDP values and target delta. Args: orders: An array (or a scalar) of orders. rdp: A list (or a scalar) of RDP guarantees. delta: The target delta. Returns: Pair of (eps, optimal_order). Raises: ValueError: If input is malformed. """ orders_vec = np.atleast_1d(orders) rdp_vec = np.atleast_1d(rdp) if len(orders_vec) != len(rdp_vec): raise ValueError("Input lists must have the same length.") eps = rdp_vec - math.log(delta) / (orders_vec - 1) idx_opt = np.nanargmin(eps) # Ignore NaNs return eps[idx_opt], orders_vec[idx_opt] def _compute_rdp(q, sigma, alpha): """Compute RDP of the Sampled Gaussian mechanism at order alpha. Args: q: The sampling rate. sigma: The std of the additive Gaussian noise. alpha: The order at which RDP is computed. Returns: RDP at alpha, can be np.inf. 
""" if q == 0: return 0 if q == 1.: return alpha / (2 * sigma**2) if np.isinf(alpha): return np.inf return _compute_log_a(q, sigma, alpha) / (alpha - 1) def compute_rdp(q, noise_multiplier, steps, orders): """Compute RDP of the Sampled Gaussian Mechanism. Args: q: The sampling rate. noise_multiplier: The ratio of the standard deviation of the Gaussian noise to the l2-sensitivity of the function to which it is added. steps: The number of steps. orders: An array (or a scalar) of RDP orders. Returns: The RDPs at all orders, can be np.inf. """ if np.isscalar(orders): rdp = _compute_rdp(q, noise_multiplier, orders) else: rdp = np.array( [_compute_rdp(q, noise_multiplier, order) for order in orders]) return rdp * steps def get_privacy_spent(orders, rdp, target_eps=None, target_delta=None): """Compute delta (or eps) for given eps (or delta) from RDP values. Args: orders: An array (or a scalar) of RDP orders. rdp: An array of RDP values. Must be of the same length as the orders list. target_eps: If not None, the epsilon for which we compute the corresponding delta. target_delta: If not None, the delta for which we compute the corresponding epsilon. Exactly one of target_eps and target_delta must be None. Returns: eps, delta, opt_order. Raises: ValueError: If target_eps and target_delta are messed up. """ if target_eps is None and target_delta is None: raise ValueError( "Exactly one out of eps and delta must be None. (Both are).") if target_eps is not None and target_delta is not None: raise ValueError( "Exactly one out of eps and delta must be None. (None is).") if target_eps is not None: delta, opt_order = _compute_delta(orders, rdp, target_eps) return target_eps, delta, opt_order else: eps, opt_order = _compute_eps(orders, rdp, target_delta) return eps, target_delta, opt_order ``` ## dp query ``` # Copyright 2018, The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """An interface for differentially private query mechanisms. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc class DPQuery(object): """Interface for differentially private query mechanisms.""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def initial_global_state(self): """Returns the initial global state for the DPQuery.""" pass @abc.abstractmethod def derive_sample_params(self, global_state): """Given the global state, derives parameters to use for the next sample. Args: global_state: The current global state. Returns: Parameters to use to process records in the next sample. """ pass @abc.abstractmethod def initial_sample_state(self, global_state, tensors): """Returns an initial state to use for the next sample. Args: global_state: The current global state. tensors: A structure of tensors used as a template to create the initial sample state. Returns: An initial sample state. """ pass @abc.abstractmethod def accumulate_record(self, params, sample_state, record): """Accumulates a single record into the sample state. 
Args: params: The parameters for the sample. sample_state: The current sample state. record: The record to accumulate. Returns: The updated sample state. """ pass @abc.abstractmethod def get_noised_result(self, sample_state, global_state): """Gets query result after all records of sample have been accumulated. Args: sample_state: The sample state after all records have been accumulated. global_state: The global state. Returns: A tuple (result, new_global_state) where "result" is the result of the query and "new_global_state" is the updated global state. """ pass ``` ## gausian query ``` # Copyright 2018, The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements DPQuery interface for Gaussian average queries. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import tensorflow as tf nest = tf.contrib.framework.nest class GaussianSumQuery(DPQuery): """Implements DPQuery interface for Gaussian sum queries. Accumulates clipped vectors, then adds Gaussian noise to the sum. """ # pylint: disable=invalid-name _GlobalState = collections.namedtuple( '_GlobalState', ['l2_norm_clip', 'stddev']) def __init__(self, l2_norm_clip, stddev): """Initializes the GaussianSumQuery. Args: l2_norm_clip: The clipping norm to apply to the global norm of each record. stddev: The stddev of the noise added to the sum. """ self._l2_norm_clip = l2_norm_clip self._stddev = stddev def initial_global_state(self): """Returns the initial global state for the GaussianSumQuery.""" return self._GlobalState(float(self._l2_norm_clip), float(self._stddev)) def derive_sample_params(self, global_state): """Given the global state, derives parameters to use for the next sample. Args: global_state: The current global state. Returns: Parameters to use to process records in the next sample. """ return global_state.l2_norm_clip def initial_sample_state(self, global_state, tensors): """Returns an initial state to use for the next sample. Args: global_state: The current global state. tensors: A structure of tensors used as a template to create the initial sample state. Returns: An initial sample state. """ del global_state # unused. return nest.map_structure(tf.zeros_like, tensors) def accumulate_record(self, params, sample_state, record): """Accumulates a single record into the sample state. Args: params: The parameters for the sample. sample_state: The current sample state. record: The record to accumulate. Returns: The updated sample state. """ l2_norm_clip = params record_as_list = nest.flatten(record) clipped_as_list, _ = tf.clip_by_global_norm(record_as_list, l2_norm_clip) clipped = nest.pack_sequence_as(record, clipped_as_list) return nest.map_structure(tf.add, sample_state, clipped) def get_noised_result(self, sample_state, global_state, add_noise=True): """Gets noised sum after all records of sample have been accumulated. Args: sample_state: The sample state after all records have been accumulated. global_state: The global state. 
Returns: A tuple (estimate, new_global_state) where "estimate" is the estimated sum of the records and "new_global_state" is the updated global state. """ def add_noise(v): if add_noise: return v + tf.random_normal(tf.shape(v), stddev=global_state.stddev) else: return v return nest.map_structure(add_noise, sample_state), global_state class GaussianAverageQuery(DPQuery): """Implements DPQuery interface for Gaussian average queries. Accumulates clipped vectors, adds Gaussian noise, and normalizes. Note that we use "fixed-denominator" estimation: the denominator should be specified as the expected number of records per sample. Accumulating the denominator separately would also be possible but would be produce a higher variance estimator. """ # pylint: disable=invalid-name _GlobalState = collections.namedtuple( '_GlobalState', ['sum_state', 'denominator']) def __init__(self, l2_norm_clip, sum_stddev, denominator): """Initializes the GaussianAverageQuery. Args: l2_norm_clip: The clipping norm to apply to the global norm of each record. sum_stddev: The stddev of the noise added to the sum (before normalization). denominator: The normalization constant (applied after noise is added to the sum). """ self._numerator = GaussianSumQuery(l2_norm_clip, sum_stddev) self._denominator = denominator def initial_global_state(self): """Returns the initial global state for the GaussianAverageQuery.""" sum_global_state = self._numerator.initial_global_state() return self._GlobalState(sum_global_state, float(self._denominator)) def derive_sample_params(self, global_state): """Given the global state, derives parameters to use for the next sample. Args: global_state: The current global state. Returns: Parameters to use to process records in the next sample. """ return self._numerator.derive_sample_params(global_state.sum_state) def initial_sample_state(self, global_state, tensors): """Returns an initial state to use for the next sample. Args: global_state: The current global state. tensors: A structure of tensors used as a template to create the initial sample state. Returns: An initial sample state. """ # GaussianAverageQuery has no state beyond the sum state. return self._numerator.initial_sample_state(global_state.sum_state, tensors) def accumulate_record(self, params, sample_state, record): """Accumulates a single record into the sample state. Args: params: The parameters for the sample. sample_state: The current sample state. record: The record to accumulate. Returns: The updated sample state. """ return self._numerator.accumulate_record(params, sample_state, record) def get_noised_result(self, sample_state, global_state, add_noise=True): """Gets noised average after all records of sample have been accumulated. Args: sample_state: The sample state after all records have been accumulated. global_state: The global state. Returns: A tuple (estimate, new_global_state) where "estimate" is the estimated average of the records and "new_global_state" is the updated global state. """ noised_sum, new_sum_global_state = self._numerator.get_noised_result( sample_state, global_state.sum_state, add_noise) new_global_state = self._GlobalState( new_sum_global_state, global_state.denominator) def normalize(v): return tf.truediv(v, global_state.denominator) return nest.map_structure(normalize, noised_sum), new_global_state ``` ## our_dp_optimizer ``` # Copyright 2018, The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Differentially private optimizers for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf def make_optimizer_class(cls): """Constructs a DP optimizer class from an existing one.""" if (tf.train.Optimizer.compute_gradients.__code__ is not cls.compute_gradients.__code__): tf.logging.warning( 'WARNING: Calling make_optimizer_class() on class %s that overrides ' 'method compute_gradients(). Check to ensure that ' 'make_optimizer_class() does not interfere with overridden version.', cls.__name__) class DPOptimizerClass(cls): """Differentially private subclass of given class cls.""" def __init__( self, l2_norm_clip, noise_multiplier, dp_average_query, num_microbatches, unroll_microbatches=False, *args, # pylint: disable=keyword-arg-before-vararg **kwargs): super(DPOptimizerClass, self).__init__(*args, **kwargs) self._dp_average_query = dp_average_query self._num_microbatches = num_microbatches self._global_state = self._dp_average_query.initial_global_state() # TODO(b/122613513): Set unroll_microbatches=True to avoid this bug. # Beware: When num_microbatches is large (>100), enabling this parameter # may cause an OOM error. self._unroll_microbatches = unroll_microbatches def dp_compute_gradients(self, loss, var_list, gate_gradients=tf.train.Optimizer.GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, grad_loss=None, add_noise=True): # Note: it would be closer to the correct i.i.d. sampling of records if # we sampled each microbatch from the appropriate binomial distribution, # although that still wouldn't be quite correct because it would be # sampling from the dataset without replacement. microbatches_losses = tf.reshape(loss, [self._num_microbatches, -1]) sample_params = (self._dp_average_query.derive_sample_params( self._global_state)) def process_microbatch(i, sample_state): """Process one microbatch (record) with privacy helper.""" grads, _ = zip(*super(cls, self).compute_gradients( tf.gather(microbatches_losses, [i]), var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, grad_loss)) # Converts tensor to list to replace None gradients with zero grads1 = list(grads) for inx in range(0, len(grads)): if (grads[inx] == None): grads1[inx] = tf.zeros_like(var_list[inx]) grads_list = grads1 sample_state = self._dp_average_query.accumulate_record( sample_params, sample_state, grads_list) return sample_state if var_list is None: var_list = (tf.trainable_variables() + tf.get_collection( tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)) sample_state = self._dp_average_query.initial_sample_state( self._global_state, var_list) if self._unroll_microbatches: for idx in range(self._num_microbatches): sample_state = process_microbatch(idx, sample_state) else: # Use of while_loop here requires that sample_state be a nested # structure of tensors. In general, we would prefer to allow it to be # an arbitrary opaque type. 
cond_fn = lambda i, _: tf.less(i, self._num_microbatches) body_fn = lambda i, state: [ tf.add(i, 1), process_microbatch(i, state) ] idx = tf.constant(0) _, sample_state = tf.while_loop(cond_fn, body_fn, [idx, sample_state]) final_grads, self._global_state = ( self._dp_average_query.get_noised_result( sample_state, self._global_state, add_noise)) return (final_grads) def minimize(self, d_loss_real, d_loss_fake, global_step=None, var_list=None, gate_gradients=tf.train.Optimizer.GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, name=None, grad_loss=None): """Minimize using sanitized gradients Args: d_loss_real: the loss tensor for real data d_loss_fake: the loss tensor for fake data global_step: the optional global step. var_list: the optional variables. name: the optional name. Returns: the operation that runs one step of DP gradient descent. """ # First validate the var_list if var_list is None: var_list = tf.trainable_variables() for var in var_list: if not isinstance(var, tf.Variable): raise TypeError("Argument is not a variable.Variable: %s" % var) # ------------------ OUR METHOD -------------------------------- r_grads = self.dp_compute_gradients( d_loss_real, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss, add_noise = True) f_grads = self.dp_compute_gradients( d_loss_fake, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss, add_noise=False) # Compute the overall gradients s_grads = [(r_grads[idx] + f_grads[idx]) for idx in range(len(r_grads))] sanitized_grads_and_vars = list(zip(s_grads, var_list)) self._assert_valid_dtypes( [v for g, v in sanitized_grads_and_vars if g is not None]) # Apply the overall gradients apply_grads = self.apply_gradients(sanitized_grads_and_vars, global_step=global_step, name=name) return apply_grads # ----------------------------------------------------------------- return DPOptimizerClass def make_gaussian_optimizer_class(cls): """Constructs a DP optimizer with Gaussian averaging of updates.""" class DPGaussianOptimizerClass(make_optimizer_class(cls)): """DP subclass of given class cls using Gaussian averaging.""" def __init__( self, l2_norm_clip, noise_multiplier, num_microbatches, unroll_microbatches=False, *args, # pylint: disable=keyword-arg-before-vararg **kwargs): dp_average_query = GaussianAverageQuery( l2_norm_clip, l2_norm_clip * noise_multiplier, num_microbatches) self.l2_norm_clip = l2_norm_clip self.noise_multiplier = noise_multiplier super(DPGaussianOptimizerClass, self).__init__(l2_norm_clip, noise_multiplier, dp_average_query, num_microbatches, unroll_microbatches, *args, **kwargs) return DPGaussianOptimizerClass DPAdagradOptimizer = make_optimizer_class(tf.train.AdagradOptimizer) DPAdamOptimizer = make_optimizer_class(tf.train.AdamOptimizer) DPGradientDescentOptimizer = make_optimizer_class( tf.train.GradientDescentOptimizer) DPAdagradGaussianOptimizer = make_gaussian_optimizer_class( tf.train.AdagradOptimizer) DPAdamGaussianOptimizer = make_gaussian_optimizer_class(tf.train.AdamOptimizer) DPGradientDescentGaussianOptimizer = make_gaussian_optimizer_class( tf.train.GradientDescentOptimizer) ``` ## gan.ops ``` """ Most codes from https://github.com/carpedm20/DCGAN-tensorflow """ import math import numpy as np import tensorflow as tf if "concat_v2" in dir(tf): def concat(tensors, axis, *args, 
**kwargs): return tf.concat_v2(tensors, axis, *args, **kwargs) else: def concat(tensors, axis, *args, **kwargs): return tf.concat(tensors, axis, *args, **kwargs) def bn(x, is_training, scope): return tf.contrib.layers.batch_norm(x, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=is_training, scope=scope) def conv_out_size_same(size, stride): return int(math.ceil(float(size) / float(stride))) def conv_cond_concat(x, y): """Concatenate conditioning vector on feature map axis.""" x_shapes = x.get_shape() y_shapes = y.get_shape() return concat( [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3) def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d"): with tf.variable_scope(name): w = tf.get_variable( 'w', [k_h, k_w, input_.get_shape()[-1], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) return conv def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, name="deconv2d", stddev=0.02, with_w=False): with tf.variable_scope(name): # filter : [height, width, output_channels, in_channels] w = tf.get_variable( 'w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev)) try: deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) # Support for verisons of TensorFlow before 0.7.0 except AttributeError: deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape()) if with_w: return deconv, w, biases else: return deconv def lrelu(x, leak=0.2, name="lrelu"): return tf.maximum(x, leak * x) def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): shape = input_.get_shape().as_list() with tf.variable_scope(scope or "Linear"): matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, tf.random_normal_initializer(stddev=stddev)) bias = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(bias_start)) if with_w: return tf.matmul(input_, matrix) + bias, matrix, bias else: return tf.matmul(input_, matrix) + bias ``` ## OUR DP CGAN ``` # -*- coding: utf-8 -*- from __future__ import division from keras.datasets import cifar10 from mlxtend.data import loadlocal_mnist from sklearn.preprocessing import label_binarize from sklearn.multiclass import OneVsRestClassifier from sklearn.metrics import roc_curve, auc from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.neural_network import MLPClassifier class OUR_DP_CGAN(object): model_name = "OUR_DP_CGAN" # name for checkpoint def __init__(self, sess, epoch, batch_size, z_dim, epsilon, delta, sigma, clip_value, lr, dataset_name, base_dir, checkpoint_dir, result_dir, log_dir): self.sess = sess self.dataset_name = dataset_name self.base_dir = base_dir self.checkpoint_dir = checkpoint_dir self.result_dir = result_dir self.log_dir = log_dir self.epoch = epoch self.batch_size = batch_size self.epsilon = epsilon self.delta = delta self.noise_multiplier = sigma self.l2_norm_clip = clip_value self.lr = lr if dataset_name == 
'mnist' or dataset_name == 'fashion-mnist': # parameters self.input_height = 28 self.input_width = 28 self.output_height = 28 self.output_width = 28 self.z_dim = z_dim # dimension of noise-vector self.y_dim = 10 # dimension of condition-vector (label) self.c_dim = 1 # train self.learningRateD = self.lr self.learningRateG = self.learningRateD * 5 self.beta1 = 0.5 self.beta2 = 0.99 # test self.sample_num = 64 # number of generated images to be saved # load mnist self.data_X, self.data_y = load_mnist(train = True) # get number of batches for a single epoch self.num_batches = len(self.data_X) // self.batch_size elif dataset_name == 'cifar10': # parameters self.input_height = 32 self.input_width = 32 self.output_height = 32 self.output_width = 32 self.z_dim = 100 # dimension of noise-vector self.y_dim = 10 # dimension of condition-vector (label) self.c_dim = 3 # color dimension # train # self.learning_rate = 0.0002 # 1e-3, 1e-4 self.learningRateD = 1e-3 self.learningRateG = 1e-4 self.beta1 = 0.5 self.beta2 = 0.99 # test self.sample_num = 64 # number of generated images to be saved # load cifar10 self.data_X, self.data_y = load_cifar10(train=True) self.num_batches = len(self.data_X) // self.batch_size else: raise NotImplementedError def discriminator(self, x, y, is_training=True, reuse=False): # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657) # Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC1_S with tf.variable_scope("discriminator", reuse=reuse): # merge image and label if (self.dataset_name == "mnist"): y = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim]) x = conv_cond_concat(x, y) net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='d_conv1')) net = lrelu( bn(conv2d(net, 128, 4, 4, 2, 2, name='d_conv2'), is_training=is_training, scope='d_bn2')) net = tf.reshape(net, [self.batch_size, -1]) net = lrelu( bn(linear(net, 1024, scope='d_fc3'), is_training=is_training, scope='d_bn3')) out_logit = linear(net, 1, scope='d_fc4') out = tf.nn.sigmoid(out_logit) elif (self.dataset_name == "cifar10"): y = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim]) x = conv_cond_concat(x, y) lrelu_slope = 0.2 kernel_size = 5 w_init = tf.contrib.layers.xavier_initializer() net = lrelu( conv2d(x, 64, 5, 5, 2, 2, name='d_conv1' + '_' + self.dataset_name)) net = lrelu( bn(conv2d(net, 128, 5, 5, 2, 2, name='d_conv2' + '_' + self.dataset_name), is_training=is_training, scope='d_bn2')) net = lrelu( bn(conv2d(net, 256, 5, 5, 2, 2, name='d_conv3' + '_' + self.dataset_name), is_training=is_training, scope='d_bn3')) net = lrelu( bn(conv2d(net, 512, 5, 5, 2, 2, name='d_conv4' + '_' + self.dataset_name), is_training=is_training, scope='d_bn4')) net = tf.reshape(net, [self.batch_size, -1]) out_logit = linear(net, 1, scope='d_fc5' + '_' + self.dataset_name) out = tf.nn.sigmoid(out_logit) return out, out_logit def generator(self, z, y, is_training=True, reuse=False): # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657) # Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S with tf.variable_scope("generator", reuse=reuse): if (self.dataset_name == "mnist"): # merge noise and label z = concat([z, y], 1) net = tf.nn.relu( bn(linear(z, 1024, scope='g_fc1'), is_training=is_training, scope='g_bn1')) net = tf.nn.relu( bn(linear(net, 128 * 7 * 7, scope='g_fc2'), is_training=is_training, scope='g_bn2')) net = tf.reshape(net, [self.batch_size, 7, 7, 128]) net = tf.nn.relu( bn(deconv2d(net, [self.batch_size, 14, 14, 64], 4, 4, 2, 2, name='g_dc3'), 
is_training=is_training, scope='g_bn3')) out = tf.nn.sigmoid( deconv2d(net, [self.batch_size, 28, 28, 1], 4, 4, 2, 2, name='g_dc4')) elif (self.dataset_name == "cifar10"): h_size = 32 h_size_2 = 16 h_size_4 = 8 h_size_8 = 4 h_size_16 = 2 z = concat([z, y], 1) net = linear(z, 512 * h_size_16 * h_size_16, scope='g_fc1' + '_' + self.dataset_name) net = tf.nn.relu( bn(tf.reshape( net, [self.batch_size, h_size_16, h_size_16, 512]), is_training=is_training, scope='g_bn1')) net = tf.nn.relu( bn(deconv2d(net, [self.batch_size, h_size_8, h_size_8, 256], 5, 5, 2, 2, name='g_dc2' + '_' + self.dataset_name), is_training=is_training, scope='g_bn2')) net = tf.nn.relu( bn(deconv2d(net, [self.batch_size, h_size_4, h_size_4, 128], 5, 5, 2, 2, name='g_dc3' + '_' + self.dataset_name), is_training=is_training, scope='g_bn3')) net = tf.nn.relu( bn(deconv2d(net, [self.batch_size, h_size_2, h_size_2, 64], 5, 5, 2, 2, name='g_dc4' + '_' + self.dataset_name), is_training=is_training, scope='g_bn4')) out = tf.nn.tanh( deconv2d(net, [ self.batch_size, self.output_height, self.output_width, self.c_dim ], 5, 5, 2, 2, name='g_dc5' + '_' + self.dataset_name)) return out def build_model(self): # some parameters image_dims = [self.input_height, self.input_width, self.c_dim] bs = self.batch_size """ Graph Input """ # images self.inputs = tf.placeholder(tf.float32, [bs] + image_dims, name='real_images') # labels self.y = tf.placeholder(tf.float32, [bs, self.y_dim], name='y') # noises self.z = tf.placeholder(tf.float32, [bs, self.z_dim], name='z') """ Loss Function """ # output of D for real images D_real, D_real_logits = self.discriminator(self.inputs, self.y, is_training=True, reuse=False) # output of D for fake images G = self.generator(self.z, self.y, is_training=True, reuse=False) D_fake, D_fake_logits = self.discriminator(G, self.y, is_training=True, reuse=True) # get loss for discriminator d_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits( logits=D_real_logits, labels=tf.ones_like(D_real))) d_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits( logits=D_fake_logits, labels=tf.zeros_like(D_fake))) self.d_loss_real_vec = tf.nn.sigmoid_cross_entropy_with_logits( logits=D_real_logits, labels=tf.ones_like(D_real)) self.d_loss_fake_vec = tf.nn.sigmoid_cross_entropy_with_logits( logits=D_fake_logits, labels=tf.zeros_like(D_fake)) self.d_loss = d_loss_real + d_loss_fake # get loss for generator self.g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits( logits=D_fake_logits, labels=tf.ones_like(D_fake))) """ Training """ # divide trainable variables into a group for D and a group for G t_vars = tf.trainable_variables() d_vars = [ var for var in t_vars if var.name.startswith('discriminator') ] g_vars = [var for var in t_vars if var.name.startswith('generator')] # optimizers with tf.control_dependencies(tf.get_collection( tf.GraphKeys.UPDATE_OPS)): d_optim_init = DPGradientDescentGaussianOptimizer( l2_norm_clip=self.l2_norm_clip, noise_multiplier=self.noise_multiplier, num_microbatches=self.batch_size, learning_rate=self.learningRateD) global_step = tf.train.get_global_step() self.d_optim = d_optim_init.minimize( d_loss_real=self.d_loss_real_vec, d_loss_fake=self.d_loss_fake_vec, global_step=global_step, var_list=d_vars) optimizer = DPGradientDescentGaussianOptimizer( l2_norm_clip=self.l2_norm_clip, noise_multiplier=self.noise_multiplier, num_microbatches=self.batch_size, learning_rate=self.learningRateD) self.g_optim = tf.train.GradientDescentOptimizer(self.learningRateG) \ 
.minimize(self.g_loss, var_list=g_vars) """" Testing """ self.fake_images = self.generator(self.z, self.y, is_training=False, reuse=True) """ Summary """ d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real) d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake) d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) g_loss_sum = tf.summary.scalar("g_loss", self.g_loss) # final summary operations self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum]) self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum]) def train(self): # initialize all variables tf.global_variables_initializer().run() # graph inputs for visualize training results self.sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim)) self.test_labels = self.data_y[0:self.batch_size] # saver to save model self.saver = tf.train.Saver() # summary writer self.writer = tf.summary.FileWriter( self.log_dir + '/' + self.model_name, self.sess.graph) # restore check-point if it exits could_load, checkpoint_counter = self.load(self.checkpoint_dir) if could_load: start_epoch = (int)(checkpoint_counter / self.num_batches) start_batch_id = checkpoint_counter - start_epoch * self.num_batches counter = checkpoint_counter print(" [*] Load SUCCESS") else: start_epoch = 0 start_batch_id = 0 counter = 1 print(" [!] Load failed...") # loop for epoch epoch = start_epoch should_terminate = False while (epoch < self.epoch and not should_terminate): # get batch data for idx in range(start_batch_id, self.num_batches): batch_images = self.data_X[idx * self.batch_size:(idx + 1) * self.batch_size] batch_labels = self.data_y[idx * self.batch_size:(idx + 1) * self.batch_size] batch_z = np.random.uniform( -1, 1, [self.batch_size, self.z_dim]).astype(np.float32) # update D network _, summary_str, d_loss = self.sess.run( [self.d_optim, self.d_sum, self.d_loss], feed_dict={ self.inputs: batch_images, self.y: batch_labels, self.z: batch_z }) self.writer.add_summary(summary_str, counter) eps = self.compute_epsilon((epoch * self.num_batches) + idx) if (eps > self.epsilon): should_terminate = True print("TERMINATE !! 
Run out of Privacy Budget.....") epoch = self.epoch break # update G network _, summary_str, g_loss = self.sess.run( [self.g_optim, self.g_sum, self.g_loss], feed_dict={ self.inputs: batch_images, self.y: batch_labels, self.z: batch_z }) self.writer.add_summary(summary_str, counter) # display training status counter += 1 _ = self.sess.run(self.fake_images, feed_dict={ self.z: self.sample_z, self.y: self.test_labels }) # save training results for every 100 steps if np.mod(counter, 100) == 0: print("Iteration : " + str(idx) + " Eps: " + str(eps)) samples = self.sess.run(self.fake_images, feed_dict={ self.z: self.sample_z, self.y: self.test_labels }) tot_num_samples = min(self.sample_num, self.batch_size) manifold_h = int(np.floor(np.sqrt(tot_num_samples))) manifold_w = int(np.floor(np.sqrt(tot_num_samples))) save_images( samples[:manifold_h * manifold_w, :, :, :], [manifold_h, manifold_w], check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_train_{:02d}_{:04d}.png'.format(epoch, idx)) epoch = epoch + 1 # After an epoch, start_batch_id is set to zero # non-zero value is only for the first epoch after loading pre-trained model start_batch_id = 0 # save model self.save(self.checkpoint_dir, counter) # show temporal results if (self.dataset_name == 'mnist'): self.visualize_results_MNIST(epoch) elif (self.dataset_name == 'cifar10'): self.visualize_results_CIFAR(epoch) # save model for final step self.save(self.checkpoint_dir, counter) def compute_fpr_tpr_roc(Y_test, Y_score): n_classes = Y_score.shape[1] false_positive_rate = dict() true_positive_rate = dict() roc_auc = dict() for class_cntr in range(n_classes): false_positive_rate[class_cntr], true_positive_rate[ class_cntr], _ = roc_curve(Y_test[:, class_cntr], Y_score[:, class_cntr]) roc_auc[class_cntr] = auc(false_positive_rate[class_cntr], true_positive_rate[class_cntr]) # Compute micro-average ROC curve and ROC area false_positive_rate["micro"], true_positive_rate[ "micro"], _ = roc_curve(Y_test.ravel(), Y_score.ravel()) roc_auc["micro"] = auc(false_positive_rate["micro"], true_positive_rate["micro"]) return false_positive_rate, true_positive_rate, roc_auc def classify(X_train, Y_train, X_test, classiferName, random_state_value=0): if classiferName == "lr": classifier = OneVsRestClassifier( LogisticRegression(solver='lbfgs', multi_class='multinomial', random_state=random_state_value)) elif classiferName == "mlp": classifier = OneVsRestClassifier( MLPClassifier(random_state=random_state_value, alpha=1)) elif classiferName == "rf": classifier = OneVsRestClassifier( RandomForestClassifier(n_estimators=100, random_state=random_state_value)) else: print("Classifier not in the list!") exit() Y_score = classifier.fit(X_train, Y_train).predict_proba(X_test) return Y_score batch_size = int(self.batch_size) if (self.dataset_name == "mnist"): n_class = np.zeros(10) n_class[0] = 5923 - batch_size n_class[1] = 6742 n_class[2] = 5958 n_class[3] = 6131 n_class[4] = 5842 n_class[5] = 5421 n_class[6] = 5918 n_class[7] = 6265 n_class[8] = 5851 n_class[9] = 5949 Z_sample = np.random.uniform(-1, 1, size=(batch_size, self.z_dim)) y = np.zeros(batch_size, dtype=np.int64) + 0 y_one_hot = np.zeros((batch_size, self.y_dim)) y_one_hot[np.arange(batch_size), y] = 1 images = self.sess.run(self.fake_images, feed_dict={ self.z: Z_sample, self.y: y_one_hot }) for classLabel in range(0, 10): for _ in range(0, int(n_class[classLabel]), batch_size): Z_sample = np.random.uniform(-1, 1, size=(batch_size, self.z_dim)) y = np.zeros(batch_size, 
dtype=np.int64) + classLabel y_one_hot_init = np.zeros((batch_size, self.y_dim)) y_one_hot_init[np.arange(batch_size), y] = 1 images = np.append(images, self.sess.run(self.fake_images, feed_dict={ self.z: Z_sample, self.y: y_one_hot_init }), axis=0) y_one_hot = np.append(y_one_hot, y_one_hot_init, axis=0) X_test, Y_test = load_mnist(train = False) Y_test = [int(y) for y in Y_test] classes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] Y_test = label_binarize(Y_test, classes=classes) if (self.dataset_name == "cifar10"): n_class = np.zeros(10) for t in range(1, 10): n_class[t] = 1000 Z_sample = np.random.uniform(-1, 1, size=(batch_size, self.z_dim)) y = np.zeros(batch_size, dtype=np.int64) + 0 y_one_hot = np.zeros((batch_size, self.y_dim)) y_one_hot[np.arange(batch_size), y] = 1 images = self.sess.run(self.fake_images, feed_dict={ self.z: Z_sample, self.y: y_one_hot }) for classLabel in range(0, 10): for _ in range(0, int(n_class[classLabel]), batch_size): Z_sample = np.random.uniform(-1, 1, size=(batch_size, self.z_dim)) y = np.zeros(batch_size, dtype=np.int64) + classLabel y_one_hot_init = np.zeros((batch_size, self.y_dim)) y_one_hot_init[np.arange(batch_size), y] = 1 images = np.append(images, self.sess.run(self.fake_images, feed_dict={ self.z: Z_sample, self.y: y_one_hot_init }), axis=0) y_one_hot = np.append(y_one_hot, y_one_hot_init, axis=0) X_test, Y_test = load_cifar10(train=False) classes = range(0, 10) Y_test = label_binarize(Y_test, classes=classes) print(" Classifying - Logistic Regression...") TwoDim_images = images.reshape(np.shape(images)[0], -2) X_test = X_test.reshape(np.shape(X_test)[0], -2) Y_score = classify(TwoDim_images, y_one_hot, X_test, "lr", random_state_value=30) false_positive_rate, true_positive_rate, roc_auc = compute_fpr_tpr_roc( Y_test, Y_score) classification_results_fname = self.base_dir + "CGAN_AuROC.txt" classification_results = open(classification_results_fname, "w") classification_results.write( "\nepsilon : {:.2f}, sigma: {:.2f}, clipping value: {:.2f}".format( (self.epsilon), round(self.noise_multiplier, 2), round(self.l2_norm_clip, 2))) classification_results.write("\nAuROC - logistic Regression: " + str(roc_auc["micro"])) classification_results.write( "\n--------------------------------------------------------------------\n" ) print(" Classifying - Random Forest...") Y_score = classify(TwoDim_images, y_one_hot, X_test, "rf", random_state_value=30) print(" Computing ROC - Random Forest ...") false_positive_rate, true_positive_rate, roc_auc = compute_fpr_tpr_roc( Y_test, Y_score) classification_results.write( "\nepsilon : {:.2f}, sigma: {:.2f}, clipping value: {:.2f}".format( (self.epsilon), round(self.noise_multiplier, 2), round(self.l2_norm_clip, 2))) classification_results.write("\nAuROC - random Forest: " + str(roc_auc["micro"])) classification_results.write( "\n--------------------------------------------------------------------\n" ) print(" Classifying - multilayer Perceptron ...") Y_score = classify(TwoDim_images, y_one_hot, X_test, "mlp", random_state_value=30) print(" Computing ROC - Multilayer Perceptron ...") false_positive_rate, true_positive_rate, roc_auc = compute_fpr_tpr_roc( Y_test, Y_score) classification_results.write( "\nepsilon : {:.2f}, sigma: {:.2f}, clipping value: {:.2f}".format( (self.epsilon), round(self.noise_multiplier, 2), round(self.l2_norm_clip, 2))) classification_results.write("\nAuROC - multilayer Perceptron: " + str(roc_auc["micro"])) classification_results.write( 
"\n--------------------------------------------------------------------\n" ) # save model for final step self.save(self.checkpoint_dir, counter) def compute_epsilon(self, steps): """Computes epsilon value for given hyperparameters.""" if self.noise_multiplier == 0.0: return float('inf') orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64)) sampling_probability = self.batch_size / 60000 rdp = compute_rdp(q=sampling_probability, noise_multiplier=self.noise_multiplier, steps=steps, orders=orders) # Delta is set to 1e-5 because MNIST has 60000 training points. return get_privacy_spent(orders, rdp, target_delta=1e-5)[0] # CIFAR 10 def visualize_results_CIFAR(self, epoch): tot_num_samples = min(self.sample_num, self.batch_size) # 64, 100 image_frame_dim = int(np.floor(np.sqrt(tot_num_samples))) # 8 """ random condition, random noise """ y = np.random.choice(self.y_dim, self.batch_size) y_one_hot = np.zeros((self.batch_size, self.y_dim)) y_one_hot[np.arange(self.batch_size), y] = 1 z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim)) # 100, 100 samples = self.sess.run(self.fake_images, feed_dict={ self.z: z_sample, self.y: y_one_hot }) save_matplot_img( samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim], self.result_dir + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png') # MNIST def visualize_results_MNIST(self, epoch): tot_num_samples = min(self.sample_num, self.batch_size) image_frame_dim = int(np.floor(np.sqrt(tot_num_samples))) """ random condition, random noise """ y = np.random.choice(self.y_dim, self.batch_size) y_one_hot = np.zeros((self.batch_size, self.y_dim)) y_one_hot[np.arange(self.batch_size), y] = 1 z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim)) samples = self.sess.run(self.fake_images, feed_dict={ self.z: z_sample, self.y: y_one_hot }) save_images( samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim], check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png') """ specified condition, random noise """ n_styles = 10 # must be less than or equal to self.batch_size np.random.seed() si = np.random.choice(self.batch_size, n_styles) for l in range(self.y_dim): y = np.zeros(self.batch_size, dtype=np.int64) + l y_one_hot = np.zeros((self.batch_size, self.y_dim)) y_one_hot[np.arange(self.batch_size), y] = 1 samples = self.sess.run(self.fake_images, feed_dict={ self.z: z_sample, self.y: y_one_hot }) save_images( samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim], check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_class_%d.png' % l) samples = samples[si, :, :, :] if l == 0: all_samples = samples else: all_samples = np.concatenate((all_samples, samples), axis=0) """ save merged images to check style-consistency """ canvas = np.zeros_like(all_samples) for s in range(n_styles): for c in range(self.y_dim): canvas[s * self.y_dim + c, :, :, :] = all_samples[c * n_styles + s, :, :, :] save_images( canvas, [n_styles, self.y_dim], check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes_style_by_style.png') @property def model_dir(self): return "{}_{}_{}_{}".format(self.model_name, self.dataset_name, self.batch_size, self.z_dim) def save(self, checkpoint_dir, step): checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, 
self.model_name) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) self.saver.save(self.sess, os.path.join(checkpoint_dir, self.model_name + '.model'), global_step=step) def load(self, checkpoint_dir): import re print(" [*] Reading checkpoints...") checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name) ckpt = tf.train.get_checkpoint_state(checkpoint_dir) if ckpt and ckpt.model_checkpoint_path: ckpt_name = os.path.basename(ckpt.model_checkpoint_path) self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) counter = int( next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0)) print(" [*] Success to read {}".format(ckpt_name)) return True, counter else: print(" [*] Failed to find a checkpoint") return False, 0 ``` ## gan.utils ``` """ Most codes from https://github.com/carpedm20/DCGAN-tensorflow """ from __future__ import division import scipy.misc import numpy as np from six.moves import xrange import matplotlib.pyplot as plt import os, gzip import tensorflow as tf import tensorflow.contrib.slim as slim from keras.datasets import cifar10 from keras.datasets import mnist def one_hot(x, n): """ convert index representation to one-hot representation """ x = np.array(x) assert x.ndim == 1 return np.eye(n)[x] def prepare_input(data=None, labels=None): image_height = 32 image_width = 32 image_depth = 3 assert (data.shape[1] == image_height * image_width * image_depth) assert (data.shape[0] == labels.shape[0]) # do mean normalization across all samples mu = np.mean(data, axis=0) mu = mu.reshape(1, -1) sigma = np.std(data, axis=0) sigma = sigma.reshape(1, -1) data = data - mu data = data / sigma is_nan = np.isnan(data) is_inf = np.isinf(data) if np.any(is_nan) or np.any(is_inf): print('data is not well-formed : is_nan {n}, is_inf: {i}'.format( n=np.any(is_nan), i=np.any(is_inf))) # data is transformed from (no_of_samples, 3072) to (no_of_samples , image_height, image_width, image_depth) # make sure the type of the data is no.float32 data = data.reshape([-1, image_depth, image_height, image_width]) data = data.transpose([0, 2, 3, 1]) data = data.astype(np.float32) return data, labels def read_cifar10(filename): # queue one element class CIFAR10Record(object): pass result = CIFAR10Record() label_bytes = 1 # 2 for CIFAR-100 result.height = 32 result.width = 32 result.depth = 3 data = np.load(filename, encoding='latin1') value = np.asarray(data['data']).astype(np.float32) labels = np.asarray(data['labels']).astype(np.int32) return prepare_input(value, labels) def load_cifar10(train): (x_train, y_train), (x_test, y_test) = cifar10.load_data() if (train == True): dataX = x_train.reshape([-1, 32, 32, 3]) dataY = y_train else: dataX = x_test.reshape([-1, 32, 32, 3]) dataY = y_test seed = 547 np.random.seed(seed) np.random.shuffle(dataX) np.random.seed(seed) np.random.shuffle(dataY) y_vec = np.zeros((len(dataY), 10), dtype=np.float) for i, label in enumerate(dataY): y_vec[i, dataY[i]] = 1.0 return dataX / 255., y_vec def load_mnist(train = True): def extract_data(filename, num_data, head_size, data_size): with gzip.open(filename) as bytestream: bytestream.read(head_size) buf = bytestream.read(data_size * num_data) data = np.frombuffer(buf, dtype=np.uint8).astype(np.float) return data (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.reshape((60000, 28, 28, 1)) y_train = y_train.reshape((60000)) x_test = x_test.reshape((10000, 28, 28, 1)) y_test = y_test.reshape((10000)) y_train = np.asarray(y_train) y_test = np.asarray(y_test) 
if (train == True): seed = 547 np.random.seed(seed) np.random.shuffle(x_train) np.random.seed(seed) np.random.shuffle(y_train) y_vec = np.zeros((len(y_train), 10), dtype=np.float) for i, label in enumerate(y_train): y_vec[i, y_train[i]] = 1.0 return x_train / 255., y_vec else: seed = 547 np.random.seed(seed) np.random.shuffle(x_test) np.random.seed(seed) np.random.shuffle(y_test) y_vec = np.zeros((len(y_test), 10), dtype=np.float) for i, label in enumerate(y_test): y_vec[i, y_test[i]] = 1.0 return x_test / 255., y_vec def check_folder(log_dir): if not os.path.exists(log_dir): os.makedirs(log_dir) return log_dir def show_all_variables(): model_vars = tf.trainable_variables() slim.model_analyzer.analyze_vars(model_vars, print_info=True) def get_image(image_path, input_height, input_width, resize_height=64, resize_width=64, crop=True, grayscale=False): image = imread(image_path, grayscale) return transform(image, input_height, input_width, resize_height, resize_width, crop) def save_images(images, size, image_path): return imsave(inverse_transform(images), size, image_path) def imread(path, grayscale=False): if (grayscale): return scipy.misc.imread(path, flatten=True).astype(np.float) else: return scipy.misc.imread(path).astype(np.float) def merge_images(images, size): return inverse_transform(images) def merge(images, size): h, w = images.shape[1], images.shape[2] if (images.shape[3] in (3, 4)): c = images.shape[3] img = np.zeros((h * size[0], w * size[1], c)) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] img[j * h:j * h + h, i * w:i * w + w, :] = image return img elif images.shape[3] == 1: img = np.zeros((h * size[0], w * size[1])) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] img[j * h:j * h + h, i * w:i * w + w] = image[:, :, 0] return img else: raise ValueError('in merge(images,size) images parameter ' 'must have dimensions: HxW or HxWx3 or HxWx4') def imsave(images, size, path): image = np.squeeze(merge(images, size)) return scipy.misc.imsave(path, image) def center_crop(x, crop_h, crop_w, resize_h=64, resize_w=64): if crop_w is None: crop_w = crop_h h, w = x.shape[:2] j = int(round((h - crop_h) / 2.)) i = int(round((w - crop_w) / 2.)) return scipy.misc.imresize(x[j:j + crop_h, i:i + crop_w], [resize_h, resize_w]) def transform(image, input_height, input_width, resize_height=64, resize_width=64, crop=True): if crop: cropped_image = center_crop(image, input_height, input_width, resize_height, resize_width) else: cropped_image = scipy.misc.imresize(image, [resize_height, resize_width]) return np.array(cropped_image) / 127.5 - 1. def inverse_transform(images): return (images + 1.) / 2. 
""" Drawing Tools """ # borrowed from https://github.com/ykwon0407/variational_autoencoder/blob/master/variational_bayes.ipynb def save_scattered_image(z, id, z_range_x, z_range_y, name='scattered_image.jpg'): N = 10 plt.figure(figsize=(8, 6)) plt.scatter(z[:, 0], z[:, 1], c=np.argmax(id, 1), marker='o', edgecolor='none', cmap=discrete_cmap(N, 'jet')) plt.colorbar(ticks=range(N)) axes = plt.gca() axes.set_xlim([-z_range_x, z_range_x]) axes.set_ylim([-z_range_y, z_range_y]) plt.grid(True) plt.savefig(name) # borrowed from https://gist.github.com/jakevdp/91077b0cae40f8f8244a def discrete_cmap(N, base_cmap=None): """Create an N-bin discrete colormap from the specified input map""" # Note that if base_cmap is a string or None, you can simply do # return plt.cm.get_cmap(base_cmap, N) # The following works for string, None, or a colormap instance: base = plt.cm.get_cmap(base_cmap) color_list = base(np.linspace(0, 1, N)) cmap_name = base.name + str(N) return base.from_list(cmap_name, color_list, N) def save_matplot_img(images, size, image_path): # revice image data // M*N*3 // RGB float32 : value must set between 0. with 1. for idx in range(64): vMin = np.amin(images[idx]) vMax = np.amax(images[idx]) img_arr = images[idx].reshape(32 * 32 * 3, 1) # flatten for i, v in enumerate(img_arr): img_arr[i] = (v - vMin) / (vMax - vMin) img_arr = img_arr.reshape(32, 32, 3) # M*N*3 plt.subplot(8, 8, idx + 1), plt.imshow(img_arr, interpolation='nearest') plt.axis("off") plt.savefig(image_path) ``` ## Main ``` import tensorflow as tf import os base_dir = "./" out_dir = base_dir + "mnist_clip1_sigma0.6_lr0.55" if not os.path.exists(out_dir): os.mkdir(out_dir) gpu_options = tf.GPUOptions(visible_device_list="0") with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)) as sess: epoch = 100 cgan = OUR_DP_CGAN(sess, epoch=epoch, batch_size=64, z_dim=100, epsilon=9.6, delta=1e-5, sigma=0.6, clip_value=1, lr=0.055, dataset_name='mnist', checkpoint_dir=out_dir + "/checkpoint/", result_dir=out_dir + "/results/", log_dir=out_dir + "/logs/", base_dir=base_dir) cgan.build_model() print(" [*] Building model finished!") show_all_variables() cgan.train() print(" [*] Training finished!") ```
### **PINN eikonal solver for a portion of the Marmousi model** ``` from google.colab import drive drive.mount('/content/gdrive') cd "/content/gdrive/My Drive/Colab Notebooks/Codes/PINN_isotropic_eikonal_R1" !pip install sciann==0.5.4.0 !pip install tensorflow==2.2.0 #!pip install keras==2.3.1 import numpy as np import pandas as pd import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable import tensorflow as tf from sciann import Functional, Variable, SciModel, PDE from sciann.utils import * import scipy.io import time import random from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid1.inset_locator import mark_inset tf.config.threading.set_intra_op_parallelism_threads(1) tf.config.threading.set_inter_op_parallelism_threads(1) np.random.seed(123) tf.random.set_seed(123) # Loading velocity model filename="./inputs/marm/model/marm_vz.txt" marm = pd.read_csv(filename, index_col=None, header=None) velmodel = np.reshape(np.array(marm), (101, 101)).T # Loading reference solution filename="./inputs/marm/traveltimes/fmm_or2_marm_s(1,1).txt" T_data = pd.read_csv(filename, index_col=None, header=None) T_data = np.reshape(np.array(T_data), (101, 101)).T #Model specifications zmin = 0.; zmax = 2.; deltaz = 0.02; xmin = 0.; xmax = 2.; deltax = 0.02; # Point-source location sz = 1.0; sx = 1.0; # Number of training points num_tr_pts = 3000 # Creating grid, calculating refrence traveltimes, and prepare list of grid points for training (X_star) z = np.arange(zmin,zmax+deltaz,deltaz) nz = z.size x = np.arange(xmin,xmax+deltax,deltax) nx = x.size Z,X = np.meshgrid(z,x,indexing='ij') X_star = [Z.reshape(-1,1), X.reshape(-1,1)] selected_pts = np.random.choice(np.arange(Z.size),num_tr_pts,replace=False) Zf = Z.reshape(-1,1)[selected_pts] Zf = np.append(Zf,sz) Xf = X.reshape(-1,1)[selected_pts] Xf = np.append(Xf,sx) X_starf = [Zf.reshape(-1,1), Xf.reshape(-1,1)] # Plot the velocity model with the source location plt.style.use('default') plt.figure(figsize=(4,4)) ax = plt.gca() im = ax.imshow(velmodel, extent=[xmin,xmax,zmax,zmin], aspect=1, cmap="jet") ax.plot(sx,sz,'k*',markersize=8) plt.xlabel('Offset (km)', fontsize=14) plt.xticks(fontsize=10) plt.ylabel('Depth (km)', fontsize=14) plt.yticks(fontsize=10) ax.xaxis.set_major_locator(plt.MultipleLocator(0.5)) ax.yaxis.set_major_locator(plt.MultipleLocator(0.5)) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="6%", pad=0.15) cbar = plt.colorbar(im, cax=cax) cbar.set_label('km/s',size=10) cbar.ax.tick_params(labelsize=10) plt.savefig("./figs/marm/velmodel.pdf", format='pdf', bbox_inches="tight") # Analytical solution for the known traveltime part vel = velmodel[int(round(sz/deltaz)),int(round(sx/deltax))] # Velocity at the source location T0 = np.sqrt((Z-sz)**2 + (X-sx)**2)/vel; px0 = np.divide(X-sx, T0*vel**2, out=np.zeros_like(T0), where=T0!=0) pz0 = np.divide(Z-sz, T0*vel**2, out=np.zeros_like(T0), where=T0!=0) # Find source location id in X_star TOLX = 1e-6 TOLZ = 1e-6 sids,_ = np.where(np.logical_and(np.abs(X_starf[0]-sz)<TOLZ , np.abs(X_starf[1]-sx)<TOLX)) print(sids) print(sids.shape) print(X_starf[0][sids,0]) print(X_starf[1][sids,0]) # Preparing the Sciann model object K.clear_session() layers = [20]*10 # Appending source values velmodelf = velmodel.reshape(-1,1)[selected_pts]; velmodelf = np.append(velmodelf,vel) px0f = px0.reshape(-1,1)[selected_pts]; px0f = np.append(px0f,0.) pz0f = pz0.reshape(-1,1)[selected_pts]; pz0f = np.append(pz0f,0.) 
T0f = T0.reshape(-1,1)[selected_pts]; T0f = np.append(T0f,0.) xt = Variable("xt",dtype='float64') zt = Variable("zt",dtype='float64') vt = Variable("vt",dtype='float64') px0t = Variable("px0t",dtype='float64') pz0t = Variable("pz0t",dtype='float64') T0t = Variable("T0t",dtype='float64') tau = Functional("tau", [zt, xt], layers, 'l-atan') # Loss function based on the factored isotropic eikonal equation L = (T0t*diff(tau, xt) + tau*px0t)**2 + (T0t*diff(tau, zt) + tau*pz0t)**2 - 1.0/vt**2 targets = [tau, 20*L, (1-sign(tau*T0t))*abs(tau*T0t)] target_vals = [(sids, np.ones(sids.shape).reshape(-1,1)), 'zeros', 'zeros'] model = SciModel( [zt, xt, vt, pz0t, px0t, T0t], targets, load_weights_from='models/vofz_model-end.hdf5', optimizer='scipy-l-BFGS-B' ) #Model training start_time = time.time() hist = model.train( X_starf + [velmodelf,pz0f,px0f,T0f], target_vals, batch_size = X_starf[0].size, epochs = 12000, learning_rate = 0.008, verbose=0 ) elapsed = time.time() - start_time print('Training time: %.2f seconds' %(elapsed)) # Convergence history plot for verification fig = plt.figure(figsize=(5,3)) ax = plt.axes() #ax.semilogy(np.arange(0,300,0.001),hist.history['loss'],LineWidth=2) ax.semilogy(hist.history['loss'],LineWidth=2) ax.set_xlabel('Epochs (x $10^3$)',fontsize=16) plt.xticks(fontsize=12) #ax.xaxis.set_major_locator(plt.MultipleLocator(50)) ax.set_ylabel('Loss',fontsize=16) plt.yticks(fontsize=12); plt.grid() # Predicting traveltime solution from the trained model L_pred = L.eval(model, X_star + [velmodel,pz0,px0,T0]) tau_pred = tau.eval(model, X_star + [velmodel,pz0,px0,T0]) tau_pred = tau_pred.reshape(Z.shape) T_pred = tau_pred*T0 print('Time at source: %.4f'%(tau_pred[int(round(sz/deltaz)),int(round(sx/deltax))])) # Plot the PINN solution error plt.style.use('default') plt.figure(figsize=(4,4)) ax = plt.gca() im = ax.imshow(np.abs(T_pred-T_data), extent=[xmin,xmax,zmax,zmin], aspect=1, cmap="jet") plt.xlabel('Offset (km)', fontsize=14) plt.xticks(fontsize=10) plt.ylabel('Depth (km)', fontsize=14) plt.yticks(fontsize=10) ax.xaxis.set_major_locator(plt.MultipleLocator(0.5)) ax.yaxis.set_major_locator(plt.MultipleLocator(0.5)) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="6%", pad=0.15) cbar = plt.colorbar(im, cax=cax) cbar.set_label('seconds',size=10) cbar.ax.tick_params(labelsize=10) plt.savefig("./figs/marm/pinnerror.pdf", format='pdf', bbox_inches="tight") # Load fast sweeping traveltims for comparison T_fsm = np.load('./inputs/marm/traveltimes/Tcomp.npy') # Plot the first order FMM solution error plt.style.use('default') plt.figure(figsize=(4,4)) ax = plt.gca() im = ax.imshow(np.abs(T_fsm-T_data), extent=[xmin,xmax,zmax,zmin], aspect=1, cmap="jet") plt.xlabel('Offset (km)', fontsize=14) plt.xticks(fontsize=10) plt.ylabel('Depth (km)', fontsize=14) plt.yticks(fontsize=10) ax.xaxis.set_major_locator(plt.MultipleLocator(0.5)) ax.yaxis.set_major_locator(plt.MultipleLocator(0.5)) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="6%", pad=0.15) cbar = plt.colorbar(im, cax=cax) cbar.set_label('seconds',size=10) cbar.ax.tick_params(labelsize=10) plt.savefig("./figs/marm/fmm1error.pdf", format='pdf', bbox_inches="tight") # Traveltime contour plots fig = plt.figure(figsize=(5,5)) ax = plt.gca() im1 = ax.contour(T_data, 6, extent=[xmin,xmax,zmin,zmax], colors='r') im2 = ax.contour(T_pred, 6, extent=[xmin,xmax,zmin,zmax], colors='k',linestyles = 'dashed') im3 = ax.contour(T_fsm, 6, extent=[xmin,xmax,zmin,zmax], colors='b',linestyles = 
'dotted') ax.plot(sx,sz,'k*',markersize=8) plt.xlabel('Offset (km)', fontsize=14) plt.ylabel('Depth (km)', fontsize=14) ax.tick_params(axis='both', which='major', labelsize=8) plt.gca().invert_yaxis() h1,_ = im1.legend_elements() h2,_ = im2.legend_elements() h3,_ = im3.legend_elements() ax.legend([h1[0], h2[0], h3[0]], ['Reference', 'PINN', 'Fast sweeping'],fontsize=12) ax.xaxis.set_major_locator(plt.MultipleLocator(0.5)) ax.yaxis.set_major_locator(plt.MultipleLocator(0.5)) plt.xticks(fontsize=10) plt.yticks(fontsize=10) #ax.arrow(1.9, 1.7, -0.1, -0.1, head_width=0.05, head_length=0.075, fc='red', ec='red',width=0.02) plt.savefig("./figs/marm/contours.pdf", format='pdf', bbox_inches="tight") print(np.linalg.norm(T_pred-T_data)/np.linalg.norm(T_data)) print(np.linalg.norm(T_pred-T_data)) ```
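For reference, the loss term `L` defined above is the residual of the factored isotropic eikonal equation: the traveltime is written as the product of the analytical traveltime `T0` for a homogeneous medium with the source-point velocity and an unknown factor `tau` learned by the network, with `px0` and `pz0` the spatial derivatives of `T0` computed earlier in the notebook. In that notation (the LaTeX below is just a restatement of those code variables):

```
% Factored eikonal equation encoded by the loss L above
% (T0 -> T_0, tau -> \tau, px0/pz0 -> p_{x0}, p_{z0}, vt -> v)
T(x,z) = T_0(x,z)\,\tau(x,z), \qquad
T_0 = \frac{\sqrt{(x-x_s)^2 + (z-z_s)^2}}{v(x_s,z_s)},
\qquad
|\nabla T|^2 = \frac{1}{v^2}
\;\Longrightarrow\;
\left(T_0\,\frac{\partial\tau}{\partial x} + \tau\,p_{x0}\right)^2
+ \left(T_0\,\frac{\partial\tau}{\partial z} + \tau\,p_{z0}\right)^2
= \frac{1}{v^2}.
```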
# GRIP June'21 - The Sparks Foundation ## Data Science and Business Analytics ## Author: Smriti Gupta ### Task 1: **Prediction using Supervised ML** * Predict the percentage of an student based on the no. of study hours. * What will be predicted score if a student studies for 9.25 hrs/ day? * _LANGUAGE:_ Python * _DATASET:_ http://bit.ly/w-data ``` # Importing Libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline %config Completer.use_jedi = False # Reading data from remote link url = "https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv" df = pd.read_csv(url) # Viewing the Data df.head(10) # Shape of the Dataset df.shape # Checking the information of Data df.info() # Checking the statistical details of Data df.describe() # Checking the correlation between Hours and Scores corr = df.corr() corr colors = ['#670067','#008080'] ``` # Data Visualization ``` # 2-D graph to establish relationship between the Data and checking for linearity sns.set_style('darkgrid') df.plot(x='Hours', y='Scores', style='o') plt.title('Hours vs Percentage') plt.xlabel('Hours Studied') plt.ylabel('Percentage Score') plt.show() ``` # Data Preprocessing ``` X = df.iloc[:, :-1].values y = df.iloc[:, 1].values ``` # LINEAR REGRESSION MODEL ## Splitting Dataset into training and test sets: ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) ``` ## Training the Model ``` from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(X_train, y_train) print('TRAINING COMPLETED.') ``` ## Predicting the Score ``` y_predict = regressor.predict(X_test) prediction = pd.DataFrame({'Hours': [i[0] for i in X_test], 'Predicted Scores': [k for k in y_predict]}) prediction print(regressor.intercept_) print(regressor.coef_) # Plotting the regression line line = regressor.coef_*X+regressor.intercept_ # Plotting for the test data plt.scatter(X, y, color = colors[1]) plt.plot(X, line, color = colors[0]); plt.title('Hours vs Percentage') plt.xlabel('Hours Studied') plt.ylabel('Percentage Score') plt.show() ``` ## Checking the Accuracy Scores for training and test set ``` print('Test Score') print(regressor.score(X_test, y_test)) print('Training Score') print(regressor.score(X_train, y_train)) ``` ## Comparing Actual Scores and Predicted Scores ``` data= pd.DataFrame({'Actual': y_test,'Predicted': y_predict}) data # Visualization comparing Actual Scores and Predicted Scores plt.scatter(X_test, y_test, color = colors[1]) plt.plot(X_test, y_predict, color = colors[0]) plt.title("Hours Studied Vs Percentage (Test Dataset)") plt.xlabel("Hour") plt.ylabel("Percentage") plt.show() ``` ## Model Evaluation Metrics ``` #Checking the efficiency of model from sklearn import metrics print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_predict)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_predict)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_predict))) ``` # What will be predicted score if a student studies for 9.25 hrs/ day? ``` hours = 9.25 ans = regressor.predict([[hours]]) print("No of Hours = {}".format(hours)) print("Predicted Score = {}".format(ans[0])) ```
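Since the fitted model is a one-variable linear regression, the 9.25-hour answer printed above is simply the fitted line evaluated at that point, using the intercept and slope printed earlier in the notebook (generic regression notation below, not names from the code):

```
\hat{y} = b_0 + b_1 x
\quad\Longrightarrow\quad
\text{Predicted Score} = \texttt{regressor.intercept\_} + \texttt{regressor.coef\_}[0] \times 9.25 .
```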
<a href="https://colab.research.google.com/github/yohanesnuwara/ccs-gundih/blob/master/main/gundih_historical_production_data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import numpy as np import matplotlib.pyplot as plt import pandas as pd !git clone https://github.com/yohanesnuwara/ccs-gundih ``` # Visualize the Historical Production Data ``` # Read simulation result col = np.array(['Date', 'Days', 'WSAT', 'OSAT', 'GSAT', 'GMT', 'OMR', 'GMR', 'GCDI', 'GCDM', 'WCD', 'WGR', 'WCT', 'VPR', 'VPT', 'VIR', 'VIT', 'WPR', 'OPR', 'GPR', 'WPT', 'OPT', 'GPT', 'PR', 'GIR', 'GIT', 'GOR']) case1 = pd.read_excel(r'/content/ccs-gundih/data/CASE_1.xlsx'); case1 = pd.DataFrame(case1, columns=col) #INJ-2, 15 MMSCFD, kv/kh = 0.1 case2 = pd.read_excel(r'/content/ccs-gundih/data/CASE_2.xlsx'); case2 = pd.DataFrame(case2, columns=col) #INJ-2, 15 MMSCFD, kv/kh = 0.5 case1.head(5) case2.head(5) # convert to Panda datetime date = pd.to_datetime(case1['Date']) # gas production cumulative GPT1 = case1['GPT'] * 35.31 * 1E-06 # convert from m3 to ft3 then to mmscf GPT2 = case2['GPT'] * 35.31 * 1E-06 # gas production rate GPR1 = case1['GPR'] * 35.31 * 1E-06 GPR2 = case2['GPR'] * 35.31 * 1E-06 # average pressure PR1 = case1['PR'] * 14.5038 # convert from bar to psi PR2 = case2['PR'] * 14.5038 # plot gas production and pressure data from 2014-01-01 to 2016-01-01 pd.plotting.register_matplotlib_converters() plt.figure(figsize=(12, 7)) plt.plot(date, GPR1, color='blue') plt.plot(date, GPR2, color='red') plt.xlabel("Date"); plt.ylabel("Cumulative Gas Production (MMscf)") plt.title("Cumulative Gas Production from 01/01/2014 to 01/01/2019", pad=20, size=15) plt.xlim('2014-01-01', '2019-01-01') # plt.ylim(0, 50000) # plot gas production and pressure data from 2014-01-01 to 2016-01-01 pd.plotting.register_matplotlib_converters() fig = plt.figure() fig = plt.figure(figsize=(12,7)) host = fig.add_subplot(111) par1 = host.twinx() par2 = host.twinx() host.set_xlabel("Year") host.set_ylabel("Cumulative Gas Production (MMscf)") par1.set_ylabel("Gas Production Rate (MMscfd)") par2.set_ylabel("Average Reservoir Pressure (psi)") host.set_title("Historical Production Data of Gundih Field from 2014 to 2019", pad=20, size=15) color1 = plt.cm.viridis(0) color2 = plt.cm.viridis(.5) color3 = plt.cm.viridis(.8) p1, = host.plot(date, GPR1, color=color1,label="Gas production rate (MMscfd)") p2, = par1.plot(date, GPT1, color=color2, label="Cumulative gas production (MMscf)") p3, = par2.plot(date, PR1, color=color3, label="Average Pressure (psi)") host.set_xlim('2014-05-01', '2019-01-01') host.set_ylim(ymin=0) par1.set_ylim(0, 40000) par2.set_ylim(3400, 4100) lns = [p1, p2, p3] host.legend(handles=lns, loc='best') # right, left, top, bottom par2.spines['right'].set_position(('outward', 60)) plt.savefig('/content/ccs-gundih/result/production_curve') ``` # Dry-Gas Reservoir Analysis ``` !git clone https://github.com/yohanesnuwara/reservoir-engineering # calculate gas z factor and FVF import os, sys sys.path.append('/content/reservoir-engineering/Unit 2 Review of Rock and Fluid Properties/functions') from pseudoprops import pseudoprops from dranchuk_aboukassem import dranchuk from gasfvf import gasfvf temp_f = (temp * 9/5) + 32 # Rankine pressure = np.array(PR1) z_arr = [] Bg_arr = [] for i in range(len(pressure)): P_pr, T_pr = pseudoprops(temp_f, pressure[i], 0.8, 0.00467, 0.23) rho_pr, z = dranchuk(T_pr, P_pr) temp_r = temp_f + 459.67 Bg = 0.0282793 * z * temp_r / 
pressure[i] # Eq 2.2, temp in Rankine, p in psia, result in res ft3/scf z_arr.append(float(z)) Bg_arr.append(float(Bg)) Bg_arr = np.array(Bg_arr) F = GPT1 * Bg_arr # MMscf Eg = Bg_arr - Bg_arr[0] F_Eg = F / Eg plt.figure(figsize=(10,7)) plt.plot(GPT1, (F_Eg / 1E+03), '.', color='red') # convert F_Eg from MMscf to Bscf plt.xlim(xmin=0); plt.ylim(ymin=0) plt.title("Waterdrive Diagnostic Plot of $F/E_g$ vs. $G_p$", pad=20, size=15) plt.xlabel('Cumulative Gas Production (MMscf)') plt.ylabel('$F/E_g$ (Bscf)') plt.ylim(ymin=250) date_hist = date.iloc[:1700] GPT1_hist = GPT1.iloc[:1700] Bg_arr = np.array(Bg_arr) Bg_arr_hist = Bg_arr[:1700] F_hist = GPT1_hist * Bg_arr_hist # MMscf Eg_hist = Bg_arr_hist - Bg_arr_hist[0] F_Eg_hist = F_hist / Eg_hist plt.figure(figsize=(10,7)) plt.plot(GPT1_hist, (F_Eg_hist / 1E+03), '.') plt.title("Waterdrive Diagnostic Plot of $F/E_g$ vs. $G_p$", pad=20, size=15) plt.xlabel('Cumulative Gas Production (MMscf)') plt.ylabel('$F/E_g$ (Bscf)') plt.xlim(xmin=0); plt.ylim(300, 350) p_z = PR1 / z_arr plt.plot(GPT1, p_z, '.') ```
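The cells above implement a standard dry-gas material-balance diagnostic: with `Bg_arr` the gas formation volume factor at each average reservoir pressure (the constant 0.0282793 and the "Eq 2.2" reference are taken directly from the code), the withdrawal term `F` and gas expansion term `Eg` are formed and `F/Eg` is plotted against cumulative production, alongside the classical p/z plot. For a volumetric gas reservoir `F/Eg` stays roughly constant, while an upward trend with `Gp` is the usual signature of water influx. In equation form:

```
B_g = 0.0282793\,\frac{z\,T}{p}\ \left[\frac{\text{res ft}^3}{\text{scf}}\right]
\ (T\ \text{in}\ ^{\circ}\mathrm{R},\ p\ \text{in psia}), \qquad
F = G_p B_g, \qquad
E_g = B_g - B_{gi}, \qquad
\text{diagnostics: } \frac{F}{E_g}\ \text{vs.}\ G_p
\ \text{ and }\ \frac{p}{z}\ \text{vs.}\ G_p .
```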
# 1A.1 - Guessing a random number (solution)

We reuse the function introduced in the exercise statement, which lets the user enter a number.

```
import random
nombre = input("Entrez un nombre")
nombre
```

**Q1:** Write a game in which Python picks a random number between 0 and 100, and try to find that number in 10 guesses.

```
n = random.randint(0, 100)
appreciation = "?"
while True:
    var = int(input("Entrez un nombre"))
    if var < n:
        appreciation = "trop bas"    # too low
        print(var, appreciation)
    elif var > n:
        appreciation = "trop haut"   # too high
        print(var, appreciation)
    else:
        appreciation = "bravo !"     # found it
        print(var, appreciation)
        break
```

**Q2:** Turn this game into a function ``jeu(nVies)``, where ``nVies`` is the maximum number of attempts.

```
import random

def jeu(nVies):
    n = random.randint(0, 100)
    vies = nVies
    appreciation = "?"
    while vies > 0:
        var = int(input("Entrez un nombre"))
        if var < n:
            appreciation = "trop bas"
            print(vies, var, appreciation)
        elif var > n:
            appreciation = "trop haut"
            print(vies, var, appreciation)
        else:
            appreciation = "bravo !"
            print(vies, var, appreciation)
            break
        vies -= 1

jeu(10)
```

**Q3:** Adapt the code into a player class with a `jouer` (play) method, where a player is defined by a nickname and a number of lives. Have two players play and determine the winner.

```
class joueur:
    def __init__(self, vies, pseudo):
        self.vies = vies
        self.pseudo = pseudo

    def jouer(self):
        appreciation = "?"
        n = random.randint(0, 100)
        while self.vies > 0:
            message = appreciation + " -- " + self.pseudo + " : " + str(self.vies) + " vies restantes. Nombre choisi : "
            var = int(input(message))
            if var < n:
                appreciation = "trop bas"
                print(self.vies, var, appreciation)
            elif var > n:
                appreciation = "trop haut"
                print(self.vies, var, appreciation)
            else:
                appreciation = "bravo !"
                print(self.vies, var, appreciation)
                break
            self.vies -= 1

# Initialize the two players
j1 = joueur(10, "joueur 1")
j2 = joueur(10, "joueur 2")

# j1 and j2 play
j1.jouer()
j2.jouer()

# Lives remaining for each player
print("Nombre de vies restantes à chaque joueur")
print(j1.pseudo + " : " + str(j1.vies) + " restantes")
print(j2.pseudo + " : " + str(j2.vies) + " restantes")

# Result of the game: the player with more lives left found the number in fewer guesses
print("Résultat de la partie")
if j1.vies > j2.vies:
    print(j1.pseudo + " a gagné la partie")
elif j1.vies == j2.vies:
    print("match nul")
else:
    print(j2.pseudo + " a gagné la partie")
```
``` import sys import os import numpy as np module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path + "/src/simulations_v2") from analysis_helpers import poisson_waiting_function, \ run_multiple_trajectories, \ plot_aip_vs_t, \ plot_cip_vs_t, \ run_sensitivity_sims, \ extract_cips # what percent of self-reports are from severe symptoms? # in reality I think this value will vary a lot in the first few days, # and then reach some kind of steady-state, and I'm not sure what makes the most # sense to use here. I am setting it to the very pessimistic value of 100% of # self-reporters are severe, which yields the smallest infectious window size pct_self_reports_severe = 0.6 daily_self_report_severe = 0.85 daily_self_report_mild = 0.1 # avg_infectious_window = (avg time in ID state) + (avg time in Sy state prior to self-reporting) avg_infectious_window = 4 + pct_self_reports_severe * (1 / daily_self_report_severe) + \ (1-pct_self_reports_severe) * (1 / daily_self_report_mild) print(avg_infectious_window) pre_reopen_population = 1500 pre_reopen_daily_contacts = 7 reopen_population = 2500 reopen_daily_contacts = 10 pre_reopen_params = { 'max_time_exposed': 4, 'exposed_time_function': poisson_waiting_function(max_time=4, mean_time=1), 'max_time_pre_ID': 4, 'pre_ID_time_function': poisson_waiting_function(max_time=4, mean_time=1), 'max_time_ID': 8, 'ID_time_function': poisson_waiting_function(max_time=8, mean_time=4), 'max_time_SyID_mild': 14, 'SyID_mild_time_function': poisson_waiting_function(max_time=14, mean_time=10), 'max_time_SyID_severe': 14, 'SyID_severe_time_function': poisson_waiting_function(max_time=14, mean_time=10), 'sample_QI_exit_function': (lambda n: np.random.binomial(n, 0.05)), 'sample_QS_exit_function': (lambda n: np.random.binomial(n, 0.3)), 'exposed_infection_p': 0.026, 'expected_contacts_per_day': pre_reopen_daily_contacts, 'mild_symptoms_p': 0.4, 'mild_symptoms_daily_self_report_p': daily_self_report_mild, 'severe_symptoms_daily_self_report_p': daily_self_report_severe, 'days_between_tests': 300, 'test_population_fraction': 0, 'test_protocol_QFNR': 0.1, 'test_protocol_QFPR': 0.005, 'perform_contact_tracing': True, 'contact_tracing_constant': 0.5, 'contact_tracing_delay': 1, 'contact_trace_infectious_window': avg_infectious_window, 'pre_ID_state': 'detectable', 'population_size': pre_reopen_population, 'initial_E_count': 0, 'initial_pre_ID_count': 2, 'initial_ID_count': 0, 'initial_ID_prevalence': 0.001, 'initial_SyID_mild_count': 0, 'initial_SyID_severe_count': 0 } reopen_params = pre_reopen_params.copy() reopen_params['population_size'] = reopen_population reopen_params['expected_contacts_per_day'] = reopen_daily_contacts ``` # Run sims to understand sensitivity of 'contact_tracing_constant' ``` ctc_range = [0.1 * x for x in range(11)] dfs_ctc_pre_reopen = run_sensitivity_sims(pre_reopen_params, param_to_vary='contact_tracing_constant', param_values = ctc_range, trajectories_per_config=250, time_horizon=100) dfs_ctc_post_reopen = run_sensitivity_sims(reopen_params, param_to_vary='contact_tracing_constant', param_values = ctc_range, trajectories_per_config=250, time_horizon=100) import matplotlib.pyplot as plt def plot_many_dfs_threshold(dfs_dict, threshold=0.1, xlabel="", title="", figsize=(10,6)): plt.figure(figsize=figsize) for df_label, dfs_varied in dfs_dict.items(): p_thresholds = [] xs = sorted(list(dfs_varied.keys())) for x in xs: cips = extract_cips(dfs_varied[x]) cip_exceed_thresh = [cip for cip in cips if cip >= 
threshold] p_thresholds.append(len(cip_exceed_thresh) / len(cips) * 100) plt.plot([x * 100 for x in xs], p_thresholds, marker='o', label=df_label) plt.xlabel(xlabel) plt.ylabel("Probability at least {:.0f}% infected (%)".format(threshold * 100)) plt.title(title) plt.legend(loc='best') plt.show() title = """Outbreak Likelihood vs. Contact Tracing Effectiveness""" plot_many_dfs_threshold({'Post-Reopen (Population-size 2500, Contacts/person/day 10)': dfs_ctc_post_reopen, 'Pre-Reopen (Population-size 1500, Contacts/person/day 7)': dfs_ctc_pre_reopen, }, xlabel="Percentage of contacts recalled in contact tracing (%)", title=title) ```
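As a quick check of the value printed by the first cell of this notebook, the `avg_infectious_window` expression combines the mean time in the ID state with the expected time to self-report for the severe and mild symptomatic fractions; with the parameters used here:

```
\text{avg\_infectious\_window}
= 4 + 0.6\cdot\frac{1}{0.85} + 0.4\cdot\frac{1}{0.1}
\approx 4 + 0.71 + 4.0 \approx 8.71\ \text{days}.
```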
# Artificial Intelligence Nanodegree ## Machine Translation Project In this notebook, sections that end with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide. Please be sure to read the instructions carefully! ## Introduction In this notebook, you will build a deep neural network that functions as part of an end-to-end machine translation pipeline. Your completed pipeline will accept English text as input and return the French translation. - **Preprocess** - You'll convert text to sequence of integers. - **Models** Create models which accepts a sequence of integers as input and returns a probability distribution over possible translations. After learning about the basic types of neural networks that are often used for machine translation, you will engage in your own investigations, to design your own model! - **Prediction** Run the model on English text. ``` %load_ext autoreload %aimport helper, tests %autoreload 1 import collections import helper import numpy as np import project_tests as tests from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Model from keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional from keras.layers.embeddings import Embedding from keras.optimizers import Adam from keras.losses import sparse_categorical_crossentropy ``` ### Verify access to the GPU The following test applies only if you expect to be using a GPU, e.g., while running in a Udacity Workspace or using an AWS instance with GPU support. Run the next cell, and verify that the device_type is "GPU". - If the device is not GPU & you are running from a Udacity Workspace, then save your workspace with the icon at the top, then click "enable" at the bottom of the workspace. - If the device is not GPU & you are running from an AWS instance, then refer to the cloud computing instructions in the classroom to verify your setup steps. ``` from tensorflow.python.client import device_lib print(device_lib.list_local_devices()) ``` ## Dataset We begin by investigating the dataset that will be used to train and evaluate your pipeline. The most common datasets used for machine translation are from [WMT](http://www.statmt.org/). However, that will take a long time to train a neural network on. We'll be using a dataset we created for this project that contains a small vocabulary. You'll be able to train your model in a reasonable time with this dataset. ### Load Data The data is located in `data/small_vocab_en` and `data/small_vocab_fr`. The `small_vocab_en` file contains English sentences with their French translations in the `small_vocab_fr` file. Load the English and French data from these files from running the cell below. ``` # Load English data english_sentences = helper.load_data('data/small_vocab_en') # Load French data french_sentences = helper.load_data('data/small_vocab_fr') print('Dataset Loaded') ``` ### Files Each line in `small_vocab_en` contains an English sentence with the respective translation in each line of `small_vocab_fr`. View the first two lines from each file. ``` for sample_i in range(2): print('small_vocab_en Line {}: {}'.format(sample_i + 1, english_sentences[sample_i])) print('small_vocab_fr Line {}: {}'.format(sample_i + 1, french_sentences[sample_i])) ``` From looking at the sentences, you can see they have been preprocessed already. The puncuations have been delimited using spaces. 
All the text have been converted to lowercase. This should save you some time, but the text requires more preprocessing. ### Vocabulary The complexity of the problem is determined by the complexity of the vocabulary. A more complex vocabulary is a more complex problem. Let's look at the complexity of the dataset we'll be working with. ``` english_words_counter = collections.Counter([word for sentence in english_sentences for word in sentence.split()]) french_words_counter = collections.Counter([word for sentence in french_sentences for word in sentence.split()]) print('{} English words.'.format(len([word for sentence in english_sentences for word in sentence.split()]))) print('{} unique English words.'.format(len(english_words_counter))) print('10 Most common words in the English dataset:') print('"' + '" "'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '"') print() print('{} French words.'.format(len([word for sentence in french_sentences for word in sentence.split()]))) print('{} unique French words.'.format(len(french_words_counter))) print('10 Most common words in the French dataset:') print('"' + '" "'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '"') ``` For comparison, _Alice's Adventures in Wonderland_ contains 2,766 unique words of a total of 15,500 words. ## Preprocess For this project, you won't use text data as input to your model. Instead, you'll convert the text into sequences of integers using the following preprocess methods: 1. Tokenize the words into ids 2. Add padding to make all the sequences the same length. Time to start preprocessing the data... ### Tokenize (IMPLEMENTATION) For a neural network to predict on text data, it first has to be turned into data it can understand. Text data like "dog" is a sequence of ASCII character encodings. Since a neural network is a series of multiplication and addition operations, the input data needs to be number(s). We can turn each character into a number or each word into a number. These are called character and word ids, respectively. Character ids are used for character level models that generate text predictions for each character. A word level model uses word ids that generate text predictions for each word. Word level models tend to learn better, since they are lower in complexity, so we'll use those. Turn each sentence into a sequence of words ids using Keras's [`Tokenizer`](https://keras.io/preprocessing/text/#tokenizer) function. Use this function to tokenize `english_sentences` and `french_sentences` in the cell below. Running the cell will run `tokenize` on sample data and show output for debugging. 
``` def tokenize(x): """ Tokenize x :param x: List of sentences/strings to be tokenized :return: Tuple of (tokenized x data, tokenizer used to tokenize x) """ # TODO: Implement tokenizer = Tokenizer() tokenizer.fit_on_texts(x) return tokenizer.texts_to_sequences(x), tokenizer tests.test_tokenize(tokenize) # Tokenize Example output text_sentences = [ 'The quick brown fox jumps over the lazy dog .', 'By Jove , my quick study of lexicography won a prize .', 'This is a short sentence .'] text_tokenized, text_tokenizer = tokenize(text_sentences) print(text_tokenizer.word_index) print() for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)): print('Sequence {} in x'.format(sample_i + 1)) print(' Input: {}'.format(sent)) print(' Output: {}'.format(token_sent)) ``` ### Padding (IMPLEMENTATION) When batching the sequence of word ids together, each sequence needs to be the same length. Since sentences are dynamic in length, we can add padding to the end of the sequences to make them the same length. Make sure all the English sequences have the same length and all the French sequences have the same length by adding padding to the **end** of each sequence using Keras's [`pad_sequences`](https://keras.io/preprocessing/sequence/#pad_sequences) function. ``` def pad(x, length=None): """ Pad x :param x: List of sequences. :param length: Length to pad the sequence to. If None, use length of longest sequence in x. :return: Padded numpy array of sequences """ # TODO: Implement if length is None: length = max([len(sentence) for sentence in x]) return pad_sequences(x, maxlen=length, padding='post') tests.test_pad(pad) # Pad Tokenized output test_pad = pad(text_tokenized) for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)): print('Sequence {} in x'.format(sample_i + 1)) print(' Input: {}'.format(np.array(token_sent))) print(' Output: {}'.format(pad_sent)) ``` ### Preprocess Pipeline Your focus for this project is to build neural network architecture, so we won't ask you to create a preprocess pipeline. Instead, we've provided you with the implementation of the `preprocess` function. ``` def preprocess(x, y): """ Preprocess x and y :param x: Feature List of sentences :param y: Label List of sentences :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer) """ preprocess_x, x_tk = tokenize(x) preprocess_y, y_tk = tokenize(y) preprocess_x = pad(preprocess_x) preprocess_y = pad(preprocess_y) # Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions preprocess_y = preprocess_y.reshape(*preprocess_y.shape, 1) return preprocess_x, preprocess_y, x_tk, y_tk preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\ preprocess(english_sentences, french_sentences) max_english_sequence_length = preproc_english_sentences.shape[1] max_french_sequence_length = preproc_french_sentences.shape[1] english_vocab_size = len(english_tokenizer.word_index) french_vocab_size = len(french_tokenizer.word_index) print('Data Preprocessed') print("Max English sentence length:", max_english_sequence_length) print("Max French sentence length:", max_french_sequence_length) print("English vocabulary size:", english_vocab_size) print("French vocabulary size:", french_vocab_size) ``` ## Models In this section, you will experiment with various neural network architectures. You will begin by training four relatively simple architectures. 
- Model 1 is a simple RNN - Model 2 is a RNN with Embedding - Model 3 is a Bidirectional RNN - Model 4 is an optional Encoder-Decoder RNN After experimenting with the four simple architectures, you will construct a deeper architecture that is designed to outperform all four models. ### Ids Back to Text The neural network will be translating the input to words ids, which isn't the final form we want. We want the French translation. The function `logits_to_text` will bridge the gab between the logits from the neural network to the French translation. You'll be using this function to better understand the output of the neural network. ``` def logits_to_text(logits, tokenizer): """ Turn logits from a neural network into text using the tokenizer :param logits: Logits from a neural network :param tokenizer: Keras Tokenizer fit on the labels :return: String that represents the text of the logits """ index_to_words = {id: word for word, id in tokenizer.word_index.items()} index_to_words[0] = '<PAD>' return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)]) print('`logits_to_text` function loaded.') ``` ### Model 1: RNN (IMPLEMENTATION) ![RNN](images/rnn.png) A basic RNN model is a good baseline for sequence data. In this model, you'll build a RNN that translates English to French. ``` def simple_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size): """ Build and train a basic RNN on x and y :param input_shape: Tuple of input shape :param output_sequence_length: Length of output sequence :param english_vocab_size: Number of unique English words in the dataset :param french_vocab_size: Number of unique French words in the dataset :return: Keras model built, but not trained """ # TODO: Build the layers inputs = Input(input_shape[1:]) outputs = GRU(256, return_sequences=True)(inputs) outputs = TimeDistributed(Dense(french_vocab_size, activation="softmax"))(outputs) model = Model(inputs, outputs) learning_rate = 0.001 model.compile(loss=sparse_categorical_crossentropy, optimizer=Adam(learning_rate), metrics=['accuracy']) return model tests.test_simple_model(simple_model) # Reshaping the input to work with a basic RNN tmp_x = pad(preproc_english_sentences, max_french_sequence_length) tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1)) # Train the neural network simple_rnn_model = simple_model( tmp_x.shape, max_french_sequence_length, english_vocab_size, french_vocab_size) simple_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2) # Print prediction(s) print(logits_to_text(simple_rnn_model.predict(tmp_x[:1])[0], french_tokenizer)) ``` ### Model 2: Embedding (IMPLEMENTATION) ![RNN](images/embedding.png) You've turned the words into ids, but there's a better representation of a word. This is called word embeddings. An embedding is a vector representation of the word that is close to similar words in n-dimensional space, where the n represents the size of the embedding vectors. In this model, you'll create a RNN model using embedding. 
``` def embed_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size): """ Build and train a RNN model using word embedding on x and y :param input_shape: Tuple of input shape :param output_sequence_length: Length of output sequence :param english_vocab_size: Number of unique English words in the dataset :param french_vocab_size: Number of unique French words in the dataset :return: Keras model built, but not trained """ # TODO: Implement inputs = Input(input_shape[1:]) outputs = Embedding(english_vocab_size, output_sequence_length)(inputs) outputs = GRU(256, return_sequences=True)(outputs) outputs = TimeDistributed(Dense(french_vocab_size, activation="softmax"))(outputs) model = Model(inputs, outputs) learning_rate = 0.001 model.compile(loss=sparse_categorical_crossentropy, optimizer=Adam(learning_rate), metrics=['accuracy']) return model tests.test_embed_model(embed_model) # TODO: Reshape the input tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1]) tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2])) # TODO: Train the neural network embed_rnn_model = embed_model( tmp_x.shape, preproc_french_sentences.shape[1], english_vocab_size, french_vocab_size) embed_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2) # TODO: Print prediction(s) print(logits_to_text(embed_rnn_model.predict(tmp_x[:1])[0], french_tokenizer)) ``` ### Model 3: Bidirectional RNNs (IMPLEMENTATION) ![RNN](images/bidirectional.png) One restriction of a RNN is that it can't see the future input, only the past. This is where bidirectional recurrent neural networks come in. They are able to see the future data. ``` def bd_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size): """ Build and train a bidirectional RNN model on x and y :param input_shape: Tuple of input shape :param output_sequence_length: Length of output sequence :param english_vocab_size: Number of unique English words in the dataset :param french_vocab_size: Number of unique French words in the dataset :return: Keras model built, but not trained """ # TODO: Implement inputs = Input(input_shape[1:]) outputs = Bidirectional(GRU(256, return_sequences=True))(inputs) outputs = TimeDistributed(Dense(french_vocab_size, activation="softmax"))(outputs) model = Model(inputs, outputs) learning_rate = 0.001 model.compile(loss=sparse_categorical_crossentropy, optimizer=Adam(learning_rate), metrics=['accuracy']) return model tests.test_bd_model(bd_model) # TODO: Train and Print prediction(s) tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1]) tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2])) bd_rnn_model = embed_model( tmp_x.shape, preproc_french_sentences.shape[1], english_vocab_size, french_vocab_size) bd_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2) print(logits_to_text(bd_rnn_model.predict(tmp_x[:1])[0], french_tokenizer)) ``` ### Model 4: Encoder-Decoder (OPTIONAL) Time to look at encoder-decoder models. This model is made up of an encoder and decoder. The encoder creates a matrix representation of the sentence. The decoder takes this matrix as input and predicts the translation as output. Create an encoder-decoder model in the cell below. 
``` def encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size): """ Build and train an encoder-decoder model on x and y :param input_shape: Tuple of input shape :param output_sequence_length: Length of output sequence :param english_vocab_size: Number of unique English words in the dataset :param french_vocab_size: Number of unique French words in the dataset :return: Keras model built, but not trained """ # OPTIONAL: Implement inputs = Input(input_shape[1:]) encoded = GRU(256, return_sequences=False)(inputs) decoded = RepeatVector(output_sequence_length)(encoded) outputs = GRU(256, return_sequences=True)(decoded) outputs = Dense(french_vocab_size, activation="softmax")(outputs) model = Model(inputs, outputs) learning_rate = 0.001 model.compile(loss=sparse_categorical_crossentropy, optimizer=Adam(learning_rate), metrics=['accuracy']) return model tests.test_encdec_model(encdec_model) # OPTIONAL: Train and Print prediction(s) tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1]) tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2])) encdec_rnn_model = embed_model( tmp_x.shape, preproc_french_sentences.shape[1], english_vocab_size, french_vocab_size) encdec_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2) print(logits_to_text(encdec_rnn_model.predict(tmp_x[:1])[0], french_tokenizer)) ``` ### Model 5: Custom (IMPLEMENTATION) Use everything you learned from the previous models to create a model that incorporates embedding and a bidirectional rnn into one model. ``` def model_final(input_shape, output_sequence_length, english_vocab_size, french_vocab_size): """ Build and train a model that incorporates embedding, encoder-decoder, and bidirectional RNN on x and y :param input_shape: Tuple of input shape :param output_sequence_length: Length of output sequence :param english_vocab_size: Number of unique English words in the dataset :param french_vocab_size: Number of unique French words in the dataset :return: Keras model built, but not trained """ # TODO: Implement learning_rate = 0.001 inputs = Input(shape=input_shape[1:]) encoded = Embedding(english_vocab_size, 300)(inputs) encoded = Bidirectional(GRU(512, dropout=0.2))(encoded) encoded = Dense(512, activation='relu')(encoded) decoded = RepeatVector(output_sequence_length)(encoded) decoded = Bidirectional(GRU(512, dropout=0.2, return_sequences=True))(decoded) decoded = TimeDistributed(Dense(french_vocab_size))(decoded) predictions = Activation('softmax')(decoded) model = Model(inputs=inputs, outputs=predictions) model.compile(loss=sparse_categorical_crossentropy, optimizer=Adam(learning_rate), metrics=['accuracy']) return model tests.test_model_final(model_final) print('Final Model Loaded') # TODO: Train the final model tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1]) tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2])) final_model = model_final( tmp_x.shape, preproc_french_sentences.shape[1], english_vocab_size, french_vocab_size) final_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2) print(logits_to_text(encdec_rnn_model.predict(tmp_x[:1])[0], french_tokenizer)) ``` ## Prediction (IMPLEMENTATION) ``` def final_predictions(x, y, x_tk, y_tk): """ Gets predictions using the final model :param x: Preprocessed English data :param y: Preprocessed French data :param x_tk: English tokenizer :param y_tk: French tokenizer """ # TODO: Train neural network using 
model_final model = model_final(x.shape, y.shape[1], len(x_tk.word_index), len(y_tk.word_index)) model.fit(x, y, batch_size=1024, epochs=12, validation_split=0.2) ## DON'T EDIT ANYTHING BELOW THIS LINE y_id_to_word = {value: key for key, value in y_tk.word_index.items()} y_id_to_word[0] = '<PAD>' sentence = 'he saw a old yellow truck' sentence = [x_tk.word_index[word] for word in sentence.split()] sentence = pad_sequences([sentence], maxlen=x.shape[-1], padding='post') sentences = np.array([sentence[0], x[0]]) predictions = model.predict(sentences, len(sentences)) print('Sample 1:') print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]])) print('Il a vu un vieux camion jaune') print('Sample 2:') print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[1]])) print(' '.join([y_id_to_word[np.max(x)] for x in y[0]])) final_predictions(preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer) ``` ## Submission When you're ready to submit, complete the following steps: 1. Review the [rubric](https://review.udacity.com/#!/rubrics/1004/view) to ensure your submission meets all requirements to pass 2. Generate an HTML version of this notebook - Run the next cell to attempt automatic generation (this is the recommended method in Workspaces) - Navigate to **FILE -> Download as -> HTML (.html)** - Manually generate a copy using `nbconvert` from your shell terminal ``` $ pip install nbconvert $ python -m nbconvert machine_translation.ipynb ``` 3. Submit the project - If you are in a Workspace, simply click the "Submit Project" button (bottom towards the right) - Otherwise, add the following files into a zip archive and submit them - `helper.py` - `machine_translation.ipynb` - `machine_translation.html` - You can export the notebook by navigating to **File -> Download as -> HTML (.html)**. ### Generate the html **Save your notebook before running the next cell to generate the HTML output.** Then submit your project. ``` # Save before you run this cell! !!jupyter nbconvert *.ipynb ``` ## Optional Enhancements This project focuses on learning various network architectures for machine translation, but we don't evaluate the models according to best practices by splitting the data into separate test & training sets -- so the model accuracy is overstated. Use the [`sklearn.model_selection.train_test_split()`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function to create separate training & test datasets, then retrain each of the models using only the training set and evaluate the prediction accuracy using the hold out test set. Does the "best" model change?
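A minimal sketch of that suggestion, assuming the preprocessed arrays from earlier in the notebook (`tmp_x` and `preproc_french_sentences`) are still in scope and reusing `model_final`; the 80/20 split and the `random_state` value are arbitrary choices, not part of the original project:

```
from sklearn.model_selection import train_test_split

# Hold out 20% of the sentence pairs for testing (random_state fixed for repeatability).
x_train, x_test, y_train, y_test = train_test_split(
    tmp_x, preproc_french_sentences, test_size=0.2, random_state=42)

# Retrain on the training split only ...
held_out_model = model_final(x_train.shape, y_train.shape[1],
                             english_vocab_size, french_vocab_size)
held_out_model.fit(x_train, y_train, batch_size=1024, epochs=10, validation_split=0.1)

# ... and report loss/accuracy on the unseen test split.
print(held_out_model.evaluate(x_test, y_test, verbose=0))
```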
# Unit 5 - Financial Planning ``` # Initial imports import os import requests import pandas as pd from dotenv import load_dotenv import alpaca_trade_api as tradeapi from MCForecastTools import MCSimulation # date here from datetime import date %matplotlib inline # Load .env enviroment variables load_dotenv() ``` ## Part 1 - Personal Finance Planner ## Collect Crypto Prices Using the `requests` Library ``` # Set current amount of crypto assets # YOUR CODE HERE! my_btc = 1.2 my_eth = 5.3 # Crypto API URLs btc_url = "https://api.alternative.me/v2/ticker/Bitcoin/?convert=CAD" eth_url = "https://api.alternative.me/v2/ticker/Ethereum/?convert=CAD" # response_data = requests.get(create_deck_url).json() # response_data btc_resp = requests.get(btc_url).json() btc_price = btc_resp['data']['1']['quotes']['USD']['price'] my_btc_value =my_btc * btc_price eth_resp = requests.get(eth_url).json() eth_price = eth_resp['data']['1027']['quotes']['USD']['price'] my_eth_value = my_eth * eth_price print(f"The current value of your {my_btc} BTC is ${my_btc_value:0.2f}") print(f"The current value of your {my_eth} ETH is ${my_eth_value:0.2f}") ``` ### Collect Investments Data Using Alpaca: `SPY` (stocks) and `AGG` (bonds) ``` # Current amount of shares # Create two variables named my_agg and my_spy and set them equal to 200 and 50, respectively. # YOUR CODE HERE! my_agg = 200 my_spy = 50 # Set Alpaca API key and secret # YOUR CODE HERE! alpaca_api_key = os.getenv("ALPACA_API_KEY") alpaca_secret_key = os.getenv("ALPACA_SECRET_KEY") # Create the Alpaca API object # YOUR CODE HERE! api = tradeapi.REST( alpaca_api_key, alpaca_secret_key, api_version = "v2" ) # Format current date as ISO format # YOUR CODE HERE! # use "2021-04-16" so weekend gives actual data start_date = pd.Timestamp("2021-04-16", tz="America/New_York").isoformat() today_date = pd.Timestamp(date.today(), tz="America/New_York").isoformat() # Set the tickers tickers = ["AGG", "SPY"] # Set timeframe to '1D' for Alpaca API timeframe = "1D" # Get current closing prices for SPY and AGG # YOUR CODE HERE! ticker_data = api.get_barset( tickers, timeframe, start=start_date, end=start_date, ).df # Preview DataFrame # YOUR CODE HERE! ticker_data # Pick AGG and SPY close prices # YOUR CODE HERE! agg_close_price = ticker_data['AGG']['close'][0] spy_close_price = ticker_data['SPY']['close'][0] # Print AGG and SPY close prices print(f"Current AGG closing price: ${agg_close_price}") print(f"Current SPY closing price: ${spy_close_price}") # Compute the current value of shares # YOUR CODE HERE! my_spy_value = spy_close_price * my_spy my_agg_value = agg_close_price * my_agg # Print current value of share print(f"The current value of your {my_spy} SPY shares is ${my_spy_value:0.2f}") print(f"The current value of your {my_agg} AGG shares is ${my_agg_value:0.2f}") ``` ### Savings Health Analysis ``` # Set monthly household income # YOUR CODE HERE! monthly_income = 12000 # Create savings DataFrame # YOUR CODE HERE! df_savings = pd.DataFrame([my_btc_value+ my_eth_value, my_spy_value + my_agg_value], columns = ['amount'], index = ['crypto', 'shares']) # Display savings DataFrame display(df_savings) # Plot savings pie chart # YOUR CODE HERE! df_savings['amount'].plot.pie(y= ['crypto', 'shares']) # how to put label # Set ideal emergency fund emergency_fund = monthly_income * 3 # Calculate total amount of savings # YOUR CODE HERE! total_saving = df_savings['amount'].sum() # Validate saving health # YOUR CODE HERE! 
# If total savings are greater than the emergency fund, display a message congratulating the person for having enough money in this fund. # If total savings are equal to the emergency fund, display a message congratulating the person on reaching this financial goal. # If total savings are less than the emergency fund, display a message showing how many dollars away the person is from reaching the goal. if total_saving > emergency_fund: print ('Congratulations! You have enough money in your emergency fund.') elif total_saving == emergency_fund: print ('Contratulations you reached your financial goals ') else: print ('You are making great progress. You need to save $ {round((emergency_fund - total_saving), 2)}') ``` ## Part 2 - Retirement Planning ### Monte Carlo Simulation #### Hassan's Note for some reason AlPaca would not let me get more than 1000 records. To get 5 years data (252 * 5), I had to break it up into two reads and concat the data. ``` # Set start and end dates of five years back from today. # Sample results may vary from the solution based on the time frame chosen start_date1 = pd.Timestamp('2015-08-07', tz='America/New_York').isoformat() end_date1 = pd.Timestamp('2017-08-07', tz='America/New_York').isoformat() start_date2 = pd.Timestamp('2017-08-08', tz='America/New_York').isoformat() end_date2 = pd.Timestamp('2020-08-07', tz='America/New_York').isoformat() # end_date1 = pd.Timestamp('2019-08-07', tz='America/New_York').isoformat() # hits 1000 item limit, have to do in two batches and pringt # Get 5 years' worth of historical data for SPY and AGG # YOUR CODE HERE! # Display sample data df_stock_data.head() # Get 5 years' worth of historical data for SPY and AGG # create two dataframes and concaternate them # first period data frame df_stock_data1 = api.get_barset( tickers, timeframe, start = start_date1, end = end_date1, limit = 1000 ).df # second period dataframe df_stock_data2 = api.get_barset( tickers, timeframe, start = start_date2, end = end_date2, limit = 1000 ).df df_stock_data = pd.concat ([df_stock_data1, df_stock_data2], axis = 0, join = 'inner') print (f'stock data head: ') print (df_stock_data.head(5)) print (f'\nstock data 1 tail: ') print (df_stock_data.tail(5)) # print (f'stock data 1 head: {start_date1}') # print (df_stock_data1.head(5)) # print (f'\nstock data 1 tail: {end_date1}') # print (df_stock_data1.tail(5)) # print (f'\nstock data 2 head: {start_date2}') # print (df_stock_data2.head(5)) # print (f'\nstock data 2 tail: {end_date2}') # print (df_stock_data2.tail(5)) # delete # # Get 5 years' worth of historical data for SPY and AGG # # YOUR CODE HERE! # df_stock_data = api.get_barset( # tickers, # timeframe, # start = start_date, # end = end_date, # limit = 1000 # ).df # # Display sample data # # to fix. # df_stock_data.head() # Configuring a Monte Carlo simulation to forecast 30 years cumulative returns # YOUR CODE HERE! MC_even_dist = MCSimulation( portfolio_data = df_stock_data, weights = [.4, .6], num_simulation = 500, num_trading_days = 252*30 ) # Printing the simulation input data # YOUR CODE HERE! # Printing the simulation input data # YOUR CODE HERE! MC_even_dist.portfolio_data.head() # Running a Monte Carlo simulation to forecast 30 years cumulative returns # YOUR CODE HERE! # Running a Monte Carlo simulation to forecast 30 years cumulative returns # YOUR CODE HERE! MC_even_dist.calc_cumulative_return() # Plot simulation outcomes # YOUR CODE HERE! 
```
# Plot simulation outcomes
# YOUR CODE HERE!
line_plot = MC_even_dist.plot_simulation()

# Plot probability distribution and confidence intervals
# YOUR CODE HERE!
dist_plot = MC_even_dist.plot_distribution()
```

### Retirement Analysis

```
# Fetch summary statistics from the Monte Carlo simulation results
# YOUR CODE HERE!
tbl = MC_even_dist.summarize_cumulative_return()

# Print summary statistics
print(tbl)
```

### Calculate the expected portfolio return at the 95% lower and upper confidence intervals based on a `$20,000` initial investment.

```
# Set initial investment
initial_investment = 20000

# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $20,000
# YOUR CODE HERE!
ci_lower = round(tbl[8] * initial_investment, 2)
ci_upper = round(tbl[9] * initial_investment, 2)

# Print results
print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio"
      f" over the next 30 years will end within the range of"
      f" ${ci_lower} and ${ci_upper}")
```

### Calculate the expected portfolio return at the `95%` lower and upper confidence intervals based on a `50%` increase in the initial investment.

```
# Set initial investment
initial_investment = 20000 * 1.5

# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $30,000
# YOUR CODE HERE!
ci_lower = round(tbl[8] * initial_investment, 2)
ci_upper = round(tbl[9] * initial_investment, 2)

# Print results
print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio"
      f" over the next 30 years will end within the range of"
      f" ${ci_lower} and ${ci_upper}")
```
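In the two cells above, `tbl[8]` and `tbl[9]` are the 95% lower and upper cumulative-return multipliers from the simulation summary, scaled by the starting balance. A minimal sketch of that projection as a reusable helper is shown below; `project_95ci` is hypothetical, and the `"95% CI Lower"` / `"95% CI Upper"` labels are an assumption about how `summarize_cumulative_return()` names its rows, with the positional indices used in this notebook as a fallback.

```
# Hypothetical helper: scale the 95% CI cumulative-return multipliers by an initial investment.
def project_95ci(summary, initial_investment):
    try:
        lower = summary.loc["95% CI Lower"]   # assumed label in the summary Series
        upper = summary.loc["95% CI Upper"]
    except KeyError:
        lower, upper = summary.iloc[8], summary.iloc[9]  # positional fallback, as used above
    return round(lower * initial_investment, 2), round(upper * initial_investment, 2)

# Usage:
# ci_lower, ci_upper = project_95ci(tbl, 20000)
```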
## Optional Challenge - Early Retirement

### Five Years Retirement Option

```
# Configuring a Monte Carlo simulation to forecast 5 years cumulative returns
# YOUR CODE HERE!
MC_even_dist = MCSimulation(
    portfolio_data=df_stock_data,
    weights=[.4, .6],
    num_simulation=500,
    num_trading_days=252*5
)

# Running a Monte Carlo simulation to forecast 5 years cumulative returns
# YOUR CODE HERE!
MC_even_dist.calc_cumulative_return()

# Plot simulation outcomes
# YOUR CODE HERE!
line_plot = MC_even_dist.plot_simulation()

# Plot probability distribution and confidence intervals
# YOUR CODE HERE!
dist_plot = MC_even_dist.plot_distribution()

# Fetch summary statistics from the Monte Carlo simulation results
# YOUR CODE HERE!
tbl = MC_even_dist.summarize_cumulative_return()

# Print summary statistics
print(tbl)

# Set initial investment
# YOUR CODE HERE!
initial_investment = 60000

# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $60,000
# YOUR CODE HERE!
ci_lower_five = round(tbl[8] * initial_investment, 2)
ci_upper_five = round(tbl[9] * initial_investment, 2)

# Print results
print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio"
      f" over the next 5 years will end within the range of"
      f" ${ci_lower_five} and ${ci_upper_five}")
```

### Ten Years Retirement Option

```
# Configuring a Monte Carlo simulation to forecast 10 years cumulative returns
# YOUR CODE HERE!
MC_even_dist = MCSimulation(
    portfolio_data=df_stock_data,
    weights=[.4, .6],
    num_simulation=500,
    num_trading_days=252*10
)

# Running a Monte Carlo simulation to forecast 10 years cumulative returns
# YOUR CODE HERE!
MC_even_dist.calc_cumulative_return()

# Plot simulation outcomes
# YOUR CODE HERE!
line_plot = MC_even_dist.plot_simulation()

# Plot probability distribution and confidence intervals
# YOUR CODE HERE!
dist_plot = MC_even_dist.plot_distribution()

# Fetch summary statistics from the Monte Carlo simulation results
# YOUR CODE HERE!
tbl = MC_even_dist.summarize_cumulative_return()

# Print summary statistics
print(tbl)

# Set initial investment
# YOUR CODE HERE!
initial_investment = 60000

# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $60,000
# YOUR CODE HERE!
ci_lower_ten = round(tbl[8] * initial_investment, 2)
ci_upper_ten = round(tbl[9] * initial_investment, 2)

# Print results
print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio"
      f" over the next 10 years will end within the range of"
      f" ${ci_lower_ten} and ${ci_upper_ten}")
```
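The 5-, 10-, and 30-year scenarios above repeat the same configure/run/summarize/project steps. Below is a minimal sketch that wraps them in one function; `forecast_retirement` is a hypothetical helper and assumes the `MCSimulation` interface used in this notebook (`calc_cumulative_return`, `summarize_cumulative_return`) and the same positional 95% CI indices.

```
# Hypothetical wrapper around the repeated scenario cells above.
from MCForecastTools import MCSimulation

def forecast_retirement(stock_data, years, initial_investment, weights=(0.4, 0.6), n_sims=500):
    sim = MCSimulation(
        portfolio_data=stock_data,
        weights=list(weights),
        num_simulation=n_sims,
        num_trading_days=252 * years,
    )
    sim.calc_cumulative_return()
    summary = sim.summarize_cumulative_return()
    ci_lower = round(summary[8] * initial_investment, 2)   # 95% CI lower multiplier, as above
    ci_upper = round(summary[9] * initial_investment, 2)   # 95% CI upper multiplier, as above
    print(f"There is a 95% chance that an initial investment of ${initial_investment}"
          f" over the next {years} years will end within the range of ${ci_lower} and ${ci_upper}")
    return sim, summary

# Usage:
# sim_5, tbl_5 = forecast_retirement(df_stock_data, years=5, initial_investment=60000)
# sim_10, tbl_10 = forecast_retirement(df_stock_data, years=10, initial_investment=60000)
```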
### Abstract Factory Design Pattern

>An abstract factory is a creational design pattern that lets you create families of related objects without coupling client code to the concrete classes of those objects. The pattern is implemented by defining an abstract class (for example, `Factory`) that serves as an interface for creating system components; concrete classes then implement this interface.

https://py.checkio.org/blog/design-patterns-part-1/

```
class AbstractFactory:
    def create_chair(self):
        raise NotImplementedError()
    def create_sofa(self):
        raise NotImplementedError()
    def create_table(self):
        raise NotImplementedError()

class Chair:
    def __init__(self, name):
        self._name = name
    def __str__(self):
        return self._name

class Sofa:
    def __init__(self, name):
        self._name = name
    def __str__(self):
        return self._name

class Table:
    def __init__(self, name):
        self._name = name
    def __str__(self):
        return self._name

class VictorianFactory(AbstractFactory):
    def create_chair(self):
        return Chair('victorian chair')
    def create_sofa(self):
        return Sofa('victorian sofa')
    def create_table(self):
        return Table('victorian table')

class ModernFactory(AbstractFactory):
    def create_chair(self):
        return Chair('modern chair')
    def create_sofa(self):
        return Sofa('modern sofa')
    def create_table(self):
        return Table('modern table')

class FuturisticFactory(AbstractFactory):
    def create_chair(self):
        return Chair('futuristic chair')
    def create_sofa(self):
        return Sofa('futuristic sofa')
    def create_table(self):
        return Table('futuristic table')

factory_1 = VictorianFactory()
factory_2 = ModernFactory()
factory_3 = FuturisticFactory()

print(factory_1.create_chair())
print(factory_1.create_sofa())
print(factory_1.create_table())
print(factory_2.create_chair())
print(factory_2.create_sofa())
print(factory_2.create_table())
print(factory_3.create_chair())
print(factory_3.create_sofa())
print(factory_3.create_table())
```

Example - https://py.checkio.org/mission/army-units/solve/

```
class Army:
    def train_swordsman(self, name):
        raise NotImplementedError()
    def train_lancer(self, name):
        raise NotImplementedError()
    def train_archer(self, name):
        raise NotImplementedError()

class Swordsman:
    def __init__(self, soldier_type, name, army_type):
        self.army_type = army_type + ' swordsman'
        self.name = name
        self.soldier_type = soldier_type
    def introduce(self):
        return '{} {}, {}'.format(self.soldier_type, self.name, self.army_type)

class Lancer:
    def __init__(self, soldier_type, name, army_type):
        self.army_type = army_type + ' lancer'
        self.name = name
        self.soldier_type = soldier_type
    def introduce(self):
        return '{} {}, {}'.format(self.soldier_type, self.name, self.army_type)

class Archer:
    def __init__(self, soldier_type, name, army_type):
        self.army_type = army_type + ' archer'
        self.name = name
        self.soldier_type = soldier_type
    def introduce(self):
        return '{} {}, {}'.format(self.soldier_type, self.name, self.army_type)

class AsianArmy(Army):
    def __init__(self):
        self.army_type = 'Asian'
    def train_swordsman(self, name):
        return Swordsman('Samurai', name, self.army_type)
    def train_lancer(self, name):
        return Lancer('Ronin', name, self.army_type)
    def train_archer(self, name):
        return Archer('Shinobi', name, self.army_type)

class EuropeanArmy(Army):
    def __init__(self):
        self.army_type = 'European'
    def train_swordsman(self, name):
        return Swordsman('Knight', name, self.army_type)
    def train_lancer(self, name):
        return Lancer('Raubritter', name, self.army_type)
    def train_archer(self, name):
        return Archer('Ranger', name, self.army_type)

if __name__ == '__main__':
    # These "asserts" are used only for self-checking and are not necessary for auto-testing
    my_army = EuropeanArmy()
    enemy_army = AsianArmy()
    soldier_1 = my_army.train_swordsman("Jaks")
    soldier_2 = my_army.train_lancer("Harold")
    soldier_3 = my_army.train_archer("Robin")
    soldier_4 = enemy_army.train_swordsman("Kishimoto")
    soldier_5 = enemy_army.train_lancer("Ayabusa")
    soldier_6 = enemy_army.train_archer("Kirigae")
    assert soldier_1.introduce() == "Knight Jaks, European swordsman"
    assert soldier_2.introduce() == "Raubritter Harold, European lancer"
    assert soldier_3.introduce() == "Ranger Robin, European archer"
    assert soldier_4.introduce() == "Samurai Kishimoto, Asian swordsman"
    assert soldier_5.introduce() == "Ronin Ayabusa, Asian lancer"
    assert soldier_6.introduce() == "Shinobi Kirigae, Asian archer"
    print("Coding complete? Let's try tests!")

# A more compact solution: the concrete armies only supply data (titles and region),
# while the shared Army/Fighter classes do the work.
class Army:
    def train_swordsman(self, name):
        return Swordsman(self, name)
    def train_lancer(self, name):
        return Lancer(self, name)
    def train_archer(self, name):
        return Archer(self, name)
    def introduce(self, name, army_type):
        return f'{self.title[army_type]} {name}, {self.region} {army_type}'

class Fighter:
    def __init__(self, army, name):
        self.army = army
        self.name = name
    def introduce(self):
        return self.army.introduce(self.name, self.army_type)

class Swordsman(Fighter):
    army_type = 'swordsman'

class Lancer(Fighter):
    army_type = 'lancer'

class Archer(Fighter):
    army_type = 'archer'

class AsianArmy(Army):
    title = {'swordsman': 'Samurai', 'lancer': 'Ronin', 'archer': 'Shinobi'}
    region = 'Asian'

class EuropeanArmy(Army):
    title = {'swordsman': 'Knight', 'lancer': 'Raubritter', 'archer': 'Ranger'}
    region = 'European'

if __name__ == '__main__':
    # These "asserts" are used only for self-checking and are not necessary for auto-testing
    my_army = EuropeanArmy()
    enemy_army = AsianArmy()
    soldier_1 = my_army.train_swordsman("Jaks")
    soldier_2 = my_army.train_lancer("Harold")
    soldier_3 = my_army.train_archer("Robin")
    soldier_4 = enemy_army.train_swordsman("Kishimoto")
    soldier_5 = enemy_army.train_lancer("Ayabusa")
    soldier_6 = enemy_army.train_archer("Kirigae")
    assert soldier_1.introduce() == "Knight Jaks, European swordsman"
    assert soldier_2.introduce() == "Raubritter Harold, European lancer"
    assert soldier_3.introduce() == "Ranger Robin, European archer"
    assert soldier_4.introduce() == "Samurai Kishimoto, Asian swordsman"
    assert soldier_5.introduce() == "Ronin Ayabusa, Asian lancer"
    assert soldier_6.introduce() == "Shinobi Kirigae, Asian archer"
    print("Coding complete? Let's try tests!")
```
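To make the "no coupling to concrete classes" point concrete, here is a minimal sketch of client code that depends only on the `AbstractFactory` interface from the furniture example above; `furnish_room` is a hypothetical function, not part of the quoted source.

```
# Hypothetical client: works unchanged with any concrete factory.
def furnish_room(factory: AbstractFactory):
    """Create one matching family of furniture from whichever factory is supplied."""
    return factory.create_chair(), factory.create_sofa(), factory.create_table()

for factory in (VictorianFactory(), ModernFactory(), FuturisticFactory()):
    chair, sofa, table = furnish_room(factory)
    print(chair, '|', sofa, '|', table)
```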
# Pytorch Basic ``` import torch import torch.nn as nn import torchvision import torchvision.transforms as transforms import matplotlib.pyplot as plt from IPython.display import clear_output torch.cuda.is_available() ``` ## Device ``` device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') ``` ## Hyper Parameter ``` input_size = 784 hidden_size = 500 num_class = 10 epochs = 5 batch_size = 100 lr = 0.001 ``` ## Load MNIST Dataset ``` train_dataset = torchvision.datasets.MNIST(root='../data', train=True, transform=transforms.ToTensor(), download=True) test_dataset = torchvision.datasets.MNIST(root='../data', train=False, transform=transforms.ToTensor()) print('train dataset shape : ',train_dataset.data.shape) print('test dataset shape : ',test_dataset.data.shape) plt.imshow(train_dataset.data[0]) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) ``` ## Simple Model ``` class NeuralNet(nn.Module): def __init__(self, input_size, hidden_size, num_class): super(NeuralNet, self).__init__() self.fc1 = nn.Linear(input_size,hidden_size) self.relu = nn.ReLU() self.fc2 = nn.Linear(hidden_size, num_class) def forward(self, x): out = self.fc1(x) out = self.relu(out) out = self.fc2(out) return out model = NeuralNet(input_size,hidden_size,num_class).to(device) ``` ## Loss and Optimizer ``` criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=lr) ``` ## Train ``` total_step = len(train_loader) for epoch in range(epochs): for i, (images, labels) in enumerate(train_loader): images = images.reshape(-1,28*28).to(device) labels = labels.to(device) outputs = model(images) loss = criterion(outputs, labels) optimizer.zero_grad() loss.backward() optimizer.step() if (i+1) % 100 == 0: clear_output() print('EPOCH [{}/{}] STEP [{}/{}] Loss {: .4f})' .format(epoch+1, epochs, i+1, total_step, loss.item())) ``` ## Test ``` with torch.no_grad(): correct = 0 total = 0 for images, labels in test_loader: images = images.reshape(-1, 28*28).to(device) labels = labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total)) ``` ## save ``` torch.save(model.state_dict(), 'model.ckpt') ``` ---
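The notebook above ends by saving the trained weights with `torch.save`. Below is a minimal sketch of loading that checkpoint back and running a single prediction; it assumes the same `NeuralNet` class, hyperparameters, and `test_dataset` defined earlier in that notebook.

```
# Restore the checkpoint saved above and run one inference pass.
restored = NeuralNet(input_size, hidden_size, num_class).to(device)
restored.load_state_dict(torch.load('model.ckpt', map_location=device))
restored.eval()  # inference mode: no gradient-dependent layers should update

with torch.no_grad():
    sample, label = test_dataset[0]                       # (1, 28, 28) tensor and its label
    logits = restored(sample.reshape(-1, 28 * 28).to(device))
    predicted = logits.argmax(dim=1).item()
    print(f'predicted: {predicted}, actual: {label}')
```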
### Importar librerías y series de datos ``` import time start = time.time() #importar datos y librerias import numpy as np import pandas as pd from datetime import datetime import matplotlib.pyplot as plt from scipy import signal from sklearn.linear_model import LinearRegression from statsmodels.tsa.seasonal import seasonal_decompose from scipy.stats import boxcox from scipy import special #leer excel de datos y de dias especiales general = pd.read_excel (r'C:\Users\Diana\PAP\Data\Data1.xlsx') special_days= pd.read_excel (r'C:\Users\Diana\PAP\Data\Christmas.xlsx') #convertir dias especiales a fechas en python for column in special_days.columns: special_days[column] = pd.to_datetime(special_days[column]) general = general.set_index('fecha') ``` ### Establecer las funciones a utilizar ``` def kronecker(data1:'Dataframe 1',data2:'Dataframe 2'): x=0 data1_kron=data1[x:x+1] data2_kron=data2[x:x+1] Combinacion=np.kron(data1_kron,data2_kron) Combinacion=pd.DataFrame(Combinacion) for x in range(1,len(data1)): data1_kron=data1[x:x+1] data2_kron=data2[x:x+1] kron=np.kron(data1_kron,data2_kron) Kron=pd.DataFrame(kron) Combinacion=Combinacion.append(Kron) return Combinacion def regresion_linear(X:'variables para regresion',y:'datos'): global model model.fit(X, y) coefficients=model.coef_ return model.predict(X) def comparacion(real,pred): comparacion=pd.DataFrame(columns=['real','prediccion','error']) comparacion.real=real comparacion.prediccion=pred comparacion.error=np.abs((comparacion.real.values-comparacion.prediccion)/comparacion.real)*100 return comparacion ``` ### Hacer variables dummies ``` n=-10 final=general.MWh.tail(-n) onlyMWh=pd.DataFrame(general.MWh) general['Month'] = general.index.month general['Weekday_Name'] = general.index.weekday_name dates=general.index dummies = pd.get_dummies(general['Weekday_Name']).astype(int) dummies2 = pd.get_dummies(general['Month']).astype(int) Dum=pd.DataFrame(dummies.join(dummies2)) t=np.arange(0,len(onlyMWh)) Dum["t"]= np.arange(0,len(onlyMWh)) Dum["tiempo"]= np.arange(1,len(onlyMWh)+1) Dum["ones"]=np.ones(len(t)) Dum= Dum.set_index('t') Dum["Dom santo"]=0 Dum["NewYear"]=0 Dum["Constitucion"]=0 Dum["Benito"]=0 Dum["Jue santo"]=0 Dum["Vie santo"]=0 Dum["Trabajo"]=0 Dum["Madre"]=0 Dum["Grito"]=0 Dum["virgen"]=0 Dum["muertos"]=0 Dum["Virgen2"]=0 Dum["Navidad"]=0 Dum["elecciones"]=0 Dum["toma"]=0 Dum["sab santo"]=0 Dum["rev"]=0 ind=0 for date in general.index: for date2 in special_days["Dom santo"]: if date ==date2: Dum.iloc[ind,21]=1 for date2 in special_days["NewYear"]: if date ==date2: Dum.iloc[ind,22]=1 for date2 in special_days["Constitucion"]: if date ==date2: Dum.iloc[ind,23]=1 for date2 in special_days["Benito"]: if date ==date2: Dum.iloc[ind,24]=1 for date2 in special_days["Jue santo"]: if date ==date2: Dum.iloc[ind,25]=1 for date2 in special_days["Vie santo"]: if date ==date2: Dum.iloc[ind,26]=1 for date2 in special_days["Trabajo"]: if date ==date2: Dum.iloc[ind,27]=1 for date2 in special_days["Madre"]: if date ==date2: Dum.iloc[ind,28]=1 for date2 in special_days["Grito"]: if date ==date2: Dum.iloc[ind,29]=1 for date2 in special_days["virgen"]: if date ==date2: Dum.iloc[ind,30]=1 for date2 in special_days["muertos"]: if date ==date2: Dum.iloc[ind,31]=1 for date2 in special_days["Virgen2"]: if date ==date2: Dum.iloc[ind,32]=1 for date2 in special_days["Navidad"]: if date ==date2: Dum.iloc[ind,33]=1 for date2 in special_days["elecciones"]: if date ==date2: Dum.iloc[ind,34]=1 for date2 in special_days["toma"]: if date ==date2: Dum.iloc[ind,35]=1 
for date2 in special_days["sab santo"]: if date ==date2: Dum.iloc[ind,36]=1 for date2 in special_days["rev"]: if date ==date2: Dum.iloc[ind,37]=1 ind+=1 del Dum["Friday"] Dum.drop(Dum.columns[[15]], axis=1,inplace=True) ``` ### Observar descomposición ``` part=general.MWh.tail(100) result=seasonal_decompose(part, model='multiplicative') fig = result.seasonal.plot(figsize=(20,5)) ``` Al ver la decomposición, se puede ver por la forma que fourier debe estblecerse en senos y cosenos absolutos, para que se parezca a la estacionalidad de la serie. Se agrega a las variables dummies esta estacionalidad semanal, que parece ser fundamental en los datos ### Detectar efecto de las variables dummies ``` t=np.arange(1,len(onlyMWh)+1) Tiempo=pd.DataFrame(t) Tiempo["one"]=np.ones(len(onlyMWh)) Tiempo['sen']=np.abs(np.sin(((2*np.pi)/14)*t)) Tiempo['cos']=np.abs(np.cos(((2*np.pi)/14)*t)) Combinacion=kronecker(Dum,Tiempo) model = LinearRegression() prediction=regresion_linear(Combinacion[:n],general.MWh.values[:n]) plt.figure(figsize=(10,5)) plt.plot(onlyMWh.MWh.values[:n],label ="Datos") plt.plot(prediction,label="Predicción") plt.ylabel("demanda en MWh") plt.xlabel("días") plt.legend() #plt.axis([1630,1650,120000,160000]) plt.show() comp=comparacion(onlyMWh.MWh.values[:n],prediction) MAPE=comp.error.mean() print("MAPE = ",round(MAPE,4),"%") ``` ### Obtener error de datos con variables dummies vs datos reales ``` Tabla=pd.DataFrame(columns=['regresion','datos','resta']) Tabla["regresion"]=prediction Tabla["datos"]=onlyMWh.MWh.values[:n] Tabla["resta"]=Tabla.datos-Tabla.regresion plt.plot(Tabla.resta) plt.show() ``` ### Establecer las frecuencias que se debe considerar en la serie de fourier ``` f, Pxx_den = signal.periodogram(Tabla.resta, 1) plt.plot(1/f, Pxx_den) plt.xlabel('periodo') plt.ylabel('PSD') plt.show() top_50_periods = {} # get indices for 3 highest Pxx values top50_freq_indices = np.flip(np.argsort(Pxx_den), 0)[2:12] freqs = f[top50_freq_indices] power = Pxx_den[top50_freq_indices] periods = 1 / np.array(freqs) matrix=pd.DataFrame(columns=["power","periods"]) matrix.power=power matrix.periods=periods print(matrix) ``` ### Hacer la regresión del efecto cruzado de variables dummies y senos/cosenos absolutos de frecuencia de error ``` sencos = pd.DataFrame() sencos["t"]=np.arange(1,len(onlyMWh)+1) for i in matrix.periods: sencos["{}_sen".format(i)] = np.abs(np.sin(((2*np.pi)/i)*t)) sencos["{}_cos".format(i)] = np.abs(np.cos(((2*np.pi)/i)*t)) sencos["unos"] = 1 sencos['sen']=np.abs(np.sin(((2*np.pi)/14)*t)) sencos['cos']=np.abs(np.cos(((2*np.pi)/14)*t)) sencos['sen1']=np.abs(np.sin(((2*np.pi)/365)*t)) sencos['cos1']=np.abs(np.cos(((2*np.pi)/365)*t)) sencos['sen2']=np.abs(np.sin(((2*np.pi)/28)*t)) sencos['cos2']=np.abs(np.cos(((2*np.pi)/28)*t)) sencos_test=sencos[n:] sencos_train=sencos[0:n] Dum_test=Dum[n:] Dum_train=Dum[0:n] Combinacion=kronecker(Dum_train,sencos_train) model = LinearRegression() prediction=regresion_linear(Combinacion,general.MWh.values[0:n]) ``` ### MAPE de la regresion ``` plt.figure(figsize=(10,5)) plt.plot(onlyMWh.MWh[0:n].values,label ="Datos") plt.plot(prediction,label="Predicción") plt.ylabel("demanda en MWh") plt.xlabel("días") plt.legend() plt.show() #%%obtener mape de regresión comp=comparacion(onlyMWh.MWh.values[:n],prediction) MAPE=comp.error.mean() print("MAPE = ",round(MAPE,4),"%") ``` ### Graficar residuales de la regresión ``` Tabla=pd.DataFrame(columns=['regresion','datos','resta']) Tabla["regresion"]=prediction Tabla["datos"]=onlyMWh.MWh[0:n].values 
Tabla["resta"]=Tabla.datos-Tabla.regresion plt.plot(Tabla.resta) plt.show() plt.hist(Tabla["resta"],bins=50) plt.show() resta=pd.DataFrame(Tabla["resta"]) from statsmodels.tsa.arima_model import ARIMA mod = ARIMA(resta, order=(1,0,4)) results = mod.fit() plt.plot(resta) plt.plot(results.fittedvalues, color='red') T=pd.DataFrame(columns=['regresion','datos','nuevo']) T["regresion"]=results.fittedvalues T["datos"]=resta T["nuevo"]=T.datos-T.regresion plt.plot(T.nuevo) plt.show() plt.figure(figsize=(10,5)) plt.plot(onlyMWh.MWh[0:n].values,label="Reales") plt.plot(prediction+results.fittedvalues,label="Predicción") #plt.axis([1630,1650,120000,160000]) plt.ylabel("demanda en MWh") plt.xlabel("días") plt.legend() plt.show() #%%obtener mape de regresión comp=comparacion(onlyMWh.MWh[0:n].values,prediction+results.fittedvalues) MAPE=comp.error.mean() print("MAPE = ",round(MAPE,4),"%") ``` ### Gráfica de manera dinámica ``` extra=results.predict(len(onlyMWh.MWh[0:n]),len(onlyMWh.MWh[0:n])-n) extra=extra.iloc[1:] from sklearn.linear_model import Lasso Combinaciontest=kronecker(Dum_test,sencos_test) #Initializing the Lasso Regressor with Normalization Factor as True lasso_reg = Lasso(normalize=True) #Fitting the Training data to the Lasso regressor lasso_reg.fit(Combinacion,onlyMWh.MWh[0:n]) coeff = lasso_reg.coef_ #coeff #Predicting for X_test y_pred_lass =lasso_reg.predict(Combinaciontest) coeff = np.sum(abs(lasso_reg.coef_)==0) coeff len(lasso_reg.coef_) #comb=Combinacion #comb2=Combinaciontest #x=np.where(lasso_reg.coef_==0) #comb=comb.drop(comb.columns[x], axis=1) #comb2=comb2.drop(comb2.columns[x], axis=1) #from sklearn.linear_model import HuberRegressor #huber = HuberRegressor().fit(comb,onlyMWh.MWh[0:n]) #hubpredict=huber.predict(comb2) ``` ### todo para pronóstico ``` comp_pronostico=comparacion(final,y_pred_lass+extra.values) #comp_pronostico=comparacion(final,hubpredict+extra.values) MAPE=comp_pronostico.error.mean() plt.figure(figsize=(10,5)) plt.plot(final,label="Real") plt.plot(comp_pronostico.prediccion,label="Pronóstico") plt.ylabel("demanda en MWh") plt.xlabel("días") plt.legend() plt.show() print("MAPE = ",round(MAPE,4),"%") comp_pronostico end = time.time() print((end - start)/60) model =LinearRegression() model.fit(comb,onlyMWh.MWh[0:n]) prediction=model.predict(comb2) comp_pronostico=comparacion(final,prediction+extra.values) MAPE=comp_pronostico.error.mean() plt.figure(figsize=(10,5)) plt.plot(final,label="Real") plt.plot(comp_pronostico.prediccion,label="Pronóstico") plt.ylabel("demanda en MWh") plt.xlabel("días") plt.legend() plt.show() print("MAPE = ",round(MAPE,4),"%") comp_pronostico 799,39.39 58.39 13.01 lasso_reg = Lasso(normalize=True) #Fitting the Training data to the Lasso regressor lasso_reg.fit(comb,onlyMWh.MWh[0:n]) coeff = lasso_reg.coef_ #coeff #Predicting for X_test y_pred_lass =lasso_reg.predict(comb2) comp_pronostico=comparacion(final,y_pred_lass+extra.values) MAPE=comp_pronostico.error.mean() plt.figure(figsize=(10,5)) plt.plot(final,label="Real") plt.plot(comp_pronostico.prediccion,label="Pronóstico") plt.ylabel("demanda en MWh") plt.xlabel("días") plt.legend() plt.show() print("MAPE = ",round(MAPE,4),"%") #coeff = lasso_reg.coef_ #coeff ```