Dataset schema (column, type, observed range):

| Column | Type | Range |
| --- | --- | --- |
| hexsha | stringlengths | 40 – 40 |
| size | int64 | 6 – 14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6 – 260 |
| max_stars_repo_name | stringlengths | 6 – 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 – 41 |
| max_stars_repo_licenses | sequence | |
| max_stars_count | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 – 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 – 24 |
| max_issues_repo_path | stringlengths | 6 – 260 |
| max_issues_repo_name | stringlengths | 6 – 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 – 41 |
| max_issues_repo_licenses | sequence | |
| max_issues_count | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 – 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 – 24 |
| max_forks_repo_path | stringlengths | 6 – 260 |
| max_forks_repo_name | stringlengths | 6 – 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 – 41 |
| max_forks_repo_licenses | sequence | |
| max_forks_count | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 – 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 – 24 |
| avg_line_length | float64 | 2 – 1.04M |
| max_line_length | int64 | 2 – 11.2M |
| alphanum_fraction | float64 | 0 – 1 |
| cells | sequence | |
| cell_types | sequence | |
| cell_type_groups | sequence | |
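The rows that follow are sample records in this schema. As a minimal sketch of how a dataset with these columns might be loaded and inspected, assuming it is published in a Hugging Face `datasets`-compatible format (the dataset ID `your-org/jupyter-notebooks` and the `train` split below are placeholders, not real names):

```python
# Minimal sketch: stream a few records from a dataset with the schema above.
# The dataset ID is a placeholder; substitute the real one before running.
from datasets import load_dataset

ds = load_dataset("your-org/jupyter-notebooks", split="train", streaming=True)

for i, row in enumerate(ds):
    # Column names taken from the schema table above.
    print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
    print("size:", row["size"],
          "avg_line_length:", row["avg_line_length"],
          "alphanum_fraction:", row["alphanum_fraction"])
    if i == 2:  # peek at the first three rows only
        break
```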
d0a71daa81895ef9de8d99c03fa5b6c2d4ee2cfe
8,576
ipynb
Jupyter Notebook
Concert_Prep_artist_monopoly_JL.ipynb
athenian-ct-projects/Concert-Prep-Day-JL
42232a31ba35b7ff2f662a67bb2de79acbf0639e
[ "Apache-2.0" ]
null
null
null
Concert_Prep_artist_monopoly_JL.ipynb
athenian-ct-projects/Concert-Prep-Day-JL
42232a31ba35b7ff2f662a67bb2de79acbf0639e
[ "Apache-2.0" ]
null
null
null
Concert_Prep_artist_monopoly_JL.ipynb
athenian-ct-projects/Concert-Prep-Day-JL
42232a31ba35b7ff2f662a67bb2de79acbf0639e
[ "Apache-2.0" ]
null
null
null
50.447059
270
0.555271
[ [ [ "<a href=\"https://colab.research.google.com/github/athenian-ct-projects/Concert-Prep-Day-JL/blob/master/Concert_Prep_artist_monopoly_JL.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Artist-themed Monopoly for Concert Prep Day\n\nJack L. '23", "_____no_output_____" ] ], [ [ "#art fact function\ndef fact():\n import random\n x = random.randint(1,10)\n if x == 1:\n print(\"Bob Ross's 'Joy of Painting' TV series lasted for 31 seasons\")\n if x == 2:\n print(\"In 1911 when the Mona Lisa was stolen from the Louvre, Pablo Picasso was one of the two primary suspects in the investigation \\nbefore it was found out that an employee did it\")\n if x == 3:\n print(\"Salvador Dalí thinks that he is a re-incarnation of his brother that died before he was born\")\n if x == 4:\n print(\"Vincent van Gogh only ever sold one painting in his life\")\n if x == 5:\n print(\"'The Last Supper' by Leonardo da Vinci originally featured Jesus's feet, but in 1652 when installing a door in the refectory where the painting is, \\nthe feet were cut off\")\n if x == 6:\n print(\"Vincent van Gogh's painting 'The Starry Night' is the view from a psychiatric hospital in France where van Gogh was staying when he painted it\")\n if x == 7:\n print(\"The marble that was used for Michelangelo's 'David' was used by two other sculptors before Michelangelo\")\n if x == 8:\n print(\"There are five versions of Edvard Munch’s 'The Scream'\")\n if x == 9:\n print(\"Auguste Rodin’s 'The Thinker' originally was only 70cm until he later made an enlarged version\")\n if x == 10:\n print(\"Andy Warhol's Campbell's Soup cans came in a set of thirty-two cans\")\n#Rainbow paint bucket function\ndef paint():\n import random\n y = random.randint(1,10)\n if y == 1:\n print(\"HOORAY, advance to go collect $200\")\n if y == 2:\n print(\"You commited tax fraud - go to jail. If you pass go do not collect $200.\")\n if y == 3:\n print(\"You are a guest star on a game show. Collect $100 from the bank.\")\n if y == 4:\n print(\"You drink a Sprite cranbery. Suddenly your door falls down and Lebron James walks in and hands you a fat stack of cash. Collect $500 from the bank.\")\n if y == 5:\n print(\"Some guy blows up your house with a grenade launcher like in John Wick 2. Pay the bank $200.\")\n if y == 6:\n print(\"The Great Depression happens again and your bank fails. Pay the bank all of your money (you can mortgage your artists to avoid going bankrupt).\")\n if y == 7:\n print(\"You get in a car crash while wearng a VR headset and playing a flight simulator in the car, saying 'it will be like I am flying in a plane'. \\nPay the bank $200 in medical fees\")\n if y == 8:\n print(\"Your grandfather dies and he leaves you an inheritance. You assume his massive debt and pay the bank $500.\")\n if y == 9:\n print(\"Your favorite NFL team wins the Super Bowl! Pay the bank $50 for the jersey you bought.\")\n if y == 10:\n print(\"You win the lottery but spend it all on worthless stuff. Roll the dice again\")\n#Instructions\nprint(\"Welcome to Artist Monopoly!\")\nprint(\"This is just like regular monopoly but with some IMPORTANT twists:\")\nprint(\"To roll dice just ask siri to roll a pair of dice\")\nprint(\"*there are more spaces, and railroads have been replaced with more modern airlines\")\nprint(\"*there are auction spaces now. 
If you land on one you can buy any artist on the board but you have to pay double (only one artist each time you land on the spot).\")\nprint(\"*trading artists for money and other artists are encoureged but you can only propose a trade on your turn.\")\nprint(\"*chance spaces have been replaced by artist facts. If you land on that space, type the word 'fact' into the computer.\")\nprint(\"*community chests have been replaced by rainbow paint buckets. If you land on that space, type the word 'paint' into the computer.\\n\")\nprint(\"IMPORTANT: When someone goes bankrupt, type the word 'player' into the computer.\\n\")\nplayer = int(input(\"How many people are playing today? \"))\nprint(\"Alright you're ready to play. Everyone starts with $1500\")\n\n#Gameplay while loop\nwhile player > 1:\n tip = input()\n if tip == \"fact\":\n fact()\n elif tip == \"paint\":\n paint()\n elif tip == \"player\":\n player = player - 1\n else:\n print(\"You must have spelled somthing wrong. Try again.\")\n\n#Final score calculating\nprint(\"Looks like we have a winner! Now lets calculate your final score.\")\npig = int(input(\"How much money did the winner have in the end? \"))\nfig = 0\nfor z in range(1,pig):\n fig = fig + z\nprint(\"The final score of the winner is:\")\nprint(fig)\nprint(\"Thanks for playing!\")", "Welcome to Artist Monopoly!\nThis is just like regular monopoly but with some IMPORTANT twists:\nTo roll dice just ask siri to roll a pair of dice\n*there are more spaces, and railroads have been replaced with more modern airlines\n*there are auction spaces now. If you land on one you can buy any artist on the board but you have to pay double (only one artist each time you land on the spot).\n*trading artists for money and other artists are encoureged but you can only propose a trade on your turn.\n*chance spaces have been replaced by artist facts. If you land on that space, type the word 'fact' into the computer.\n*community chests have been replaced by rainbow paint buckets. If you land on that space, type the word 'paint' into the computer.\n\nIMPORTANT: When someone goes bankrupt, type the word 'player' into the computer.\n\n" ] ], [ [ "https://www.bocadolobo.com/blog/art/famous-artists-time/\n\nhttps://www.mentalfloss.com/article/5838715-things-you-didnt-know-about-famous-art\n\nhttps://medium.com/@inna_13021/taxes-youll-have-to-pay-when-purchasing-or-selling-art-a418b958c457", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
d0a72812f4638c82e131a2afb98e115df016a911
189,564
ipynb
Jupyter Notebook
ipython3/07_Input_Output.ipynb
libaiw/PythonFinTechST
7282acc5ea8e1c9e492c3174f4dbc7e2e7aa9a02
[ "CNRI-Python" ]
15
2018-07-10T09:18:23.000Z
2021-12-30T06:35:09.000Z
ipython3/07_Input_Output.ipynb
libaiw/PythonFinTechST
7282acc5ea8e1c9e492c3174f4dbc7e2e7aa9a02
[ "CNRI-Python" ]
2
2020-10-27T19:44:15.000Z
2020-11-03T23:55:36.000Z
ipython3/07_Input_Output.ipynb
libaiw/PythonFinTechST
7282acc5ea8e1c9e492c3174f4dbc7e2e7aa9a02
[ "CNRI-Python" ]
13
2018-01-08T01:10:22.000Z
2021-05-26T17:35:35.000Z
57.063215
42,648
0.765156
[ [ [ "<img src=\"http://hilpisch.com/tpq_logo.png\" alt=\"The Python Quants\" width=\"35%\" align=\"right\" border=\"0\"><br>", "_____no_output_____" ], [ "# Python for Finance", "_____no_output_____" ], [ "**Analyze Big Financial Data**\n\nO'Reilly (2014)\n\nYves Hilpisch", "_____no_output_____" ], [ "<img style=\"border:0px solid grey;\" src=\"http://hilpisch.com/python_for_finance.png\" alt=\"Python for Finance\" width=\"30%\" align=\"left\" border=\"0\">", "_____no_output_____" ], [ "**Buy the book ** |\n<a href='http://shop.oreilly.com/product/0636920032441.do' target='_blank'>O'Reilly</a> |\n<a href='http://www.amazon.com/Yves-Hilpisch/e/B00JCYHHJM' target='_blank'>Amazon</a>\n\n**All book codes & IPYNBs** |\n<a href=\"http://oreilly.quant-platform.com\">http://oreilly.quant-platform.com</a>\n\n**The Python Quants GmbH** | <a href='http://tpq.io' target='_blank'>http://tpq.io</a>\n\n**Contact us** | <a href='mailto:[email protected]'>[email protected]</a>", "_____no_output_____" ], [ "# Input-Output Operations", "_____no_output_____" ] ], [ [ "from pylab import plt\nplt.style.use('ggplot')\nimport matplotlib as mpl\nmpl.rcParams['font.family'] = 'serif'", "_____no_output_____" ] ], [ [ "## Basic I/O with Python", "_____no_output_____" ], [ "### Writing Objects to Disk", "_____no_output_____" ] ], [ [ "path = './data/'", "_____no_output_____" ], [ "import numpy as np\nfrom random import gauss", "_____no_output_____" ], [ "a = [gauss(1.5, 2) for i in range(1000000)]\n # generation of normally distributed randoms", "_____no_output_____" ], [ "import pickle", "_____no_output_____" ], [ "pkl_file = open(path + 'data.pkl', 'wb')\n # open file for writing\n # Note: existing file might be overwritten", "_____no_output_____" ], [ "%time pickle.dump(a, pkl_file)", "CPU times: user 28.9 ms, sys: 16.2 ms, total: 45.1 ms\nWall time: 47.1 ms\n" ], [ "pkl_file", "_____no_output_____" ], [ "pkl_file.close()", "_____no_output_____" ], [ "ll $path*", "-rw-r--r-- 1 yves staff 9002006 Jul 11 19:24 ./data/data.pkl\r\n" ], [ "pkl_file = open(path + 'data.pkl', 'rb') # open file for reading", "_____no_output_____" ], [ "%time b = pickle.load(pkl_file)", "CPU times: user 38.3 ms, sys: 17.1 ms, total: 55.4 ms\nWall time: 54.5 ms\n" ], [ "b[:5]", "_____no_output_____" ], [ "a[:5]", "_____no_output_____" ], [ "np.allclose(np.array(a), np.array(b))", "_____no_output_____" ], [ "np.sum(np.array(a) - np.array(b))", "_____no_output_____" ], [ "pkl_file = open(path + 'data.pkl', 'wb') # open file for writing", "_____no_output_____" ], [ "%time pickle.dump(np.array(a), pkl_file)", "CPU times: user 29.3 ms, sys: 9.85 ms, total: 39.2 ms\nWall time: 40.1 ms\n" ], [ "%time pickle.dump(np.array(a) ** 2, pkl_file)", "CPU times: user 29.7 ms, sys: 9.38 ms, total: 39.1 ms\nWall time: 41.2 ms\n" ], [ "pkl_file.close()", "_____no_output_____" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 16000322 Jul 11 19:24 ./data/data.pkl\r\n" ], [ "pkl_file = open(path + 'data.pkl', 'rb') # open file for reading", "_____no_output_____" ], [ "x = pickle.load(pkl_file)\nx", "_____no_output_____" ], [ "y = pickle.load(pkl_file)\ny", "_____no_output_____" ], [ "pkl_file.close()\n", "_____no_output_____" ], [ "pkl_file = open(path + 'data.pkl', 'wb') # open file for writing\npickle.dump({'x' : x, 'y' : y}, pkl_file)\npkl_file.close()", "_____no_output_____" ], [ "pkl_file = open(path + 'data.pkl', 'rb') # open file for writing\ndata = pickle.load(pkl_file)\npkl_file.close()\nfor key in data.keys():\n print(key, data[key][:4])", "x [ 1.58130824 
0.91925987 3.71006765 -0.97652614]\ny [ 2.50053574 0.84503871 13.76460194 0.95360331]\n" ], [ "!rm -f $path*", "_____no_output_____" ] ], [ [ "### Reading and Writing Text Files", "_____no_output_____" ] ], [ [ "rows = 5000\na = np.random.standard_normal((rows, 5)) # dummy data", "_____no_output_____" ], [ "a.round(4)", "_____no_output_____" ], [ "import pandas as pd\nt = pd.date_range(start='2014/1/1', periods=rows, freq='H')\n # set of hourly datetime objects", "_____no_output_____" ], [ "t", "_____no_output_____" ], [ "csv_file = open(path + 'data.csv', 'w') # open file for writing", "_____no_output_____" ], [ "header = 'date,no1,no2,no3,no4,no5\\n'\ncsv_file.write(header)", "_____no_output_____" ], [ "for t_, (no1, no2, no3, no4, no5) in zip(t, a):\n s = '%s,%f,%f,%f,%f,%f\\n' % (t_, no1, no2, no3, no4, no5)\n csv_file.write(s)\ncsv_file.close()", "_____no_output_____" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 337462 Jul 11 19:24 ./data/data.csv\r\n" ], [ "csv_file = open(path + 'data.csv', 'r') # open file for reading", "_____no_output_____" ], [ "for i in range(5):\n print(csv_file.readline(), end='')", "date,no1,no2,no3,no4,no5\n2014-01-01 00:00:00,-0.792854,-0.619980,0.847537,-0.519209,0.710493\n2014-01-01 01:00:00,0.248792,0.075653,0.896322,1.223483,-0.583715\n2014-01-01 02:00:00,0.294116,-0.228455,0.660129,-0.395653,-0.348395\n2014-01-01 03:00:00,0.458356,-0.646949,1.601538,1.342564,-0.970662\n" ], [ "csv_file = open(path + 'data.csv', 'r')\ncontent = csv_file.readlines()\nfor line in content[:5]:\n print(line, end='')", "date,no1,no2,no3,no4,no5\n2014-01-01 00:00:00,-0.792854,-0.619980,0.847537,-0.519209,0.710493\n2014-01-01 01:00:00,0.248792,0.075653,0.896322,1.223483,-0.583715\n2014-01-01 02:00:00,0.294116,-0.228455,0.660129,-0.395653,-0.348395\n2014-01-01 03:00:00,0.458356,-0.646949,1.601538,1.342564,-0.970662\n" ], [ "csv_file.close()\n!rm -f $path*", "_____no_output_____" ] ], [ [ "### SQL Databases", "_____no_output_____" ] ], [ [ "import sqlite3 as sq3", "_____no_output_____" ], [ "query = 'CREATE TABLE numbs (Date date, No1 real, No2 real)'", "_____no_output_____" ], [ "con = sq3.connect(path + 'numbs.db')", "_____no_output_____" ], [ "con.execute(query)", "_____no_output_____" ], [ "con.commit()", "_____no_output_____" ], [ "import datetime as dt", "_____no_output_____" ], [ "con.execute('INSERT INTO numbs VALUES(?, ?, ?)',\n (dt.datetime.now(), 0.12, 7.3))", "_____no_output_____" ], [ "data = np.random.standard_normal((10000, 2)).round(5)", "_____no_output_____" ], [ "for row in data:\n con.execute('INSERT INTO numbs VALUES(?, ?, ?)',\n (dt.datetime.now(), row[0], row[1]))\ncon.commit()", "_____no_output_____" ], [ "con.execute('SELECT * FROM numbs').fetchmany(10)", "_____no_output_____" ], [ "pointer = con.execute('SELECT * FROM numbs')", "_____no_output_____" ], [ "for i in range(3):\n print(pointer.fetchone())", "('2017-07-11 19:24:36.571687', 0.12, 7.3)\n('2017-07-11 19:24:36.588493', 0.67521, 2.22351)\n('2017-07-11 19:24:36.588669', -1.58142, 0.49044)\n" ], [ "con.close()\n!rm -f $path*", "_____no_output_____" ] ], [ [ "### Writing and Reading Numpy Arrays", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "dtimes = np.arange('2015-01-01 10:00:00', '2021-12-31 22:00:00',\n dtype='datetime64[m]') # minute intervals\nlen(dtimes)", "_____no_output_____" ], [ "dty = np.dtype([('Date', 'datetime64[m]'), ('No1', 'f'), ('No2', 'f')])\ndata = np.zeros(len(dtimes), dtype=dty)", "_____no_output_____" ], [ "data['Date'] = dtimes", 
"_____no_output_____" ], [ "a = np.random.standard_normal((len(dtimes), 2)).round(5)\ndata['No1'] = a[:, 0]\ndata['No2'] = a[:, 1]", "_____no_output_____" ], [ "%time np.save(path + 'array', data) # suffix .npy is added", "CPU times: user 3.32 ms, sys: 79.3 ms, total: 82.6 ms\nWall time: 107 ms\n" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 58901888 Jul 11 19:24 ./data/array.npy\r\n" ], [ "%time np.load(path + 'array.npy')", "CPU times: user 1.39 ms, sys: 40 ms, total: 41.4 ms\nWall time: 40.2 ms\n" ], [ "data = np.random.standard_normal((10000, 6000))", "_____no_output_____" ], [ "%time np.save(path + 'array', data) ", "CPU times: user 19.2 ms, sys: 715 ms, total: 735 ms\nWall time: 915 ms\n" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 480000080 Jul 11 19:24 ./data/array.npy\r\n" ], [ "%time np.load(path + 'array.npy')", "CPU times: user 2.04 ms, sys: 364 ms, total: 366 ms\nWall time: 416 ms\n" ], [ "data = 0.0\n!rm -f $path*", "_____no_output_____" ] ], [ [ "## I/O with pandas", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\ndata = np.random.standard_normal((1000000, 5)).round(5)\n # sample data set", "_____no_output_____" ], [ "filename = path + 'numbs'", "_____no_output_____" ] ], [ [ "### SQL Database", "_____no_output_____" ] ], [ [ "import sqlite3 as sq3", "_____no_output_____" ], [ "query = 'CREATE TABLE numbers (No1 real, No2 real,\\\n No3 real, No4 real, No5 real)'", "_____no_output_____" ], [ "con = sq3.Connection(filename + '.db')", "_____no_output_____" ], [ "con.execute(query)", "_____no_output_____" ], [ "%%time\ncon.executemany('INSERT INTO numbers VALUES (?, ?, ?, ?, ?)', data)\ncon.commit()", "CPU times: user 9.93 s, sys: 156 ms, total: 10.1 s\nWall time: 10.2 s\n" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 52633600 Jul 11 19:24 ./data/numbs.db\r\n" ], [ "%%time\ntemp = con.execute('SELECT * FROM numbers').fetchall()\nprint(temp[:2])\ntemp = 0.0", "[(-0.88408, -0.67193, 0.21988, 0.98507, 1.55431), (0.90875, 0.03152, -1.53419, -1.89065, -1.04942)]\nCPU times: user 2.27 s, sys: 186 ms, total: 2.45 s\nWall time: 2.48 s\n" ], [ "%%time\nquery = 'SELECT * FROM numbers WHERE No1 > 0 AND No2 < 0'\nres = np.array(con.execute(query).fetchall()).round(3)", "CPU times: user 932 ms, sys: 76.4 ms, total: 1.01 s\nWall time: 1.02 s\n" ], [ "res = res[::100] # every 100th result\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.plot(res[:, 0], res[:, 1], 'ro')\nplt.grid(True); plt.xlim(-0.5, 4.5); plt.ylim(-4.5, 0.5)\n# tag: scatter_query\n# title: Plot of the query result\n# size: 60", "_____no_output_____" ] ], [ [ "### From SQL to pandas", "_____no_output_____" ] ], [ [ "import pandas.io.sql as pds", "_____no_output_____" ], [ "%time data = pds.read_sql('SELECT * FROM numbers', con)", "CPU times: user 2.53 s, sys: 211 ms, total: 2.74 s\nWall time: 2.75 s\n" ], [ "data.head()", "_____no_output_____" ], [ "%time data[(data['No1'] > 0) & (data['No2'] < 0)].head()", "CPU times: user 15.1 ms, sys: 8.72 ms, total: 23.8 ms\nWall time: 22.8 ms\n" ], [ "%%time\nres = data[['No1', 'No2']][((data['No1'] > 0.5) | (data['No1'] < -0.5))\n & ((data['No2'] < -1) | (data['No2'] > 1))]", "CPU times: user 15.2 ms, sys: 8.7 ms, total: 23.9 ms\nWall time: 22.5 ms\n" ], [ "plt.plot(res.No1, res.No2, 'ro')\nplt.grid(True); plt.axis('tight')\n# tag: data_scatter_1\n# title: Scatter plot of complex query results\n# size: 55", "_____no_output_____" ], [ "h5s = pd.HDFStore(filename + '.h5s', 'w')", "_____no_output_____" ], [ "%time h5s['data'] = data", "CPU times: user 39.2 
ms, sys: 56.9 ms, total: 96.1 ms\nWall time: 133 ms\n" ], [ "h5s", "_____no_output_____" ], [ "h5s.close()", "_____no_output_____" ], [ "%%time\nh5s = pd.HDFStore(filename + '.h5s', 'r')\ntemp = h5s['data']\nh5s.close()", "CPU times: user 5.54 ms, sys: 19.7 ms, total: 25.2 ms\nWall time: 24.1 ms\n" ], [ "np.allclose(np.array(temp), np.array(data))", "_____no_output_____" ], [ "temp = 0.0", "_____no_output_____" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 52633600 Jul 11 19:24 ./data/numbs.db\r\n-rw-r--r--@ 1 yves staff 48007192 Jul 11 19:24 ./data/numbs.h5s\r\n" ] ], [ [ "### Data as CSV File", "_____no_output_____" ] ], [ [ "%time data.to_csv(filename + '.csv')", "CPU times: user 6.43 s, sys: 257 ms, total: 6.68 s\nWall time: 6.73 s\n" ], [ "ls data/", "numbs.csv numbs.db numbs.h5s\r\n" ], [ "%%time\npd.read_csv(filename + '.csv')[['No1', 'No2',\n 'No3', 'No4']].hist(bins=20)\n# tag: data_hist_3\n# title: Histogram of 4 data sets\n# size: 60", "CPU times: user 1.25 s, sys: 151 ms, total: 1.41 s\nWall time: 1.41 s\n" ] ], [ [ "### Data as Excel File", "_____no_output_____" ] ], [ [ "%time data[:100000].to_excel(filename + '.xlsx')", "CPU times: user 28.3 s, sys: 647 ms, total: 29 s\nWall time: 29.2 s\n" ], [ "%time pd.read_excel(filename + '.xlsx', 'Sheet1').cumsum().plot()\n# tag: data_paths\n# title: Paths of random data from Excel file\n# size: 60", "CPU times: user 6.17 s, sys: 91.2 ms, total: 6.27 s\nWall time: 6.28 s\n" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 48833731 Jul 11 19:25 ./data/numbs.csv\r\n-rw-r--r--@ 1 yves staff 52633600 Jul 11 19:24 ./data/numbs.db\r\n-rw-r--r--@ 1 yves staff 48007192 Jul 11 19:24 ./data/numbs.h5s\r\n-rw-r--r--@ 1 yves staff 4371165 Jul 11 19:25 ./data/numbs.xlsx\r\n" ], [ "rm -f $path*", "_____no_output_____" ] ], [ [ "## Fast I/O with PyTables", "_____no_output_____" ] ], [ [ "import numpy as np\nimport tables as tb\nimport datetime as dt\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Working with Tables", "_____no_output_____" ] ], [ [ "filename = path + 'tab.h5'\nh5 = tb.open_file(filename, 'w') ", "_____no_output_____" ], [ "rows = 2000000", "_____no_output_____" ], [ "row_des = {\n 'Date': tb.StringCol(26, pos=1),\n 'No1': tb.IntCol(pos=2),\n 'No2': tb.IntCol(pos=3),\n 'No3': tb.Float64Col(pos=4),\n 'No4': tb.Float64Col(pos=5)\n }", "_____no_output_____" ], [ "filters = tb.Filters(complevel=0) # no compression\ntab = h5.create_table('/', 'ints_floats', row_des,\n title='Integers and Floats',\n expectedrows=rows, filters=filters)", "_____no_output_____" ], [ "tab", "_____no_output_____" ], [ "pointer = tab.row", "_____no_output_____" ], [ "ran_int = np.random.randint(0, 10000, size=(rows, 2))\nran_flo = np.random.standard_normal((rows, 2)).round(5)", "_____no_output_____" ], [ "%%time\nfor i in range(rows):\n pointer['Date'] = dt.datetime.now()\n pointer['No1'] = ran_int[i, 0]\n pointer['No2'] = ran_int[i, 1] \n pointer['No3'] = ran_flo[i, 0]\n pointer['No4'] = ran_flo[i, 1] \n pointer.append()\n # this appends the data and\n # moves the pointer one row forward\ntab.flush()", "CPU times: user 7.43 s, sys: 102 ms, total: 7.53 s\nWall time: 7.56 s\n" ], [ "tab", "_____no_output_____" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 100156248 Jul 11 19:25 ./data/tab.h5\r\n" ], [ "dty = np.dtype([('Date', 'S26'), ('No1', '<i4'), ('No2', '<i4'),\n ('No3', '<f8'), ('No4', '<f8')])\nsarray = np.zeros(len(ran_int), dtype=dty)", "_____no_output_____" ], [ "sarray", "_____no_output_____" ], [ "%%time\nsarray['Date'] 
= dt.datetime.now()\nsarray['No1'] = ran_int[:, 0]\nsarray['No2'] = ran_int[:, 1]\nsarray['No3'] = ran_flo[:, 0]\nsarray['No4'] = ran_flo[:, 1]", "CPU times: user 67.2 ms, sys: 26.8 ms, total: 94 ms\nWall time: 93.5 ms\n" ], [ "%%time\nh5.create_table('/', 'ints_floats_from_array', sarray,\n title='Integers and Floats',\n expectedrows=rows, filters=filters)", "CPU times: user 22 ms, sys: 30.8 ms, total: 52.7 ms\nWall time: 52.9 ms\n" ], [ "h5", "_____no_output_____" ], [ "h5.remove_node('/', 'ints_floats_from_array')", "_____no_output_____" ], [ "tab[:3]", "_____no_output_____" ], [ "tab[:4]['No4']", "_____no_output_____" ], [ "%time np.sum(tab[:]['No3'])", "CPU times: user 34.6 ms, sys: 63.5 ms, total: 98.1 ms\nWall time: 94.1 ms\n" ], [ "%time np.sum(np.sqrt(tab[:]['No1']))", "CPU times: user 35 ms, sys: 48 ms, total: 83 ms\nWall time: 82 ms\n" ], [ "%%time\nplt.hist(tab[:]['No3'], bins=30)\nplt.grid(True)\nprint(len(tab[:]['No3']))\n# tag: data_hist\n# title: Histogram of data\n# size: 60", "2000000\nCPU times: user 171 ms, sys: 103 ms, total: 274 ms\nWall time: 273 ms\n" ], [ "%%time\nres = np.array([(row['No3'], row['No4']) for row in\n tab.where('((No3 < -0.5) | (No3 > 0.5)) \\\n & ((No4 < -1) | (No4 > 1))')])[::100]", "CPU times: user 288 ms, sys: 95.2 ms, total: 383 ms\nWall time: 328 ms\n" ], [ "plt.plot(res.T[0], res.T[1], 'ro')\nplt.grid(True)\n# tag: scatter_data\n# title: Scatter plot of query result\n# size: 70", "_____no_output_____" ], [ "%%time\nvalues = tab.cols.No3[:]\nprint(\"Max %18.3f\" % values.max())\nprint(\"Ave %18.3f\" % values.mean())\nprint(\"Min %18.3f\" % values.min())\nprint(\"Std %18.3f\" % values.std())", "Max 5.054\nAve -0.001\nMin -5.337\nStd 1.000\nCPU times: user 24.5 ms, sys: 30.5 ms, total: 55 ms\nWall time: 53.9 ms\n" ], [ "%%time\nresults = [(row['No1'], row['No2']) for row in\n tab.where('((No1 > 9800) | (No1 < 200)) \\\n & ((No2 > 4500) & (No2 < 5500))')]\nfor res in results[:4]:\n print(res)", "(9909, 4637)\n(9918, 4986)\n(9854, 5173)\n(132, 4860)\nCPU times: user 54.8 ms, sys: 46.6 ms, total: 101 ms\nWall time: 56.6 ms\n" ], [ "%%time\nresults = [(row['No1'], row['No2']) for row in\n tab.where('(No1 == 1234) & (No2 > 9776)')]\nfor res in results:\n print(res)", "(1234, 9994)\n(1234, 9997)\n(1234, 9921)\n(1234, 9902)\n(1234, 9975)\n(1234, 9970)\nCPU times: user 28 ms, sys: 44.7 ms, total: 72.6 ms\nWall time: 51.7 ms\n" ] ], [ [ "### Working with Compressed Tables", "_____no_output_____" ] ], [ [ "filename = path + 'tab.h5c'\nh5c = tb.open_file(filename, 'w') ", "_____no_output_____" ], [ "filters = tb.Filters(complevel=4, complib='blosc')", "_____no_output_____" ], [ "tabc = h5c.create_table('/', 'ints_floats', sarray,\n title='Integers and Floats',\n expectedrows=rows, filters=filters)", "_____no_output_____" ], [ "%%time\nres = np.array([(row['No3'], row['No4']) for row in\n tabc.where('((No3 < -0.5) | (No3 > 0.5)) \\\n & ((No4 < -1) | (No4 > 1))')])[::100]", "CPU times: user 326 ms, sys: 93 ms, total: 419 ms\nWall time: 383 ms\n" ], [ "%time arr_non = tab.read()", "CPU times: user 27.7 ms, sys: 54.2 ms, total: 81.9 ms\nWall time: 85.3 ms\n" ], [ "%time arr_com = tabc.read()", "CPU times: user 30.7 ms, sys: 66.3 ms, total: 97 ms\nWall time: 106 ms\n" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 200312336 Jul 11 19:25 ./data/tab.h5\r\n-rw-r--r--@ 1 yves staff 100030828 Jul 11 19:25 ./data/tab.h5c\r\n" ], [ "h5c.close()", "_____no_output_____" ] ], [ [ "### Working with Arrays", "_____no_output_____" ] ], [ [ "%%time\narr_int = 
h5.create_array('/', 'integers', ran_int)\narr_flo = h5.create_array('/', 'floats', ran_flo)", "CPU times: user 1.91 ms, sys: 27.9 ms, total: 29.8 ms\nWall time: 49.5 ms\n" ], [ "h5", "_____no_output_____" ], [ "ll $path*", "-rw-r--r--@ 1 yves staff 262344490 Jul 11 19:25 ./data/tab.h5\r\n-rw-r--r--@ 1 yves staff 100030828 Jul 11 19:25 ./data/tab.h5c\r\n" ], [ "h5.close()", "_____no_output_____" ], [ "!rm -f $path*", "_____no_output_____" ] ], [ [ "### Out-of-Memory Computations", "_____no_output_____" ] ], [ [ "filename = path + 'array.h5'\nh5 = tb.open_file(filename, 'w') ", "_____no_output_____" ], [ "n = 100\near = h5.create_earray(h5.root, 'ear',\n atom=tb.Float64Atom(),\n shape=(0, n))", "_____no_output_____" ], [ "%%time\nrand = np.random.standard_normal((n, n))\nfor i in range(750):\n ear.append(rand)\near.flush()", "CPU times: user 32.8 ms, sys: 31 ms, total: 63.9 ms\nWall time: 85.4 ms\n" ], [ "ear", "_____no_output_____" ], [ "ear.size_on_disk", "_____no_output_____" ], [ "out = h5.create_earray(h5.root, 'out',\n atom=tb.Float64Atom(),\n shape=(0, n))", "_____no_output_____" ], [ "expr = tb.Expr('3 * sin(ear) + sqrt(abs(ear))')\n # the numerical expression as a string object\nexpr.set_output(out, append_mode=True)\n # target to store results is disk-based array", "_____no_output_____" ], [ "%time expr.eval()\n # evaluation of the numerical expression\n # and storage of results in disk-based array", "CPU times: user 108 ms, sys: 56.4 ms, total: 164 ms\nWall time: 85.1 ms\n" ], [ "out[0, :10]", "_____no_output_____" ], [ "%time imarray = ear.read()\n # read whole array into memory", "CPU times: user 20.1 ms, sys: 32.2 ms, total: 52.3 ms\nWall time: 51.8 ms\n" ], [ "import numexpr as ne\nexpr = '3 * sin(imarray) + sqrt(abs(imarray))'", "_____no_output_____" ], [ "ne.set_num_threads(16)\n%time ne.evaluate(expr)[0, :10]", "CPU times: user 101 ms, sys: 64.2 ms, total: 165 ms\nWall time: 33.7 ms\n" ], [ "h5.close()", "_____no_output_____" ], [ "!rm -f $path*", "_____no_output_____" ] ], [ [ "## Conclusions", "_____no_output_____" ], [ "## Further Reading", "_____no_output_____" ], [ "<img src=\"http://hilpisch.com/tpq_logo.png\" alt=\"The Python Quants\" width=\"35%\" align=\"right\" border=\"0\"><br>\n\n<a href=\"http://tpq.io\" target=\"_blank\">http://tpq.io</a> | <a href=\"http://twitter.com/dyjh\" target=\"_blank\">@dyjh</a> | <a href=\"mailto:[email protected]\">[email protected]</a>\n\n**Quant Platform** |\n<a href=\"http://quant-platform.com\">http://quant-platform.com</a>\n\n**Python for Finance** |\n<a href=\"http://python-for-finance.com\" target=\"_blank\">Python for Finance @ O'Reilly</a>\n\n**Derivatives Analytics with Python** |\n<a href=\"http://derivatives-analytics-with-python.com\" target=\"_blank\">Derivatives Analytics @ Wiley Finance</a>\n\n**Listed Volatility and Variance Derivatives** |\n<a href=\"http://lvvd.tpq.io\" target=\"_blank\">Listed VV Derivatives @ Wiley Finance</a>\n\n**Python Training** |\n<a href=\"http://training.tpq.io\" target=\"_blank\">Python for Finance University Certificate</a>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
d0a72e839a8c5bb2f0b9475d362a6ebfd8b9924f
320,469
ipynb
Jupyter Notebook
tools/colab/keras_mnist_tpu.ipynb
eliabruni/tpu
d81974097c4b08e813d978edc002b8e4184d3e19
[ "Apache-2.0" ]
null
null
null
tools/colab/keras_mnist_tpu.ipynb
eliabruni/tpu
d81974097c4b08e813d978edc002b8e4184d3e19
[ "Apache-2.0" ]
3
2021-03-31T20:15:40.000Z
2022-02-09T23:50:46.000Z
tools/colab/keras_mnist_tpu.ipynb
eliabruni/tpu
d81974097c4b08e813d978edc002b8e4184d3e19
[ "Apache-2.0" ]
null
null
null
252.13926
26,298
0.885362
[ [ [ "<a href=\"https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/keras_mnist_tpu.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "##### Copyright 2018 The TensorFlow Hub Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");", "_____no_output_____" ] ], [ [ "# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================", "_____no_output_____" ] ], [ [ "## MNIST on TPU (Tensor Processing Unit)<br>or GPU using tf.Keras and tf.data.Dataset\n<table><tr><td><img valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/keras-tensorflow-tpu300px.png\" width=\"300\" alt=\"Keras+Tensorflow+Cloud TPU\"></td></tr></table>\n\n\n## Overview\n\nThis sample trains an \"MNIST\" handwritten digit \nrecognition model on a GPU or TPU backend using a Keras\nmodel. Data are handled using the tf.data.Datset API. This is\na very simple sample provided for educational purposes. Do\nnot expect outstanding TPU performance on a dataset as\nsmall as MNIST.\n\nThis notebook is hosted on GitHub. To view it in its original repository, after opening the notebook, select **File > View** on GitHub.\n\n## Learning objectives\n\nIn this notebook, you will learn how to:\n* Authenticate in Colab to access Google Cloud Storage (GSC)\n* Format and prepare a dataset using tf.data.Dataset\n* Create convolutional and dense layers using tf.keras.Sequential\n* Build a Keras classifier with softmax, cross-entropy, and the adam optimizer\n* Run training and validation in Keras using Cloud TPU\n* Export a model for serving from ML Engine\n* Deploy a trained model to ML Engine\n* Test predictions on a deployed model\n\n## Instructions\n\n<h3><a href=\"https://cloud.google.com/gpu/\"><img valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/gpu-hexagon.png\" width=\"50\"></a> &nbsp;&nbsp;Train on GPU or TPU&nbsp;&nbsp; <a href=\"https://cloud.google.com/tpu/\"><img valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png\" width=\"50\"></a></h3>\n\n 1. Select a GPU or TPU backend (Runtime > Change runtime type) \n 1. Runtime > Run All <br/>(Watch out: the \"Colab-only auth\" cell requires user input. 
<br/>The \"Deploy\" part at the end requires cloud project and bucket configuration.)\n\n<h3><a href=\"https://cloud.google.com/ml-engine/\"><img valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/mlengine-hexagon.png\" width=\"50\"></a> &nbsp;&nbsp;Deploy to AI Platform</h3>\nAt the bottom of this notebook you can deploy your trained model to AI Platform for a serverless, autoscaled, REST API experience. You will need a Google Cloud project and a GCS (Google Cloud Storage) bucket for this last part.\n\nTPUs are located in Google Cloud, for optimal performance, they read data directly from Google Cloud Storage.", "_____no_output_____" ], [ "### Imports", "_____no_output_____" ] ], [ [ "import os, re, time, json\nimport PIL.Image, PIL.ImageFont, PIL.ImageDraw\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nprint(\"Tensorflow version \" + tf.__version__)", "_____no_output_____" ], [ "#@title visualization utilities [RUN ME]\n\"\"\"\nThis cell contains helper functions used for visualization\nand downloads only. You can skip reading it. There is very\nlittle useful Keras/Tensorflow code here.\n\"\"\"\n\n# Matplotlib config\nplt.rc('image', cmap='gray_r')\nplt.rc('grid', linewidth=0)\nplt.rc('xtick', top=False, bottom=False, labelsize='large')\nplt.rc('ytick', left=False, right=False, labelsize='large')\nplt.rc('axes', facecolor='F8F8F8', titlesize=\"large\", edgecolor='white')\nplt.rc('text', color='a8151a')\nplt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts\nMATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), \"mpl-data/fonts/ttf\")\n\n# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)\ndef dataset_to_numpy_util(training_dataset, validation_dataset, N):\n \n # get one batch from each: 10000 validation digits, N training digits\n batch_train_ds = training_dataset.apply(tf.data.experimental.unbatch()).batch(N)\n \n # eager execution: loop through datasets normally\n if tf.executing_eagerly():\n for validation_digits, validation_labels in validation_dataset:\n validation_digits = validation_digits.numpy()\n validation_labels = validation_labels.numpy()\n break\n for training_digits, training_labels in batch_train_ds:\n training_digits = training_digits.numpy()\n training_labels = training_labels.numpy()\n break\n \n else:\n v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next()\n t_images, t_labels = batch_train_ds.make_one_shot_iterator().get_next()\n # Run once, get one batch. 
Session.run returns numpy results\n with tf.Session() as ses:\n (validation_digits, validation_labels,\n training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels])\n \n # these were one-hot encoded in the dataset\n validation_labels = np.argmax(validation_labels, axis=1)\n training_labels = np.argmax(training_labels, axis=1)\n \n return (training_digits, training_labels,\n validation_digits, validation_labels)\n\n# create digits from local fonts for testing\ndef create_digits_from_local_fonts(n):\n font_labels = []\n img = PIL.Image.new('LA', (28*n, 28), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1\n font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)\n font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)\n d = PIL.ImageDraw.Draw(img)\n for i in range(n):\n font_labels.append(i%10)\n d.text((7+i*28,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2)\n font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded)\n font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28])\n return font_digits, font_labels\n\n# utility to display a row of digits with their predictions\ndef display_digits(digits, predictions, labels, title, n):\n plt.figure(figsize=(13,3))\n digits = np.reshape(digits, [n, 28, 28])\n digits = np.swapaxes(digits, 0, 1)\n digits = np.reshape(digits, [28, 28*n])\n plt.yticks([])\n plt.xticks([28*x+14 for x in range(n)], predictions)\n for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):\n if predictions[i] != labels[i]: t.set_color('red') # bad predictions in red\n plt.imshow(digits)\n plt.grid(None)\n plt.title(title)\n \n# utility to display multiple rows of digits, sorted by unrecognized/recognized status\ndef display_top_unrecognized(digits, predictions, labels, n, lines):\n idx = np.argsort(predictions==labels) # sort order: unrecognized first\n for i in range(lines):\n display_digits(digits[idx][i*n:(i+1)*n], predictions[idx][i*n:(i+1)*n], labels[idx][i*n:(i+1)*n],\n \"{} sample validation digits out of {} with bad predictions in red and sorted first\".format(n*lines, len(digits)) if i==0 else \"\", n)\n \n# utility to display training and validation curves\ndef display_training_curves(training, validation, title, subplot):\n if subplot%10==1: # set up the subplots on the first call\n plt.subplots(figsize=(10,10), facecolor='#F0F0F0')\n plt.tight_layout()\n ax = plt.subplot(subplot)\n ax.grid(linewidth=1, color='white')\n ax.plot(training)\n ax.plot(validation)\n ax.set_title('model '+ title)\n ax.set_ylabel(title)\n ax.set_xlabel('epoch')\n ax.legend(['train', 'valid.'])", "_____no_output_____" ] ], [ [ "*(you can double-click on collapsed cells to view the non-essential code inside)*", "_____no_output_____" ], [ "### Colab-only auth for this notebook and the TPU", "_____no_output_____" ] ], [ [ "IS_COLAB_BACKEND = 'COLAB_GPU' in os.environ # this is always set on Colab, the value is 0 or 1 depending on GPU presence\nif IS_COLAB_BACKEND:\n from google.colab import auth\n # Authenticates the Colab machine and also the TPU using your\n # credentials so that they can access your private GCS buckets.\n auth.authenticate_user()", "_____no_output_____" ] ], [ [ "### TPU or GPU detection", "_____no_output_____" ] ], [ [ "# Detect hardware\ntry:\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU 
detection\nexcept ValueError:\n tpu = None\n gpus = tf.config.experimental.list_logical_devices(\"GPU\")\n \n# Select appropriate distribution strategy\nif tpu:\n tf.tpu.experimental.initialize_tpu_system(tpu)\n strategy = tf.distribute.experimental.TPUStrategy(tpu, steps_per_run=128) # Going back and forth between TPU and host is expensive. Better to run 128 batches on the TPU before reporting back.\n print('Running on TPU ', tpu.cluster_spec().as_dict()['worker']) \nelif len(gpus) > 1:\n strategy = tf.distribute.MirroredStrategy([gpu.name for gpu in gpus])\n print('Running on multiple GPUs ', [gpu.name for gpu in gpus])\nelif len(gpus) == 1:\n strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU\n print('Running on single GPU ', gpus[0].name)\nelse:\n strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU\n print('Running on CPU')\nprint(\"Number of accelerators: \", strategy.num_replicas_in_sync)", "Running on TPU ['10.122.37.122:8470']\nNumber of accelerators: 8\n" ] ], [ [ "### Parameters", "_____no_output_____" ] ], [ [ "BATCH_SIZE = 64 * strategy.num_replicas_in_sync # Gobal batch size.\n# The global batch size will be automatically sharded across all\n# replicas by the tf.data.Dataset API. A single TPU has 8 cores.\n# The best practice is to scale the batch size by the number of\n# replicas (cores). The learning rate should be increased as well.\n\nLEARNING_RATE = 0.01\nLEARNING_RATE_EXP_DECAY = 0.6 if strategy.num_replicas_in_sync == 1 else 0.7\n# Learning rate computed later as LEARNING_RATE * LEARNING_RATE_EXP_DECAY**epoch\n# 0.7 decay instead of 0.6 means a slower decay, i.e. a faster learnign rate.\n\ntraining_images_file = 'gs://mnist-public/train-images-idx3-ubyte'\ntraining_labels_file = 'gs://mnist-public/train-labels-idx1-ubyte'\nvalidation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte'\nvalidation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte'", "_____no_output_____" ] ], [ [ "### tf.data.Dataset: parse files and prepare training and validation datasets\nPlease read the [best practices for building](https://www.tensorflow.org/guide/performance/datasets) input pipelines with tf.data.Dataset", "_____no_output_____" ] ], [ [ "def read_label(tf_bytestring):\n label = tf.io.decode_raw(tf_bytestring, tf.uint8)\n label = tf.reshape(label, [])\n label = tf.one_hot(label, 10)\n return label\n \ndef read_image(tf_bytestring):\n image = tf.io.decode_raw(tf_bytestring, tf.uint8)\n image = tf.cast(image, tf.float32)/255.0\n image = tf.reshape(image, [28*28])\n return image\n \ndef load_dataset(image_file, label_file):\n imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16)\n imagedataset = imagedataset.map(read_image, num_parallel_calls=16)\n labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8)\n labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16)\n dataset = tf.data.Dataset.zip((imagedataset, labelsdataset))\n return dataset \n \ndef get_training_dataset(image_file, label_file, batch_size):\n dataset = load_dataset(image_file, label_file)\n dataset = dataset.cache() # this small dataset can be entirely cached in RAM\n dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)\n dataset = dataset.repeat() # Mandatory for Keras for now\n dataset = dataset.batch(batch_size, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed\n dataset = dataset.prefetch(-1) # fetch next batches while 
training on the current one (-1: autotune prefetch buffer size)\n return dataset\n \ndef get_validation_dataset(image_file, label_file):\n dataset = load_dataset(image_file, label_file)\n dataset = dataset.cache() # this small dataset can be entirely cached in RAM\n dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch\n dataset = dataset.repeat() # Mandatory for Keras for now\n return dataset\n\n# instantiate the datasets\ntraining_dataset = get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)\nvalidation_dataset = get_validation_dataset(validation_images_file, validation_labels_file)", "_____no_output_____" ] ], [ [ "### Let's have a look at the data", "_____no_output_____" ] ], [ [ "N = 24\n(training_digits, training_labels,\n validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N)\ndisplay_digits(training_digits, training_labels, training_labels, \"training digits and their labels\", N)\ndisplay_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], \"validation digits and their labels\", N)\nfont_digits, font_labels = create_digits_from_local_fonts(N)", "_____no_output_____" ] ], [ [ "### Keras model: 3 convolutional layers, 2 dense layers\nIf you are not sure what cross-entropy, dropout, softmax or batch-normalization mean, head here for a crash-course: [Tensorflow and deep learning without a PhD](https://github.com/GoogleCloudPlatform/tensorflow-without-a-phd/#featured-code-sample)", "_____no_output_____" ] ], [ [ "# This model trains to 99.4% accuracy in 10 epochs (with a batch size of 64) \n\ndef make_model():\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Reshape(input_shape=(28*28,), target_shape=(28, 28, 1), name=\"image\"),\n\n tf.keras.layers.Conv2D(filters=12, kernel_size=3, padding='same', use_bias=False), # no bias necessary before batch norm\n tf.keras.layers.BatchNormalization(scale=False, center=True), # no batch norm scaling necessary before \"relu\"\n tf.keras.layers.Activation('relu'), # activation after batch norm\n\n tf.keras.layers.Conv2D(filters=24, kernel_size=6, padding='same', use_bias=False, strides=2),\n tf.keras.layers.BatchNormalization(scale=False, center=True),\n tf.keras.layers.Activation('relu'),\n\n tf.keras.layers.Conv2D(filters=32, kernel_size=6, padding='same', use_bias=False, strides=2),\n tf.keras.layers.BatchNormalization(scale=False, center=True),\n tf.keras.layers.Activation('relu'),\n\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(200, use_bias=False),\n tf.keras.layers.BatchNormalization(scale=False, center=True),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dropout(0.4), # Dropout on dense layer only\n\n tf.keras.layers.Dense(10, activation='softmax')\n ])\n\n model.compile(optimizer='adam', # learning rate will be set by LearningRateScheduler\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model\n \nwith strategy.scope():\n model = make_model()\n\n# print model layers\nmodel.summary()\n\n# set up learning rate decay\nlr_decay = tf.keras.callbacks.LearningRateScheduler(\n lambda epoch: LEARNING_RATE * LEARNING_RATE_EXP_DECAY**epoch,\n verbose=True)", "W0805 18:33:32.316657 140549664556928 deprecation.py:506] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\nInstructions for updating:\nCall 
initializer instance with the dtype argument instead of passing it to the constructor\n" ] ], [ [ "### Train and validate the model", "_____no_output_____" ] ], [ [ "EPOCHS = 10\nsteps_per_epoch = 60000//BATCH_SIZE # 60,000 items in this dataset\nprint(\"Steps per epoch: \", steps_per_epoch)\n \n# Little wrinkle: in the present version of Tensorfow (1.14), switching a TPU\n# between training and evaluation is slow (approx. 10 sec). For small models,\n# it is recommeneded to run a single eval at the end.\nhistory = model.fit(training_dataset,\n steps_per_epoch=steps_per_epoch, epochs=EPOCHS,\n callbacks=[lr_decay])\n\nfinal_stats = model.evaluate(validation_dataset, steps=1)\nprint(\"Validation accuracy: \", final_stats[1])", "Steps per epoch: 117\n" ] ], [ [ "### Visualize predictions", "_____no_output_____" ] ], [ [ "# recognize digits from local fonts\nprobabilities = model.predict(font_digits, steps=1)\npredicted_labels = np.argmax(probabilities, axis=1)\ndisplay_digits(font_digits, predicted_labels, font_labels, \"predictions from local fonts (bad predictions in red)\", N)\n\n# recognize validation digits\nprobabilities = model.predict(validation_digits, steps=1)\npredicted_labels = np.argmax(probabilities, axis=1)\ndisplay_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7)", "_____no_output_____" ] ], [ [ "## Deploy the trained model to AI Platform model serving\n\nPush your trained model to production on AI Platform for a serverless, autoscaled, REST API experience.\n\nYou will need a GCS (Google Cloud Storage) bucket and a GCP project for this.\nModels deployed on AI Platform autoscale to zero if not used. There will be no AI Platform charges after you are done testing.\nGoogle Cloud Storage incurs charges. Empty the bucket after deployment if you want to avoid these. Once the model is deployed, the bucket is not useful anymore.", "_____no_output_____" ], [ "### Configuration", "_____no_output_____" ] ], [ [ "PROJECT = \"\" #@param {type:\"string\"}\nBUCKET = \"gs://\" #@param {type:\"string\", default:\"jddj\"}\nNEW_MODEL = True #@param {type:\"boolean\"}\nMODEL_NAME = \"mnist\" #@param {type:\"string\"}\nMODEL_VERSION = \"v1\" #@param {type:\"string\"}\n\nassert PROJECT, 'For this part, you need a GCP project. Head to http://console.cloud.google.com/ and create one.'\nassert re.search(r'gs://.+', BUCKET), 'For this part, you need a GCS bucket. Head to http://console.cloud.google.com/storage and create one.'", "_____no_output_____" ] ], [ [ "### Export the model for serving from AI Platform", "_____no_output_____" ] ], [ [ "# Wrap the model so that we can add a serving function\nclass ExportModel(tf.keras.Model):\n def __init__(self, model):\n super().__init__(self)\n self.model = model\n\n # The serving function performig data pre- and post-processing.\n # Pre-processing: images are received in uint8 format converted\n # to float32 before being sent to through the model.\n # Post-processing: the Keras model outputs digit probabilities. We want\n # the detected digits. An additional tf.argmax is needed.\n # @tf.function turns the code in this function into a Tensorflow graph that\n # can be exported. 
This way, the model itself, as well as its pre- and post-\n # processing steps are exported in the SavedModel and deployed in a single step.\n @tf.function(input_signature=[tf.TensorSpec([None, 28*28], dtype=tf.uint8)])\n def my_serve(self, images):\n images = tf.cast(images, tf.float32)/255 # pre-processing\n probabilities = self.model(images) # prediction from model\n classes = tf.argmax(probabilities, axis=-1) # post-processing\n return {'digits': classes}\n \n# Must copy the model from TPU to CPU to be able to compose them.\nrestored_model = make_model()\nrestored_model.set_weights(model.get_weights()) # this copies the weights from TPU, does nothing on GPU\n\n# create the ExportModel and export it to the Tensorflow standard SavedModel format\nserving_model = ExportModel(restored_model)\nexport_path = os.path.join(BUCKET, 'keras_export', str(time.time()))\ntf.keras.backend.set_learning_phase(0) # inference only\ntf.saved_model.save(serving_model, export_path, signatures={'serving_default': serving_model.my_serve})\n\nprint(\"Model exported to: \", export_path)\n\n# Note: in Tensorflow 2.0, it will also be possible to\n# export to the SavedModel format using model.save():\n# serving_model.save(export_path, save_format='tf')", "Model exported to: gs://ml1-demo-martin/keras_export/1565030988.5267901\n" ], [ "# saved_model_cli: a useful too for troubleshooting SavedModels (the tool is part of the Tensorflow installation)\n!saved_model_cli show --dir {export_path}\n!saved_model_cli show --dir {export_path} --tag_set serve\n!saved_model_cli show --dir {export_path} --tag_set serve --signature_def serving_default\n# A note on naming:\n# The \"serve\" tag set (i.e. serving functionality) is the only one exported by tf.saved_model.save\n# All the other names are defined by the user in the fllowing lines of code:\n# def myserve(self, images):\n# ******\n# return {'digits': classes}\n# ******\n# tf.saved_model.save(..., signatures={'serving_default': serving_model.myserve})\n# ***************", "The given SavedModel contains the following tag-sets:\nserve\nThe given SavedModel MetaGraphDef contains SignatureDefs with the following keys:\nSignatureDef key: \"__saved_model_init_op\"\nSignatureDef key: \"serving_default\"\nThe given SavedModel SignatureDef contains the following input(s):\n inputs['images'] tensor_info:\n dtype: DT_UINT8\n shape: (-1, 784)\n name: serving_default_images:0\nThe given SavedModel SignatureDef contains the following output(s):\n outputs['digits'] tensor_info:\n dtype: DT_INT64\n shape: (-1)\n name: StatefulPartitionedCall:0\nMethod name is: tensorflow/serving/predict\n" ] ], [ [ "### Deploy the model\nThis uses the command-line interface. You can do the same thing through the AI Platform UI at https://console.cloud.google.com/mlengine/models\n", "_____no_output_____" ] ], [ [ "# Create the model\nif NEW_MODEL:\n !gcloud ai-platform models create {MODEL_NAME} --project={PROJECT} --regions=us-central1", "_____no_output_____" ], [ "# Create a version of this model (you can add --async at the end of the line to make this call non blocking)\n# Additional config flags are available: https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions\n# You can also deploy a model that is stored locally by providing a --staging-bucket=... parameter\n!echo \"Deployment takes a couple of minutes. 
You can watch your deployment here: https://console.cloud.google.com/mlengine/models/{MODEL_NAME}\"\n!gcloud ai-platform versions create {MODEL_VERSION} --model={MODEL_NAME} --origin={export_path} --project={PROJECT} --runtime-version=1.14 --python-version=3.5", "_____no_output_____" ] ], [ [ "### Test the deployed model\nYour model is now available as a REST API. Let us try to call it. The cells below use the \"gcloud ml-engine\"\ncommand line tool but any tool that can send a JSON payload to a REST endpoint will work.", "_____no_output_____" ] ], [ [ "# prepare digits to send to online prediction endpoint\ndigits_float32 = np.concatenate((font_digits, validation_digits[:100-N])) # pixel values in [0.0, 1.0] float range\ndigits_uint8 = np.round(digits_float32*255).astype(np.uint8) # pixel values in [0, 255] int range\nlabels = np.concatenate((font_labels, validation_labels[:100-N]))\nwith open(\"digits.json\", \"w\") as f:\n for digit in digits_uint8:\n # the format for AI Platform online predictions is: one JSON object per line\n data = json.dumps({\"images\": digit.tolist()}) # \"images\" because that was the name you gave this parametr in the serving funtion my_serve\n f.write(data+'\\n')", "_____no_output_____" ], [ "# Request online predictions from deployed model (REST API) using the \"gcloud ml-engine\" command line.\npredictions = !gcloud ai-platform predict --model={MODEL_NAME} --json-instances digits.json --project={PROJECT} --version {MODEL_VERSION}\nprint(predictions)\n\npredictions = np.stack([json.loads(p) for p in predictions[1:]]) # first elemet is the name of the output layer: drop it, parse the rest\ndisplay_top_unrecognized(digits_float32, predictions, labels, N, 100//N)", "['DIGITS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '7', '2', '1', '0', '4', '1', '4', '9', '5', '9', '0', '6', '9', '0', '1', '5', '9', '7', '3', '4', '9', '6', '6', '5', '4', '0', '7', '4', '0', '1', '3', '1', '3', '4', '7', '2', '7', '1', '2', '1', '1', '7', '4', '2', '3', '5', '1', '2', '4', '4', '6', '3', '5', '5', '6', '0', '4', '1', '9', '5', '7', '8', '9', '3', '7', '4', '6', '4', '3', '0', '7', '0', '2', '9', '1', '7']\n" ] ], [ [ "## What's next\n\n* Learn about [Cloud TPUs](https://cloud.google.com/tpu/docs) that Google designed and optimized specifically to speed up and scale up ML workloads for training and inference and to enable ML engineers and researchers to iterate more quickly.\n* Explore the range of [Cloud TPU tutorials and Colabs](https://cloud.google.com/tpu/docs/tutorials) to find other examples that can be used when implementing your ML project.\n\nOn Google Cloud Platform, in addition to GPUs and TPUs available on pre-configured [deep learning VMs](https://cloud.google.com/deep-learning-vm/), you will find [AutoML](https://cloud.google.com/automl/)*(beta)* for training custom models without writing code and [Cloud ML Engine](https://cloud.google.com/ml-engine/docs/) which will allows you to run parallel trainings and hyperparameter tuning of your custom models on powerful distributed hardware.\n", "_____no_output_____" ], [ "## License", "_____no_output_____" ], [ "\n\n---\n\n\nauthor: Martin Gorner<br>\ntwitter: @martin_gorner\n\n\n---\n\n\nCopyright 2019 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless 
required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\n---\n\n\nThis is not an official Google product but sample code provided for an educational purpose\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
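The notebook record above exports a Keras model with a custom `serving_default` signature and only verifies it after deployment to AI Platform. A quick local check of the exported SavedModel can catch signature problems earlier. The sketch below is hedged: the export path is a placeholder, and the random batch merely exercises the `images` input and `digits` output names reported by `saved_model_cli` in that record.

```python
import numpy as np
import tensorflow as tf

# Placeholder path; in the record above the model was saved under gs://<bucket>/keras_export/<timestamp>.
export_path = "/tmp/keras_export/example"

loaded = tf.saved_model.load(export_path)
infer = loaded.signatures["serving_default"]

# Fabricated batch of two flattened 28x28 uint8 images, just to exercise the signature.
fake_batch = np.random.randint(0, 256, size=(2, 28 * 28), dtype=np.uint8)
outputs = infer(images=tf.constant(fake_batch))

print(outputs["digits"].numpy())  # predicted class index per input row
```

If this call fails locally, the same payload will fail against the deployed REST endpoint, so it is a cheap pre-deployment check.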
d0a73514a94a165fa3a715b93873c328027a466f
5,202
ipynb
Jupyter Notebook
Python Data Science Toolbox -Part 2/Bringing it all together/08. Writing an iterator to load data in chunks (1).ipynb
nazmusshakib121/Python-Programming
3ea852641cd5fe811228f27a780109a44174e8e5
[ "MIT" ]
null
null
null
Python Data Science Toolbox -Part 2/Bringing it all together/08. Writing an iterator to load data in chunks (1).ipynb
nazmusshakib121/Python-Programming
3ea852641cd5fe811228f27a780109a44174e8e5
[ "MIT" ]
null
null
null
Python Data Science Toolbox -Part 2/Bringing it all together/08. Writing an iterator to load data in chunks (1).ipynb
nazmusshakib121/Python-Programming
3ea852641cd5fe811228f27a780109a44174e8e5
[ "MIT" ]
null
null
null
44.084746
327
0.459631
[ [ [ "### Writing an iterator to load data in chunks (1)\nAnother way to read data too large to store in memory in chunks is to read the file in as DataFrames of a certain length, say, 100. For example, with the pandas package (imported as pd), you can do pd.read_csv(filename, chunksize=100). This creates an iterable reader object, which means that you can use next() on it.\n\nIn this exercise, you will read a file in small DataFrame chunks with read_csv(). You're going to use the World Bank Indicators data 'ind_pop.csv', available in your current directory, to look at the urban population indicator for numerous countries and years.\n\n### Instructions\n\n- Use pd.read_csv() to read in 'ind_pop.csv' in chunks of size 10. Assign the result to df_reader.\n- Print the first two chunks from df_reader.", "_____no_output_____" ] ], [ [ "# Import the pandas package\nimport pandas as pd\n\n# Initialize reader object: df_reader\ndf_reader = pd.read_csv('world_ind_pop_data.csv' , chunksize= 10)\n\n# Print two chunks\nprint(next(df_reader))\nprint(next(df_reader))\n", " CountryName CountryCode Year \\\n0 Arab World ARB 1960 \n1 Caribbean small states CSS 1960 \n2 Central Europe and the Baltics CEB 1960 \n3 East Asia & Pacific (all income levels) EAS 1960 \n4 East Asia & Pacific (developing only) EAP 1960 \n5 Euro area EMU 1960 \n6 Europe & Central Asia (all income levels) ECS 1960 \n7 Europe & Central Asia (developing only) ECA 1960 \n8 European Union EUU 1960 \n9 Fragile and conflict affected situations FCS 1960 \n\n Total Population Urban population (% of total) \n0 9.249590e+07 31.285384 \n1 4.190810e+06 31.597490 \n2 9.140158e+07 44.507921 \n3 1.042475e+09 22.471132 \n4 8.964930e+08 16.917679 \n5 2.653965e+08 62.096947 \n6 6.674890e+08 55.378977 \n7 1.553174e+08 38.066129 \n8 4.094985e+08 61.212898 \n9 1.203546e+08 17.891972 \n CountryName CountryCode Year \\\n10 Heavily indebted poor countries (HIPC) HPC 1960 \n11 High income HIC 1960 \n12 High income: nonOECD NOC 1960 \n13 High income: OECD OEC 1960 \n14 Latin America & Caribbean (all income levels) LCN 1960 \n15 Latin America & Caribbean (developing only) LAC 1960 \n16 Least developed countries: UN classification LDC 1960 \n17 Low & middle income LMY 1960 \n18 Low income LIC 1960 \n19 Lower middle income LMC 1960 \n\n Total Population Urban population (% of total) \n10 1.624912e+08 12.236046 \n11 9.075975e+08 62.680332 \n12 1.866767e+08 56.107863 \n13 7.209208e+08 64.285435 \n14 2.205642e+08 49.284688 \n15 1.776822e+08 44.863308 \n16 2.410728e+08 9.616261 \n17 2.127373e+09 21.272894 \n18 1.571884e+08 11.498396 \n19 9.429116e+08 19.810513 \n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
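The record above stops after printing the first two chunks of the reader object. The usual payoff of `chunksize` is aggregating over all chunks so the full file never sits in memory at once. The sketch below assumes the same `world_ind_pop_data.csv` file and `Total Population` column shown in that record's output; the chunk size is arbitrary.

```python
import pandas as pd

# Stream the CSV in chunks and keep a running total, so only one chunk
# is held in memory at any time.
total_population = 0.0
reader = pd.read_csv("world_ind_pop_data.csv", chunksize=1000)

for chunk in reader:
    total_population += chunk["Total Population"].sum()

print(total_population)
```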
d0a7366232b56d2c67948dd77cede0fd1b91cfad
15,119
ipynb
Jupyter Notebook
pegbook_chap6.ipynb
kkuramitsu/pegbook2021
c75ddd79631ac67d502b3705d76d168d4d5f842f
[ "MIT" ]
2
2022-02-19T13:46:47.000Z
2022-03-05T11:54:23.000Z
pegbook_chap6.ipynb
kkuramitsu/pegbook2022
c75ddd79631ac67d502b3705d76d168d4d5f842f
[ "MIT" ]
null
null
null
pegbook_chap6.ipynb
kkuramitsu/pegbook2022
c75ddd79631ac67d502b3705d76d168d4d5f842f
[ "MIT" ]
null
null
null
35.078886
1,407
0.525828
[ [ [ "# 第6章 スモール言語を作る", "_____no_output_____" ] ], [ [ "# !pip install pegtree\nimport pegtree as pg\nfrom pegtree.colab import peg, pegtree, example", "_____no_output_____" ], [ "%%peg\n\nProgram = { // 開式非終端記号 Expression*\n #Program\n} EOF\nEOF = !. // ファイル終端\n\nExpression =\n / FuncDecl // 関数定義\n / VarDecl // 変数定義\n / IfExpr // if 式\n / Binary // 二項演算\n", "UsageError: Cell magic `%%peg` not found.\n" ] ], [ [ " import pegtree as pg\npeg = pg.grammar('chibi.pegtree') parser = pg.generate(peg)", "_____no_output_____" ], [ "## 6.2.5 パーザの生成", "_____no_output_____" ] ], [ [ "import pegtree as pg\npeg = pg.grammar('chibi.pegtree') \nparser = pg.generate(peg)", "_____no_output_____" ] ], [ [ "## トランスコンパイラ\n\n", "_____no_output_____" ] ], [ [ "class Visitor(object):\n def visit(self, tree):\n tag = tree.getTag()\n name = f'accept{tag}'\n if hasattr(self, name): # accept メソッドがあるか調べる\n # メソッド名からメソッドを得る \n acceptMethod = getattr(self, name) \n return acceptMethod(tree)\n print(f'TODO: accept{tag}') \n return None\n\nclass Compiler(Visitor): # Visitor クラスの継承 \n def __init__(self):\n self.buffers = []\n peg = pg.grammar('chibi.pegtree') \n self.parser = pg.generate(peg)\n\n def compile(self, source):\n tree = self.parser(source) # 構文木に交換\n self.buffers = [] # バッファの初期化\n self.visit(tree)\n return ''.join(self.buffers) # バッファを連結してソースコードにまとめる\n\n def push(self, s): # コード片をバッファに追加\n self.buffers.append(s)", "_____no_output_____" ], [ "c = Compiler()\ncode = c.compile('1+2*3') \nprint(code)", "TODO: acceptProgram\n\n" ] ], [ [ "### 各ノードのコード変換\n\n", "_____no_output_____" ] ], [ [ "BUILTIN_FUNCTIONS = { \n 'print': 'console.log'\n}\n\nclass Compiler(Visitor): # Visitor クラスの継承 \n def __init__(self):\n self.buffers = []\n peg = pg.grammar('chibi.pegtree') \n self.parser = pg.generate(peg)\n\n def compile(self, source):\n tree = self.parser(source) # 構文木に交換\n self.buffers = [] # バッファの初期化\n self.visit(tree)\n return ''.join(self.buffers) # バッファを連結してソースコードにまとめる\n\n def push(self, s): # コード片をバッファに追加\n self.buffers.append(s)\n\n def acceptProgram(self, tree):\n for child in tree: # 子ノードのリスト\n self.visit(child) # 子ノードの変換 \n self.push('\\n') # 改行をバッファに追加\n\n def acceptInt(self, tree):\n v = tree.getToken()\n self.push(v)\n\n def acceptName(self, tree):\n name = tree.getToken()\n self.push(name)\n\n def acceptAdd(self, tree): \n self.push('(')\n self.visit(tree[0]) \n self.push('+') \n self.visit(tree[1]) \n self.push(')')\n\n def acceptEq(self, tree): \n self.push('(')\n self.visit(tree[0]) \n self.push('===') \n self.visit(tree[1]) \n self.push(') ? 
1 : 0')\n\n def acceptFuncApp(self, tree):\n f = tree.getToken(0)\n self.push(BUILTIN_FUNCTIONS.get(f, f)) \n self.push('(')\n self.visit(tree[1])\n self.push(')')\n \n def accepterr(self, tree):\n print(repr(tree))\n", "_____no_output_____" ], [ "c = Compiler()\ncode = c.compile('''\nf(x) = x+1\nprint(x)\n''')\nprint(code)", "Syntax Error ((unknown source):2:-1+0)\n\n\n\n" ] ], [ [ "## インタプリタ\n\n", "_____no_output_____" ] ], [ [ "class Interpreter(Visitor):\n def __init__(self):\n self.env = {} # 空の環境を用意する \n peg = pg.grammar('chibi.pegtree') \n self.parser = pg.generate(peg)\n \n def eval(self, source):\n tree = self.parser(source)\n return self.visit(tree)", "_____no_output_____" ], [ "chibi = Interpreter() \nsource = input('>>> ') \nwhile source != '':\n result = chibi.eval(source) \n print(result)\nsource = input('>>> ')", "_____no_output_____" ], [ "class Interpreter(Visitor):\n def __init__(self):\n self.env = {} # 空の環境を用意する \n peg = pg.grammar('chibi.pegtree') \n self.parser = pg.generate(peg)\n \n def eval(self, source):\n tree = self.parser(source)\n return self.visit(tree)\n\n def acceptProgram(self, tree):\n result = None\n for child in tree:\n result = self.visit(child)\n return result\n \n def acceptInt(self, tree):\n token = tree.getToken()\n return int(token)\n\n def acceptAdd(self, tree):\n v0 = self.visit(tree[0])\n v1 = self.visit(tree[1])\n return v0 + v1\n\n def acceptEq(self, tree):\n v0 = self.visit(tree[0])\n v1 = self.visit(tree[1])\n return 1 if v0 == v1 else 0\n\n def acceptIfExpr(self, tree):\n v0 = self.visit(tree[0])\n if v0 != 0:\n return self.visit(tree[1])\n else:\n return self.visit(tree[2])\n\n def acceptVarDecl(self, tree):\n v = self.visit(tree[1])\n x = str(tree[0])\n self.env[x] = v\n return v\n \n def acceptName(self, t):\n x = t.getToken()\n if x in self.env:\n return self.env[x]\n else:\n raise NameError(x)\n\n def acceptFuncDecl(self, tree):\n f = tree.getToken(0)\n x = tree.getToken(1)\n e = tree.get(2)\n self.env[f] = (x, e)\n return self.env[f]\n\n def acceptFuncApp(self, tree):\n f = tree.getToken(0) # 関数名を得る\n v = self.visit(tree[1]) # 引数を先に評価\n x, e = self.env[f] # 関数名から引数名と関数式を取り出す \n self.env[x] = v # 環境xをvにする\n v = self.visit(e) # 関数式を評価\n return v", "_____no_output_____" ], [ "source = '''\nfib(n) = if n < 3 then 1 else fib(n-1)+fib(n-2) \nfib(4)\n'''\nc = Interpreter()\nprint(c.eval(source))", "TODO: accepterr\nNone\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
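The pegbook chapter above builds its compiler and interpreter on a `Visitor` base class that dispatches on parse-tree tags via `getattr`. The same idiom can be shown without the `chibi.pegtree` grammar or the pegtree package; the sketch below uses a tiny hand-built node class purely as a stand-in for the real parse trees.

```python
# Stand-alone illustration of the tag-dispatch Visitor idiom used in the record above.
class Node:
    def __init__(self, tag, *children):
        self.tag = tag
        self.children = children

class Visitor:
    def visit(self, node):
        method = getattr(self, f"accept{node.tag}", None)  # look up acceptXxx by tag name
        if method is None:
            raise NotImplementedError(f"accept{node.tag}")
        return method(node)

class Evaluator(Visitor):
    def acceptInt(self, node):
        return int(node.children[0])

    def acceptAdd(self, node):
        return self.visit(node.children[0]) + self.visit(node.children[1])

# 1 + (2 + 3), built by hand; a parser generated from a PEG grammar would normally produce this tree.
tree = Node("Add", Node("Int", "1"), Node("Add", Node("Int", "2"), Node("Int", "3")))
print(Evaluator().visit(tree))  # 6
```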
d0a752f9edcf48e680373c36f2f8ea1f97771c68
42,950
ipynb
Jupyter Notebook
SQL in Python.ipynb
jamwine/SQL-in-Python
0b8018fe92129833c49290f74b00d296f8bcec50
[ "MIT" ]
null
null
null
SQL in Python.ipynb
jamwine/SQL-in-Python
0b8018fe92129833c49290f74b00d296f8bcec50
[ "MIT" ]
null
null
null
SQL in Python.ipynb
jamwine/SQL-in-Python
0b8018fe92129833c49290f74b00d296f8bcec50
[ "MIT" ]
null
null
null
29.559532
273
0.385821
[ [ [ "# SQL in Python\n### Packages\n\n - [Pandas.read_sql](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_sql.html)\n - [SQLite3](https://docs.python.org/3.6/library/sqlite3.html)\n \n### Tutorials\n- https://www.tutorialspoint.com/sqlite/sqlite_python.htm\n- https://www.pythoncentral.io/introduction-to-sqlite-in-python/\n- https://medium.com/swlh/reproducing-sql-queries-in-python-codes-35d90f716b1a\n- http://www.sqlitetutorial.net/sqlite-python/", "_____no_output_____" ], [ "### Create a SQL database connection to a sample SQL database, and read records from that database\nStructured Query Language (SQL) is an [ANSI specified](https://docs.oracle.com/database/121/SQLRF/ap_standard_sql001.htm#SQLRF55514), powerful format for interacting with large databases efficiently. **SQLite** is a lightweight and somewhat restricted version of SQL.", "_____no_output_____" ] ], [ [ "# Imports\nimport sqlite3 as sq3\nimport pandas.io.sql as pds\nimport pandas as pd", "_____no_output_____" ] ], [ [ "### Database connections\n\nOur first step will be to create a connection to our SQL database. A few common SQL databases used with Python include:\n\n - Microsoft SQL Server\n - Postgres\n - MySQL\n - AWS Redshift\n - AWS Aurora\n - Oracle DB\n - Terradata\n - Db2 Family\n - Many, many others\n \nEach of these databases will require a slightly different setup, and may require credentials (username & password), tokens, or other access requirements. We'll be using `sqlite3` to connect to our database, but other connection packages include:\n\n - [`SQLAlchemy`](https://www.sqlalchemy.org/) (most common)\n - [`psycopg2`](http://initd.org/psycopg/)\n - [`MySQLdb`](http://mysql-python.sourceforge.net/MySQLdb.html)", "_____no_output_____" ], [ "## Classic Rock Database", "_____no_output_____" ] ], [ [ "# Initialize path to SQLite database\npath = 'databases/classic_rock.db'\ncon = sq3.Connection(path)\n\n# We now have a live connection to our SQL database", "_____no_output_____" ] ], [ [ "### Reading data\n\nNow that we've got a connection to our database, we can perform queries, and load their results in as Pandas DataFrames", "_____no_output_____" ] ], [ [ "# Write the query\nquery = '''\nSELECT * \nFROM rock_songs;\n'''\n\n# Execute the query\nobservations = pds.read_sql(query, con)\n\nobservations.head()", "_____no_output_____" ], [ "# We can also run any supported SQL query\n# Write the query\nquery = '''\nSELECT Artist, Release_Year, COUNT(*) AS num_songs, AVG(PlayCount) AS avg_plays \n FROM rock_songs\n GROUP BY Artist, Release_Year\n ORDER BY num_songs desc;\n'''\n\n# Execute the query\nobservations = pds.read_sql(query, con)\n\nobservations.head()", "_____no_output_____" ] ], [ [ "### Common parameters\n\nThere are a number of common paramters that can be used to read in SQL data with formatting:\n\n - **coerce_float**: Attempt to force numbers into floats\n - **parse_dates**: List of columns to parse as dates\n - **chunksize**: Number of rows to include in each chunk\n ", "_____no_output_____" ] ], [ [ "query='''\nSELECT Artist, Release_Year, COUNT(*) AS num_songs, AVG(PlayCount) AS avg_plays \n FROM rock_songs\n GROUP BY Artist, Release_Year\n ORDER BY num_songs desc;\n'''\n\n# Execute the query\nobservations_generator = pds.read_sql(query,\n con,\n coerce_float=True, # Doesn't effect this dataset, because floats were correctly parsed\n parse_dates=['Release_Year'], # Parse `Release_Year` as a date\n chunksize=5 # Allows for streaming results as a series of shorter tables\n 
)\n\nfor index, observations in enumerate(observations_generator):\n if index < 5:\n print(f'Observations index: {index}'.format(index))\n display(observations)", "Observations index: 0\n" ] ], [ [ "### Baseball Database Example", "_____no_output_____" ] ], [ [ "# Create a variable, `path`, containing the path to the `baseball.db` contained in `resources/`\npath = 'databases/baseball.db'\n\n# Create a connection, `con`, that is connected to database at `path`\ncon = sq3.Connection(path)", "_____no_output_____" ], [ "# Create a variable, tables, which reads in all data from the table sqlite_master\nall_tables = pd.read_sql('SELECT * FROM sqlite_master', con)\nall_tables", "_____no_output_____" ], [ "# Displaying all tables in database\npd.read_sql(\"select name from sqlite_master where type = 'table';\", con)", "_____no_output_____" ], [ "# Create a variable, `query`, containing a SQL query which reads in all data from the `` table\nquery = \"\"\"\nSELECT *\n FROM allstarfull\n ;\n\"\"\"\n\nallstar_observations = pd.read_sql(query, con)\nallstar_observations.head()", "_____no_output_____" ], [ "best_query = \"\"\"\nSELECT playerID, sum(GP) AS num_games_played, AVG(startingPos) AS avg_starting_position\n FROM allstarfull\n GROUP BY playerID\n ORDER BY num_games_played DESC, avg_starting_position ASC\n LIMIT 3\n\"\"\"\nbest = pd.read_sql(best_query, con)\nbest.head()", "_____no_output_____" ] ], [ [ "### Artists Database Example", "_____no_output_____" ] ], [ [ "conn = sq3.connect(\"databases/artists.sqlite\")\n\n# Displaying all tables in database\npd.read_sql(\"select name from sqlite_master where type = 'table';\", conn)", "_____no_output_____" ], [ "query=\"SELECT * FROM artists\"\nmusic_reviews = pd.read_sql_query(query, conn)\nmusic_reviews.tail()", "_____no_output_____" ], [ "query1=\"SELECT * FROM artists WHERE artist='kleenex'\"\nresult=pd.read_sql_query(query1, conn)\nresult", "_____no_output_____" ], [ "# conn.commit()\nconn.close()", "_____no_output_____" ] ], [ [ "|![head.png](imgs/head.png)|![head.png](imgs/head.png)|\n|---|---|\n|![reading_nosql.png](imgs/reading_nosql.png)|![reading_online.png](imgs/reading_online.png)|", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
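The SQL notebook above depends on local `.db` files (`classic_rock.db`, `baseball.db`, `artists.sqlite`). For readers without those files, the same `sqlite3` plus `pandas.read_sql` workflow can be reproduced against an in-memory database; the table name and rows below are invented for illustration.

```python
import sqlite3 as sq3
import pandas as pd

# Build a throwaway in-memory database so the read_sql workflow is reproducible.
con = sq3.connect(":memory:")
con.execute("CREATE TABLE songs (artist TEXT, title TEXT, playcount INTEGER)")
con.executemany(
    "INSERT INTO songs VALUES (?, ?, ?)",
    [
        ("Queen", "Bohemian Rhapsody", 120),
        ("Queen", "Under Pressure", 80),
        ("Rush", "Tom Sawyer", 95),
    ],
)
con.commit()

query = """
SELECT artist, COUNT(*) AS num_songs, AVG(playcount) AS avg_plays
  FROM songs
 GROUP BY artist
 ORDER BY num_songs DESC;
"""
print(pd.read_sql(query, con))
con.close()
```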
d0a75d0109ab1734fccf3d0e55cab7ba17c73797
315,614
ipynb
Jupyter Notebook
pittsburgh-bridges-data-set-analysis/models-analyses/grid_search_analyses/Data Space Report (Official) - Sgd Classifier-v1.0.0.ipynb
franec94/Pittsburgh-Bridge-Dataset
682ff0e3979ca565637e858cc36dc07c2aeda7d6
[ "MIT" ]
null
null
null
pittsburgh-bridges-data-set-analysis/models-analyses/grid_search_analyses/Data Space Report (Official) - Sgd Classifier-v1.0.0.ipynb
franec94/Pittsburgh-Bridge-Dataset
682ff0e3979ca565637e858cc36dc07c2aeda7d6
[ "MIT" ]
7
2021-02-02T22:51:40.000Z
2022-03-12T00:39:08.000Z
pittsburgh-bridges-data-set-analysis/models-analyses/merge_analyses/SGD Classifier-v1.0.0.ipynb
franec94/Pittsburgh-Bridge-Dataset
682ff0e3979ca565637e858cc36dc07c2aeda7d6
[ "MIT" ]
null
null
null
386.308446
55,948
0.914275
[ [ [ "# Data Space Report\n\n\n<img src=\"images/polito_logo.png\" alt=\"Polito Logo\" style=\"width: 200px;\"/>\n\n\n## Pittsburgh Bridges Data Set\n\n<img src=\"images/andy_warhol_bridge.jpg\" alt=\"Andy Warhol Bridge\" style=\"width: 200px;\"/>\n\n Andy Warhol Bridge - Pittsburgh.\n\nReport created by Student Francesco Maria Chiarlo s253666, for A.A 2019/2020.\n\n**Abstract**:The aim of this report is to evaluate the effectiveness of distinct, different statistical learning approaches, in particular focusing on their characteristics as well as on their advantages and backwards when applied onto a relatively small dataset as the one employed within this report, that is Pittsburgh Bridgesdataset.\n\n**Key words**:Statistical Learning, Machine Learning, Bridge Design.", "_____no_output_____" ], [ "### Imports Section <a class=\"anchor\" id=\"imports-section\"></a>", "_____no_output_____" ] ], [ [ "from utils.all_imports import *;\n%matplotlib inline", "None\n" ], [ "# Some global script variables\n# --------------------------------------------------------------------------- #\ndataset_path, dataset_name, column_names, TARGET_COL = \\\n get_dataset_location() # Info Data to be fetched\nestimators_list, estimators_names = get_estimators() # Estimator to be trained\n\ndataset, feature_vs_values = load_brdiges_dataset(dataset_path, dataset_name)\n\n# variables used for pass through arrays used to store results\npos_gs = 0; pos_cv = 0\n\n# Array used for storing graphs\nplots_names = list(map(lambda xi: f\"{xi}_learning_curve.png\", estimators_names))\npca_kernels_list = ['linear', 'poly', 'rbf', 'cosine', 'sigmoid']\ncv_list = list(range(10, 1, -1))", "_____no_output_____" ], [ "columns_2_avoid = ['ERECTED', 'LENGTH', 'LOCATION']", "_____no_output_____" ], [ "# Make distinction between Target Variable and Predictors\n# --------------------------------------------------------------------------- #\nrescaledX, y, columns = prepare_data_for_train(dataset, target_col=TARGET_COL)", "Summary about Target Variable {target_col}\n--------------------------------------------------\n2 57\n1 13\nName: T-OR-D, dtype: int64\nshape features matrix X, after normalizing: (70, 11)\n" ] ], [ [ "## Pricipal Component Analysis", "_____no_output_____" ] ], [ [ "show_table_pc_analysis(X=rescaledX)", "Cumulative varation explained(percentage) up to given number of pcs:\n" ] ], [ [ "#### Major Pros & Cons of PCA\n\n", "_____no_output_____" ], [ "## Learning Models <a class=\"anchor\" id=\"learning-models\"></a>", "_____no_output_____" ] ], [ [ "# Parameters to be tested for Cross-Validation Approach\n# -----------------------------------------------------\nparam_grids = []\nparmas_logreg = {\n 'penalty': ('l1', 'l2', 'elastic', None),\n 'solver': ('newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'),\n 'fit_intercept': (True, False),\n 'tol': (1e-4, 1e-3, 1e-2),\n 'class_weight': (None, 'balanced'),\n 'C': (10.0, 1.0, .1, .01, .001, .0001),\n # 'random_state': (0,),\n}; param_grids.append(parmas_logreg)\n\nparmas_knn_clf = {\n 'n_neighbors': (2,3,4,5,6,7,8,9,10),\n 'weights': ('uniform', 'distance'),\n 'metric': ('euclidean', 'minkowski', 'manhattan'),\n 'leaf_size': (5, 10, 15, 30),\n 'algorithm': ('ball_tree', 'kd_tree', 'brute'),\n}; param_grids.append(parmas_knn_clf)\n\nparams_sgd_clf = {\n 'loss': ('log', 'modified_huber'), # ('hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron')\n 'penalty': ('l2', 'l1', 'elasticnet'),\n 'alpha': (1e-1, 1e-2, 1e-3, 1e-4),\n 'max_iter': (50, 100, 150, 200, 500, 1000, 
1500, 2000, 2500),\n 'class_weight': (None, 'balanced'),\n 'learning_rate': ('optimal',),\n 'tol': (None, 1e-2, 1e-4, 1e-5, 1e-6),\n # 'random_state': (0,),\n}; param_grids.append(params_sgd_clf)\n\nkernel_type = 'svm-rbf-kernel'\nparams_svm_clf = {\n # 'gamma': (1e-7, 1e-4, 1e-3, 1e-2, 0.1, 1.0, 10, 1e+2, 1e+3, 1e+5, 1e+7),\n 'gamma': (1e-5, 1e-3, 1e-2, 0.1, 1.0, 10, 1e+2, 1e+3, 1e+5),\n 'max_iter':(1e+2, 1e+3, 2 * 1e+3, 5 * 1e+3, 1e+4, 1.5 * 1e+3),\n 'degree': (1,2,4,8),\n 'coef0': (.001, .01, .1, 0.0, 1.0, 10.0),\n 'shrinking': (True, False),\n 'kernel': ['linear', 'poly', 'rbf', 'sigmoid',],\n 'class_weight': (None, 'balanced'),\n 'C': (1e-4, 1e-3, 1e-2, 0.1, 1.0, 10, 1e+2, 1e+3),\n 'probability': (True,),\n}; param_grids.append(params_svm_clf)\n\nparmas_tree = {\n 'splitter': ('random', 'best'),\n 'criterion':('gini', 'entropy'),\n 'max_features': (None, 'sqrt', 'log2'),\n 'max_depth': (None, 3, 5, 7, 10,),\n 'splitter': ('best', 'random',),\n 'class_weight': (None, 'balanced'),\n}; param_grids.append(parmas_tree)\n\nparmas_random_forest = {\n 'n_estimators': (3, 5, 7, 10, 30, 50, 70, 100, 150, 200),\n 'criterion':('gini', 'entropy'),\n 'bootstrap': (True, False),\n 'min_samples_leaf': (1,2,3,4,5),\n 'max_features': (None, 'sqrt', 'log2'),\n 'max_depth': (None, 3, 5, 7, 10,),\n 'class_weight': (None, 'balanced', 'balanced_subsample'),\n}; param_grids.append(parmas_random_forest)\n\n# Some variables to perform different tasks\n# -----------------------------------------------------\nN_CV, N_KERNEL, N_GS = 9, 5, 6;\nnrows = N_KERNEL // 2 if N_KERNEL % 2 == 0 else N_KERNEL // 2 + 1;\nncols = 2; grid_size = [nrows, ncols]", "_____no_output_____" ] ], [ [ "| Learning Technique | Type of Learner | Type of Learning | Classification | Regression | Clustering |\n| --- | --- | --- | --- | --- | --- |\n| *Stochastic Gradient Descent (SGD)* | *Linear Model* | *Supervised Learning*| *Supported* | *Supported* | *Not-Supported*|", "_____no_output_____" ] ], [ [ "n_components=9\nlearning_curves_by_kernels(\n# learning_curves_by_components(\n estimators_list[:], estimators_names[:],\n rescaledX, y,\n train_sizes=np.linspace(.1, 1.0, 10),\n n_components=9,\n pca_kernels_list=pca_kernels_list[0],\n verbose=0,\n by_pairs=True,\n savefigs=True,\n scoring='accuracy',\n figs_dest=os.path.join('figures', 'learning_curve', f\"Pcs_{n_components}\"), ignore_func=True,\n # figsize=(20,5)\n)", "_____no_output_____" ], [ "%%javascript\nIPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}", "_____no_output_____" ], [ "plot_dest = os.path.join(\"figures\", \"n_comp_9_analysis\", \"grid_search\")\nX = rescaledX; pos = 3\n\ndf_gs, df_auc_gs, df_pvalue = grid_search_all_by_n_components(\n estimators_list=estimators_list[pos], \\\n param_grids=param_grids[pos - 1],\n estimators_names=estimators_names[pos], \\\n X=X, y=y,\n n_components=9,\n random_state=0, show_plots=False, show_errors=False, verbose=1, plot_dest=plot_dest, debug_var=False)\ndf_9, df_9_auc = df_gs, df_auc_gs", "Kernel PCA: Linear | SGD\n====================================================================================================\n precision recall f1-score support\n\n class 0 0.32 1.00 0.48 6\n class 1 1.00 0.54 0.70 28\n\n accuracy 0.62 34\n macro avg 0.66 0.77 0.59 34\nweighted avg 0.88 0.62 0.66 34\n\nBest Score (CV-Train) Best Score (Test) AUC P-value\n 0.95 0.62 0.77 0.15842\n" ] ], [ [ "Looking at the results obtained running *Sgd Classifier* against our dataset splitted into training set and test set and 
adopting a different kernel trick applied to *kernel-Pca* unsupervised preprocessing method we can state generally speaking that looking at the weighted values of *Recall, Precision, and F1-Scores* we obtained good performance and and except for one trial where we got lower and worst results, when *Polynomial and Rbf Tricks* is selected, in the remaning cases have gotten remarkable results. More precisely we can say what follows:\n\n- speaking about __Linear kernel Pca based Sgd Classifier__, when adoping the default threshold of *.5* for classification purposes we have a model that reaches an accuracy of *65%* at test time against an accuracy of *92%* at train step, while the Auc score reaches a value of *79%* with a Roc Curve that shows a behavior for which the model increases its *TPR* without affecting the *FPR* score, however at a given point the Roc Curve trend turns so that the two cited scores begin to increase linearly and with a slope lower than that of Random Classifier so that FPR increases faster. The model is very precise when predicting class 1 instances but it has a recall of just *54%* so misclassified more or less half of samples from class 1 and this fact influenced instead the precision of class 0 that is a bit low, just *32%*, while class 0 recall is very high. Since the test accuracy score loses nearly 30 percent points we can assume that sucha model quite overfit to train data, we are not really encouraged to adopt it except we decied to exploit it for including it in an ensemble classifier, more boosting like than bagging one.\n\n- observing __Polynomial kernel Pca based Sgd Estimator__, we can notice that such a model exploiting a default threshold of *.5* reaches an accuracy of *76%* at test time against an accuracy of *92%* at train step, while the Auc score reaches a value of *73%*. It represents the best result obtained running th SGD based Training Algorithm upon our input dataset, in particular it obtained high precision and high recall for class 1, in other words such a model is able to recognize and correctly classify most of the data examples whose true label is indeed class 1. However, even if the model has high recall related to class 0, since the dataset is unbalanced we cannot say the same things for precision score about the class 0. So the model is somewhat uncertain when predicting class 0 as label value for new observations.\n\n- review __Rbf kernel Pca based Sgd Classifier__, we can notice that such a model exploiting a default threshold of *.5* reaches an accuracy of *82%* at test time against an accuracy of *92%* at train step, while the Auc score reaches a value of *57%*. In particular such a trial along with the *PCosine kernel Pca based Sgd Classifier* are the two attempts that lead to worts results, since the model overfit against the data employed at training time, but also the model gained weights that tend to predict every thing as class 1 instance. So, the resulting scores tell us that the model is highly precise and obtained high recall related to class 1, convercely has very low performance for precision and recall referred to class 0. 
Since such a model is performing just a little bit better than random classifier, can be largely adopted along other similar models for building voting classifier, following boosting like classifier policy and behavior.\n\n- looking at __Cosine kernel Pca based Sgd Classifier__, we can notice that such a model exploiting a default threshold of *.5* reaches an accuracy of *32%* at test time against an accuracy of *95%* at train step, while the Auc score reaches a value of just *59%*. Here the fine tuned model obtained from grid-search approach tells us that we are able to classify with high precision a few data examples from class 1, and even if we correctly classify all instances from class 0, we also wrongly predict class labels for most of instances,. whose true label is class 1. This means that the model is highly uncertain when predicting class 0 as the output target label. Moreover, the model's ROC Curve performs slighltly better than the random classifier, and we end up saying that such a model has gained weights and hyper-params that tend to predict the unknown instances as belonging to class 0 most of the time. We cannot neither say that switching the class labels between the two classes will allow us to obtain a better result since the roc curve trend is just a little bit better than the random classifier.\n\n- finally, referring to __Sigmoid kernel Pca based Sgd Model__, we can notice that such a model exploiting a default threshold of *.5* reaches an accuracy of *44%* at test time against an accuracy of *92%* at train step, while the Auc score reaches a value of *66%*. This model behaves more or less as the model obtained from the first trial performed for Sgd-based classifier, so as the first model is slightly worst than the best model found here when adopting as classifier Sgd technique, that is the *Cosine kernel Pca based Sgd Classifier*.\n\n__Significance Analysis__: finally, when looking at the different graphics related to the test which aims at investigating the diagnostic power of our different models we have fine tuned for *SGD Classifier*, picking the best one for such a test we can notice that beacues of the *signficance level* $\\alpha$ set equal to *0.05 that is 5% of chance to reject the Null-Hypothesis $H_{0}$*, we have obtained following results. Adopting the SGD statistical learning technique for classification fine tuned as above with hyper-params selectd also depending on the kind of *kernel-trick adopted for kernel-Pca unsupervised technique*, we can calim that only two out of five trials lead to a *p-vlaue* worst than *selected significance level equal to 5%*, which are *Linear- and Cosine-kernel Pca based Sgd Classifier*, so rejecting the *Null-Hypotesis* for those two cases will results into a *Type I Error*. 
While the remaining three cases, that are *Poly-, Rbf- and Sigmoid-kernel Pca based Sgd Classifier* have obtained a p-value over the range $[.9, 3]$ *in percet points*, so we are satisfyed for the results obtained in terms of significance scores, however, only *Poly-, and Rbf-kernel Pca based Sgd Classifier* really matter or are worth models since they do not overfit too much and do not go worstly as *Sigmoid-kernel Pca based Sgd Classifier* at test time.\n\n#### Table Fine Tuned Hyper-Params(SGD Classifier)", "_____no_output_____" ] ], [ [ "# create_widget_list_df([df_gs, df_auc_gs]) #print(df_gs); print(df_auc_gs)\nshow_table_summary_grid_search(df_gs, df_auc_gs, df_pvalue)", "_____no_output_____" ] ], [ [ "Looking at the table dispalyed just above that shows the details about the selected values for hyper-parameters specified during grid search, in the different situations accordingly to the fixed kernel-trick for kernel Pca unsupervised method we can state that, referring to the first two columns of *Train and Test Accuracy*, we can recognize which trials lead to more overfit results such as for *Rbfd Trick* or less overfit solution such as in the case of *Linear, Polynomial, Cosine, and Sigmoid Tricks*. Speaking about the hyper-parameters, we can say what follows:\n\n- looking at __alpha hyper-parameter__, that is constant that multiplies the regularization term. The higher the value, the stronger the regularization. Also used to compute the learning rate when set to *learning_rate* is set to *'optimal'*, as was here, we can notice that the final choice through the different trials was more or less tha same, meanning that the adopted kernel trick for performing kernel-Pca does not affected appreciably such a hyper-param, which three cases out of five was set to *0.1*, and the remaining case adopted *0.0001*, *0.001* for respectively Cosine and Sigmoid based *kernel-Pca*. This also remind us that while training the classifiers was not necessary to force a high regularization contribute for reducing the overfit as well as the learning process, even if we know that *Rbf kernel Pca based Sgd Classifier* overfits mostly against train data, and gained weights that encourages predicting all samples as belonging to class 1.\n\n- reviewing __class_weight hyper-param__, what we can state about such a parameter is that it represents weights associated with classes. If not given, all classes are supposed to have weight one. The *“balanced” mode* uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as __n_samples / (n_classes * np.bincount(y))__. In particular we can notice that three out five models that were fine tuned accepted or selected *balanced weights*, which are *Linear-, Sigomoid-, Cosine-kernel Pca based Sgd Classifier*, while the remaining obtain better, when setting uniform weights which are models *Polynomial-, Rbf-kernel Pca based Sgd Classifier*. So the choiche of the right *kernel-trick* affected the subsequent selection at fine tuning time of the *class_weight hyper-param*. 
What we can further notice is that *Polynomial-, Rbf-kernel Pca based Sgd Classifier* more or less adopted same kind of values for hyper-params, as an instance for penalty hyper-param, however Polynomial model got worst performance in terms of accuracy but considering the other metrics simultaneously we can understand that the Poly model overfits less than Rbf one and so get better performance in general.\n\n- speaking of __learning_rate hyper-param__, since we force this as the unique available choice it was just report for completeness.\n\n- interesting it is the discussion about __loss parameter__, if fact we know that the possible options are *‘hinge’, ‘log’, ‘modified_huber’, ‘squared_hinge’, ‘perceptron’*, where the *‘log’ loss* gives logistic regression, a probabilistic classifier. *‘modified_huber’* is another smooth loss that brings tolerance to outliers as well as probability estimates. *‘squared_hinge’* is like hinge but is quadratically penalized. ‘perceptron’ is the linear loss used by the perceptron algorithm. Here, speaking about loss parameter we can clearly understand that the choice of a particular kernel trick does not affect the following choice of the loss function to be optimized, in fact uniformly all the models adopted or tend to prefer *modified_huber* loss function, allowing the models to fit to the data taking into account the fact that such a loss function is less sensitive to outliers, recalling inn fact that the Huber loss function is used in robust statistics, M-estimation and additive modelling. This loss is so cllaed beacues it derives from the plain version normally exploited for regression problems.\n\n- also when referring to __max iteration parameter__, we can easily say that thte models evenly adopted somewhat small number of iteration before stopping the learning procedure, this might be also becaues we work with a small dataset and so a set of data points that is small tend to overfit quickly and this migth be the reason for which in order to avoid too much overfit the training procedure performed employing grid-search technique for fine-tuning tend to prefer tiny number of iterations set for training the model.\n\n- __penalty parameter__, we recall here that it represents regularization term to be used. More precisely, defaults to *‘l2’* which is the standard regularizer for linear SVM models. *‘l1’* and *‘elasticnet’* might bring *sparsity* to the model (feature selection) not achievable with *‘l2’*. Also for such a hyper-param the choice of a particular *kernel-trick* to be used for *kernel-Pca* was affecting the subsequent selection of penalty contribute to regularize learning task, as was for *class weight hyper-param*. Here three over five models that are *Linear-, Sigomoid-, Cosine-kernel Pca based Sgd Classifier* adopted *l1-norm* as regularization term so the models's weights tend to be more sparse, while the remaining *Polynomial-, Rbf-kernel Pca based Sgd Classifier* models adopted *l2-nrom*. 
For the trials we have done, the models with *l1-regularization term* seem to get worst performance, more precisely *Sigomoid-, Cosine-kernel Pca based Sgd Classifier* even were worser than random classifier, while the *Linear-kernel Pca based Sgd Classifier* was slightly worst than Polynomial one, so does not overfit too much however we can say it can be exploited for ensemble method that follows a Boosting Policy.\n\nIf we imagine to build up an *Ensemble Classifier* from the family of *Average Methods*, which state that the underlying principle leading their creation requires to build separate and single classifiers than averaging their prediction in regression context or adopting a majority vote strategy for the classification context, we can claim that amongst the purposed *Sgd classifier*, for sure, we could employ the classifier found from all the trials, except for *Rbf, Cosine and Sigmoid kernel Pca based Sgd Classifiers*, since the first model is overly overfitting to the data used at train time and more precisely most of the time predicted correctly just samples from class 1 and misclassifyes instances from class 0, the others instead assumed the opposite behavior. Also, because of their performance metrics and also because Ensemble Methods such as Bagging Classifier, usually work fine exploiting an ensemble of independent and fine tuned classifier differently from Boosting Methods which instead are based on weak learners.", "_____no_output_____" ] ], [ [ "# show_histogram_first_sample(Xtrain_transformed, ytrain_, estimators_)", "_____no_output_____" ] ], [ [ "### Improvements and Conclusions <a class=\"anchor\" id=\"Improvements-and-conclusions\"></a>\n\nExtension that we can think of to better improve the analyses we can perform on such a relative tiny dataset many include, for preprocessing phases:\n- Selecting different *Feature Extraction ant Dimensionality Reduction Techniques* other than Pca or kernel Pca such as: \n*linear discriminant analysis (LDA)*, or *canonical correlation analysis (CCA) techniques* as a pre-processing step.\n\nExtension that we can think of to better improve the analyses we can perform on such a relative tiny dataset many include, for training phases:\n\n- Selecting different *Ensemble Methods, investigating both Average based and Boosting based Statistical Learning Methods*.\n\nExtension that we can think of to better improve the analyses we can perform on such a relative tiny dataset many include, for diagnostic analyses after having performed train and test phases:\n\n- Using other measures, indicators and ghraphical plots such as the *Total Operating Characteristic (TOC)*, since also such a measure characterizes diagnostic ability while revealing more information than the ROC. In fact for each threshold, ROC reveals two ratios, TP/(TP + FN) and FP/(FP + TN). In other words, ROC reveals hits/(hits + misses) and false alarms/(false alarms + correct rejections). On the other hand, TOC shows the total information in the contingency table for each threshold. Lastly, the TOC method reveals all of the information that the ROC method provides, plus additional important information that ROC does not reveal, i.e. 
the size of every entry in the contingency table for each threshold.", "_____no_output_____" ], [ "## References section <a class=\"anchor\" id=\"references\"></a>\n### Main References\n- Data Domain Information part:\n - (Deck) https://en.wikipedia.org/wiki/Deck_(bridge)\n - (Cantilever bridge) https://en.wikipedia.org/wiki/Cantilever_bridge\n - (Arch bridge) https://en.wikipedia.org/wiki/Deck_(bridge)\n- Machine Learning part:\n - (Theory Book) https://jakevdp.github.io/PythonDataScienceHandbook/\n - (Feature Extraction: PCA) https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html\n - (Linear Model: Logistic Regression) https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n - (Neighbor-based Learning: Knn) https://scikit-learn.org/stable/modules/neighbors.html\n - (Stochastc Learning: SGD Classifier) https://scikit-learn.org/stable/modules/sgd.html#sgd\n - (Discriminative Model: SVM) https://scikit-learn.org/stable/modules/svm.html\n - (Non-Parametric Learning: Decsion Trees) https://scikit-learn.org/stable/modules/tree.html#tree\n - (Ensemble, Non-Parametric Learning: RandomForest) https://scikit-learn.org/stable/modules/ensemble.html#forest\n- Metrics:\n - (F1-Accuracy-Precision-Recall) https://towardsdatascience.com/beyond-accuracy-precision-and-recall-3da06bea9f6c\n- Statistics:\n - (Correlation and dependence) https://en.wikipedia.org/wiki/Correlation_and_dependence\n - (KDE) https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/\n- Chart part:\n - (Seaborn Charts) https://acadgild.com/blog/data-visualization-using-matplotlib-and-seaborn\n- Third Party Library:\n - (sklearn) https://scikit-learn.org/stable/index.html\n - (statsmodels) https://www.statsmodels.org/stable/index.html#\n \n### Others References\n- Plots:\n - (Python Plot) https://www.datacamp.com/community/tutorials/matplotlib-tutorial-python?utm_source=adwords_ppc&utm_campaignid=898687156&utm_adgroupid=48947256715&utm_device=c&utm_keyword=&utm_matchtype=b&utm_network=g&utm_adpostion=&utm_creative=255798340456&utm_targetid=aud-299261629574:dsa-473406587955&utm_loc_interest_ms=&utm_loc_physical_ms=1008025&gclid=Cj0KCQjw-_j1BRDkARIsAJcfmTFu4LAUDhRGK2D027PHiqIPSlxK3ud87Ek_lwOu8rt8A8YLrjFiHqsaAoLDEALw_wcB\n- Markdown Math part:\n - (Math Symbols Latex) https://oeis.org/wiki/List_of_LaTeX_mathematical_symbols\n - (Tutorial 1) https://share.cocalc.com/share/b4a30ed038ee41d868dad094193ac462ccd228e2/Homework%20/HW%201.2%20-%20Markdown%20and%20LaTeX%20Cheatsheet.ipynb?viewer=share\n - (Tutorial 2) https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Typesetting%20Equations.html", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
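The grid searches in the record above run through project-specific helpers (`grid_search_all_by_n_components`, `learning_curves_by_kernels`, and so on) whose source is not shown. The pattern they wrap, kernel PCA feeding an SGD classifier tuned by cross-validated grid search, can be sketched generically with scikit-learn. The synthetic data, the nine components, and the pared-down grid below are assumptions for illustration, not the notebook's actual configuration.

```python
from sklearn.datasets import make_classification
from sklearn.decomposition import KernelPCA
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline

# Synthetic stand-in for the bridges data: 70 samples, 11 features, 2 classes.
X, y = make_classification(n_samples=70, n_features=11, n_classes=2, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

pipe = Pipeline([
    ("kpca", KernelPCA(n_components=9)),
    ("sgd", SGDClassifier(loss="modified_huber", random_state=0)),
])

# Pared-down grid; the notebook sweeps far more hyper-parameter values.
param_grid = {
    "kpca__kernel": ["linear", "rbf", "cosine"],
    "sgd__alpha": [1e-1, 1e-2, 1e-3],
    "sgd__penalty": ["l2", "l1"],
}

search = GridSearchCV(pipe, param_grid, cv=3, scoring="accuracy")
search.fit(X_train, y_train)
print(search.best_params_, search.score(X_test, y_test))
```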
d0a76646a48eb70a3fd4905437096fc5866f6f2f
30,576
ipynb
Jupyter Notebook
.ipynb_checkpoints/Teresa's Data-checkpoint.ipynb
GrayCleric4/Final-Project
768e4bb710be120e6c212c0b336f6a347924474d
[ "Unlicense" ]
1
2021-04-08T18:09:53.000Z
2021-04-08T18:09:53.000Z
.ipynb_checkpoints/Teresa's Data-checkpoint.ipynb
GrayCleric4/Final-Project
768e4bb710be120e6c212c0b336f6a347924474d
[ "Unlicense" ]
null
null
null
.ipynb_checkpoints/Teresa's Data-checkpoint.ipynb
GrayCleric4/Final-Project
768e4bb710be120e6c212c0b336f6a347924474d
[ "Unlicense" ]
null
null
null
40.986595
9,020
0.572639
[ [ [ "## Dataset\nhttps://data.wprdc.org/dataset/allegheny-county-restaurant-food-facility-inspection-violations/resource/112a3821-334d-4f3f-ab40-4de1220b1a0a\n\nThis data set is a set of all of the restaurants in Allegheny County with geographic locations including zip code, size, description of use, and a \"status\" ranging from 0 to 7 to indicate if the restaurant is currently open.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nrestaurants_all = pd.read_csv(\"r.csv\")\n", "_____no_output_____" ] ], [ [ "First, I remove the few restaurants that are outside of Pittsburgh and those with a value of 0 or 1 for their status, which indicates that they are closed.", "_____no_output_____" ] ], [ [ "query_mask = restaurants_all['status'] > 1\nzip_mask_low = restaurants_all['zip'] > 14999.0\nzip_mask_high = restaurants_all['zip'] < 16000.0\nopen_restaurants = restaurants_all[query_mask]\nopen_restaurants = open_restaurants[zip_mask_low]\nopen_restaurants = open_restaurants[zip_mask_high]\nopen_restaurants = open_restaurants[open_restaurants['zip'].notnull()]\nopen_restaurants.head(5)", "<ipython-input-3-b236f237d444>:5: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n open_restaurants = open_restaurants[zip_mask_low]\n<ipython-input-3-b236f237d444>:6: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n open_restaurants = open_restaurants[zip_mask_high]\n" ] ], [ [ "Then I count up the number of open restaurants within a certain zipcode by keeping track of the data in a dictionary, using the zipcode as a key and incrementing the value associated with it.", "_____no_output_____" ] ], [ [ "zipcode_counter = dict()\n\nfor row in open_restaurants.index:\n zipc = open_restaurants.loc[row, \"zip\"]\n \n if zipc not in zipcode_counter:\n zipcode_counter[zipc] = 1\n else:\n zipcode_counter[zipc] = zipcode_counter[zipc] + 1\n\nzipcode_counter\nzip_sorted = dict(sorted(zipcode_counter.items(), key=lambda item: item[1]))\nzip_sorted", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nnames = list(zipcode_counter.keys())\nvalues = list(zipcode_counter.values())\n\nplt.bar(names, values)\nplt.xlabel(\"Zipcodes\")\nplt.ylabel(\"Number of Restaurants\")\nplt.axis([15000, 15300, 0, 1060])\nplt.show()\n\naverage = sum(zip_sorted.values()) / len(zip_sorted)\nprint(average)", "_____no_output_____" ] ], [ [ "Plotting this data, we find that there is a very wide range from 0 to 1041 and a mean of 124 restaurants per zipcode.", "_____no_output_____" ] ], [ [ "#all_values = zipcode_counter.values()\n#max_value = max(all_values)\n\n#print(max_value)\n\nmax_key = max(zipcode_counter, key=zipcode_counter.get)\nprint(max_key)\n\nmin_key = min(zipcode_counter, key=zipcode_counter.get)\nprint(min_key)\n", "15222.0\n15047.0\n" ] ], [ [ "The top ten zipcodes with the most restaurants and their corresponding neighborhoods are: \n* 15222.0: 1041 - Strip District\n* 15212.0: 694 - North Shore/North Side\n* 15213.0: 677 - Oakland\n* 15219.0: 639 - Hill District\n* 15237.0: 509 - Ross Township\n* 15146.0: 482 - Monroeville\n* 15205.0: 423 - Crafton\n* 15108.0: 408 - Coraoplis\n* 15235.0: 396 - Penn Hills\n* 15203.0: 392 - South Side\n\n\nAccording to our metric, we divide the data into fifths and award points to the total of each of the zipcodes:\n \n \n \n \n \n ", "_____no_output_____" ] ], [ [ "print(len(zip_sorted))\n\nzipcode_points_restaurants = dict()\ni = 1\n\nfor key in zip_sorted:\n zipcode_points_restaurants[key] = i // 28 + 1\n i = i + 1\n 
\nzipcode_points_restaurants", "139\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
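The record above tallies open restaurants per zip code with a manual dictionary loop. `value_counts` gives the same tally in one call and makes a handy cross-check; the sketch assumes the same `r.csv` file and `status`/`zip` columns used in that notebook.

```python
import pandas as pd

restaurants_all = pd.read_csv("r.csv")

# Same filters as the notebook: status > 1 (open) and a Pittsburgh-area zip code.
open_restaurants = restaurants_all[
    (restaurants_all["status"] > 1)
    & (restaurants_all["zip"] > 14999.0)
    & (restaurants_all["zip"] < 16000.0)
]

counts_by_zip = open_restaurants["zip"].value_counts()
print(counts_by_zip.head(10))   # zip codes with the most open restaurants
print(counts_by_zip.mean())     # average number of restaurants per zip code
```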
d0a76a45bbdc40fd9c4b27866a1cd930172d99f3
60,932
ipynb
Jupyter Notebook
BCNcode/0_vibratioon_signal/1450/DNN/DNN_1450-09-512-y.ipynb
Decaili98/BCN-code-2022
ab0ce085cb29fbf12b6d773861953cb2cef23e20
[ "MulanPSL-1.0" ]
null
null
null
BCNcode/0_vibratioon_signal/1450/DNN/DNN_1450-09-512-y.ipynb
Decaili98/BCN-code-2022
ab0ce085cb29fbf12b6d773861953cb2cef23e20
[ "MulanPSL-1.0" ]
null
null
null
BCNcode/0_vibratioon_signal/1450/DNN/DNN_1450-09-512-y.ipynb
Decaili98/BCN-code-2022
ab0ce085cb29fbf12b6d773861953cb2cef23e20
[ "MulanPSL-1.0" ]
null
null
null
133.916484
24,448
0.821621
[ [ [ "from tensorflow import keras\nfrom tensorflow.keras import *\nfrom tensorflow.keras.models import *\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.regularizers import l2#正则化L2\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "normal = np.loadtxt(r'F:\\张老师课题学习内容\\code\\数据集\\试验数据(包括压力脉动和振动)\\2013.9.12-未发生缠绕前\\2013-9.12振动\\2013-9-12振动-1450rmin-mat\\1450r_normalviby.txt', delimiter=',')\nchanrao = np.loadtxt(r'F:\\张老师课题学习内容\\code\\数据集\\试验数据(包括压力脉动和振动)\\2013.9.17-发生缠绕后\\振动\\9-17下午振动1450rmin-mat\\1450r_chanraoviby.txt', delimiter=',')\nprint(normal.shape,chanrao.shape,\"***************************************************\")\ndata_normal=normal[16:18] #提取前两行\ndata_chanrao=chanrao[16:18] #提取前两行\nprint(data_normal.shape,data_chanrao.shape)\nprint(data_normal,\"\\r\\n\",data_chanrao,\"***************************************************\")\ndata_normal=data_normal.reshape(1,-1)\ndata_chanrao=data_chanrao.reshape(1,-1)\nprint(data_normal.shape,data_chanrao.shape)\nprint(data_normal,\"\\r\\n\",data_chanrao,\"***************************************************\")", "(22, 32768) (22, 32768) ***************************************************\n(2, 32768) (2, 32768)\n[[-2.0268 -2.275 -0.18206 ... 0.53516 0.086831 0.64158 ]\n [ 0.76417 2.2918 -0.36777 ... -1.3128 0.25411 1.018 ]] \r\n [[-2.8931 1.1423 -1.3761 ... -2.4323 0.35843 1.2718 ]\n [-1.2389 -2.8513 0.90417 ... -0.30749 -0.15209 -1.2137 ]] ***************************************************\n(1, 65536) (1, 65536)\n[[-2.0268 -2.275 -0.18206 ... -1.3128 0.25411 1.018 ]] \r\n [[-2.8931 1.1423 -1.3761 ... -0.30749 -0.15209 -1.2137 ]] ***************************************************\n" ], [ "#水泵的两种故障类型信号normal正常,chanrao故障\ndata_normal=data_normal.reshape(-1, 512)#(65536,1)-(128, 515)\ndata_chanrao=data_chanrao.reshape(-1,512)\nprint(data_normal.shape,data_chanrao.shape)\n", "(128, 512) (128, 512)\n" ], [ "import numpy as np\ndef yuchuli(data,label):#(4:1)(51:13)\n #打乱数据顺序\n np.random.shuffle(data)\n train = data[0:102,:]\n test = data[102:128,:]\n label_train = np.array([label for i in range(0,102)])\n label_test =np.array([label for i in range(0,26)])\n return train,test ,label_train ,label_test\ndef stackkk(a,b,c,d,e,f,g,h):\n aa = np.vstack((a, e))\n bb = np.vstack((b, f))\n cc = np.hstack((c, g))\n dd = np.hstack((d, h))\n return aa,bb,cc,dd\nx_tra0,x_tes0,y_tra0,y_tes0 = yuchuli(data_normal,0)\nx_tra1,x_tes1,y_tra1,y_tes1 = yuchuli(data_chanrao,1)\ntr1,te1,yr1,ye1=stackkk(x_tra0,x_tes0,y_tra0,y_tes0 ,x_tra1,x_tes1,y_tra1,y_tes1)\n\nx_train=tr1\nx_test=te1\ny_train = yr1\ny_test = ye1\n\n#打乱数据\nstate = np.random.get_state()\nnp.random.shuffle(x_train)\nnp.random.set_state(state)\nnp.random.shuffle(y_train)\n\nstate = np.random.get_state()\nnp.random.shuffle(x_test)\nnp.random.set_state(state)\nnp.random.shuffle(y_test)\n\n\n#对训练集和测试集标准化\ndef ZscoreNormalization(x):\n \"\"\"Z-score normaliaztion\"\"\"\n x = (x - np.mean(x)) / np.std(x)\n return x\nx_train=ZscoreNormalization(x_train)\nx_test=ZscoreNormalization(x_test)\n# print(x_test[0])\n\n\n#转化为一维序列\nx_train = x_train.reshape(-1,512,1)\nx_test = x_test.reshape(-1,512,1)\nprint(x_train.shape,x_test.shape)\n\ndef to_one_hot(labels,dimension=2):\n results = np.zeros((len(labels),dimension))\n for i,label in enumerate(labels):\n results[i,label] = 1\n return results\none_hot_train_labels = to_one_hot(y_train)\none_hot_test_labels = to_one_hot(y_test)\n", "(204, 512, 1) (52, 512, 1)\n" ], [ "x = 
layers.Input(shape=[512,1,1])\nFlatten=layers.Flatten()(x)\nDense1=layers.Dense(12, activation='relu')(Flatten)\nDense2=layers.Dense(6, activation='relu')(Dense1)\nDense3=layers.Dense(2, activation='softmax')(Dense2)\nmodel = keras.Model(x, Dense3) \nmodel.summary() ", "Model: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 512, 1, 1)] 0 \n_________________________________________________________________\nflatten (Flatten) (None, 512) 0 \n_________________________________________________________________\ndense (Dense) (None, 12) 6156 \n_________________________________________________________________\ndense_1 (Dense) (None, 6) 78 \n_________________________________________________________________\ndense_2 (Dense) (None, 2) 14 \n=================================================================\nTotal params: 6,248\nTrainable params: 6,248\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "#定义优化\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',metrics=['accuracy']) ", "_____no_output_____" ], [ "import time\ntime_begin = time.time()\nhistory = model.fit(x_train,one_hot_train_labels,\n validation_split=0.1,\n epochs=50,batch_size=10,\n shuffle=True)\ntime_end = time.time()\ntime = time_end - time_begin\nprint('time:', time)", "Epoch 1/50\n19/19 [==============================] - 3s 81ms/step - loss: 0.8286 - accuracy: 0.4925 - val_loss: 0.9839 - val_accuracy: 0.4762\nEpoch 2/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.5910 - accuracy: 0.6995 - val_loss: 0.9040 - val_accuracy: 0.4762\nEpoch 3/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.5165 - accuracy: 0.7488 - val_loss: 0.8607 - val_accuracy: 0.4762\nEpoch 4/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.4450 - accuracy: 0.8315 - val_loss: 0.8612 - val_accuracy: 0.4762\nEpoch 5/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.4235 - accuracy: 0.8283 - val_loss: 0.8558 - val_accuracy: 0.5238\nEpoch 6/50\n19/19 [==============================] - 0s 6ms/step - loss: 0.3372 - accuracy: 0.9166 - val_loss: 0.8564 - val_accuracy: 0.5238\nEpoch 7/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.3057 - accuracy: 0.9047 - val_loss: 0.8686 - val_accuracy: 0.5238\nEpoch 8/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.2634 - accuracy: 0.9178 - val_loss: 0.8913 - val_accuracy: 0.5238\nEpoch 9/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.2716 - accuracy: 0.9055 - val_loss: 0.9202 - val_accuracy: 0.5714\nEpoch 10/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.2294 - accuracy: 0.9236 - val_loss: 0.9345 - val_accuracy: 0.5714\nEpoch 11/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.2016 - accuracy: 0.9556 - val_loss: 0.9361 - val_accuracy: 0.5714\nEpoch 12/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.1741 - accuracy: 0.9526 - val_loss: 0.9524 - val_accuracy: 0.5714\nEpoch 13/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.1758 - accuracy: 0.9597 - val_loss: 0.9685 - val_accuracy: 0.5714\nEpoch 14/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.1813 - accuracy: 0.9452 - val_loss: 0.9890 - val_accuracy: 0.5714\nEpoch 15/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.1464 - 
accuracy: 0.9497 - val_loss: 1.0058 - val_accuracy: 0.6190\nEpoch 16/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.1313 - accuracy: 0.9474 - val_loss: 1.0049 - val_accuracy: 0.6190\nEpoch 17/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.1299 - accuracy: 0.9652 - val_loss: 1.0215 - val_accuracy: 0.7143\nEpoch 18/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.1373 - accuracy: 0.9643 - val_loss: 1.0805 - val_accuracy: 0.6667\nEpoch 19/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.1028 - accuracy: 0.9814 - val_loss: 1.1197 - val_accuracy: 0.6190\nEpoch 20/50\n19/19 [==============================] - 0s 7ms/step - loss: 0.0623 - accuracy: 0.9986 - val_loss: 1.1361 - val_accuracy: 0.6190\nEpoch 21/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0673 - accuracy: 1.0000 - val_loss: 1.1497 - val_accuracy: 0.6190\nEpoch 22/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0580 - accuracy: 1.0000 - val_loss: 1.1483 - val_accuracy: 0.6190\nEpoch 23/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0521 - accuracy: 1.0000 - val_loss: 1.1544 - val_accuracy: 0.6190\nEpoch 24/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0631 - accuracy: 0.9949 - val_loss: 1.1528 - val_accuracy: 0.6190\nEpoch 25/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0549 - accuracy: 0.9966 - val_loss: 1.1436 - val_accuracy: 0.6667\nEpoch 26/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0428 - accuracy: 0.9949 - val_loss: 1.1377 - val_accuracy: 0.7619\nEpoch 27/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0472 - accuracy: 1.0000 - val_loss: 1.1447 - val_accuracy: 0.7619\nEpoch 28/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0507 - accuracy: 1.0000 - val_loss: 1.1554 - val_accuracy: 0.7619\nEpoch 29/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0296 - accuracy: 1.0000 - val_loss: 1.1628 - val_accuracy: 0.7619\nEpoch 30/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0368 - accuracy: 1.0000 - val_loss: 1.1741 - val_accuracy: 0.7619\nEpoch 31/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0222 - accuracy: 1.0000 - val_loss: 1.1827 - val_accuracy: 0.7619\nEpoch 32/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0300 - accuracy: 1.0000 - val_loss: 1.1852 - val_accuracy: 0.7619\nEpoch 33/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0276 - accuracy: 1.0000 - val_loss: 1.1930 - val_accuracy: 0.7143\nEpoch 34/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0177 - accuracy: 1.0000 - val_loss: 1.1893 - val_accuracy: 0.7619\nEpoch 35/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0148 - accuracy: 1.0000 - val_loss: 1.1880 - val_accuracy: 0.7619\nEpoch 36/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0223 - accuracy: 1.0000 - val_loss: 1.1762 - val_accuracy: 0.7619\nEpoch 37/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.0194 - accuracy: 1.0000 - val_loss: 1.1776 - val_accuracy: 0.7619\nEpoch 38/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0161 - accuracy: 1.0000 - val_loss: 1.1911 - val_accuracy: 0.7619\nEpoch 39/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0134 - accuracy: 1.0000 - val_loss: 1.1930 - val_accuracy: 0.7619\nEpoch 40/50\n19/19 [==============================] - 0s 
4ms/step - loss: 0.0092 - accuracy: 1.0000 - val_loss: 1.1908 - val_accuracy: 0.7619\nEpoch 41/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0081 - accuracy: 1.0000 - val_loss: 1.1928 - val_accuracy: 0.7619\nEpoch 42/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0109 - accuracy: 1.0000 - val_loss: 1.1933 - val_accuracy: 0.7619\nEpoch 43/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0147 - accuracy: 1.0000 - val_loss: 1.1968 - val_accuracy: 0.7619\nEpoch 44/50\n19/19 [==============================] - 0s 6ms/step - loss: 0.0074 - accuracy: 1.0000 - val_loss: 1.1972 - val_accuracy: 0.7619\nEpoch 45/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0061 - accuracy: 1.0000 - val_loss: 1.2033 - val_accuracy: 0.7619\nEpoch 46/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0104 - accuracy: 1.0000 - val_loss: 1.2040 - val_accuracy: 0.7619\nEpoch 47/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0062 - accuracy: 1.0000 - val_loss: 1.2051 - val_accuracy: 0.7619\nEpoch 48/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0074 - accuracy: 1.0000 - val_loss: 1.2112 - val_accuracy: 0.7619\nEpoch 49/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 1.2161 - val_accuracy: 0.7619\nEpoch 50/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0059 - accuracy: 1.0000 - val_loss: 1.2186 - val_accuracy: 0.7619\ntime: 6.33783221244812\n" ], [ "import time\ntime_begin = time.time()\nscore = model.evaluate(x_test,one_hot_test_labels, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n \ntime_end = time.time()\ntime = time_end - time_begin\nprint('time:', time)", "Test loss: 1.2534676790237427\nTest accuracy: 0.6346153616905212\ntime: 0.0652627944946289\n" ], [ "#绘制acc-loss曲线\nimport matplotlib.pyplot as plt\n\nplt.plot(history.history['loss'],color='r')\nplt.plot(history.history['val_loss'],color='g')\nplt.plot(history.history['accuracy'],color='b')\nplt.plot(history.history['val_accuracy'],color='k')\nplt.title('model loss and acc')\nplt.ylabel('Accuracy')\nplt.xlabel('epoch')\nplt.legend(['train_loss', 'test_loss','train_acc', 'test_acc'], loc='center right')\n# plt.legend(['train_loss','train_acc'], loc='upper left')\n#plt.savefig('1.png')\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nplt.plot(history.history['loss'],color='r')\nplt.plot(history.history['accuracy'],color='b')\nplt.title('model loss and sccuracy ')\nplt.ylabel('loss/sccuracy')\nplt.xlabel('epoch')\nplt.legend(['train_loss', 'train_sccuracy'], loc='center right')\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a76f87edce1da561ad8fd7b8db89c77ff3c24c
110,366
ipynb
Jupyter Notebook
Module03/04-Dimensionality.SVD.ipynb
tejatammali/umd.inst414
18ffe49df18ebd28e90c879c3fd9a1cc777b4443
[ "Apache-2.0" ]
2
2022-02-09T02:39:15.000Z
2022-02-25T21:45:29.000Z
Module03/04-Dimensionality.SVD.ipynb
tejatammali/umd.inst414
18ffe49df18ebd28e90c879c3fd9a1cc777b4443
[ "Apache-2.0" ]
null
null
null
Module03/04-Dimensionality.SVD.ipynb
tejatammali/umd.inst414
18ffe49df18ebd28e90c879c3fd9a1cc777b4443
[ "Apache-2.0" ]
4
2022-02-03T03:14:32.000Z
2022-03-09T20:09:44.000Z
45.643507
7,924
0.506025
[ [ [ "# Dimensionality Reduction Example\n\nUsing the IMDB data, feature matrix and apply dimensionality reduction to this matrix via PCA and SVD.", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "import json\nimport random\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom scipy.sparse import lil_matrix\n\nfrom sklearn.neighbors import DistanceMetric\n\nfrom sklearn.metrics import jaccard_score\nfrom sklearn.metrics import pairwise_distances", "_____no_output_____" ], [ "# Let's restrict ourselves just to US titles\nrelevant_title_df = pd.read_csv(\"../data/us_relevant_titles.csv\")\n\n# And create a set of just these titles, so we can filter them\nrelevant_title_set = set(relevant_title_df[\"title\"])", "_____no_output_____" ], [ "actor_id_to_name_map = {} # Map Actor IDs to actor names\nactor_id_to_index_map = {} # Map actor IDs to a unique index of known actors\nindex_to_actor_ids = [] # Array mapping unique index back to actor ID (invert of actor_id_to_index_map)\n\nindex_counter = 0 # Unique actor index; increment for each new actor\nknown_actors = set()\n\nmovie_actor_list = [] # List of all our movies and their actors\n\ntest_count = 0\nwith open(\"../data/imdb_recent_movies.json\", \"r\") as in_file:\n for line in in_file:\n \n this_movie = json.loads(line)\n \n # Restrict to American movies\n if this_movie[\"title_name\"] not in relevant_title_set:\n continue\n \n # Keep track of all the actors in this movie\n for actor_id,actor_name in zip(this_movie['actor_ids'],this_movie['actor_names']):\n \n # Keep names and IDs\n actor_id_to_name_map[actor_id] = actor_name\n \n # If we've seen this actor before, skip...\n if actor_id in known_actors:\n continue\n \n # ... Otherwise, add to known actor set and create new index for them\n known_actors.add(actor_id)\n actor_id_to_index_map[actor_id] = index_counter\n index_to_actor_ids.append(actor_id)\n index_counter += 1\n \n # Finished with this film\n movie_actor_list.append({\n \"movie\": this_movie[\"title_name\"],\n \"actors\": set(this_movie['actor_ids']),\n \"genres\": this_movie[\"title_genre\"]\n })", "_____no_output_____" ], [ "print(\"Known Actors:\", len(known_actors))", "Known Actors: 161996\n" ] ], [ [ "## Generate Same DataFrame using Sparse Matrics\n\nThe above will break if you have too much data. We can get around that partially with sparse matrices, where we only store the non-zero elements of the feature matrix and their indices.", "_____no_output_____" ] ], [ [ "# With sparse matrix, initialize to size of Movies x Actors of 0s\nmatrix_sparse = lil_matrix((len(movie_actor_list), len(known_actors)), dtype=bool)\n\n# Update the matrix, movie by movie, setting non-zero values for the appropriate actors\nfor row,movie in enumerate(movie_actor_list): \n for actor_id in movie[\"actors\"]:\n this_index = actor_id_to_index_map[actor_id]\n matrix_sparse[row,this_index] = 1", "_____no_output_____" ], [ "df = pd.DataFrame.sparse.from_spmatrix(\n matrix_sparse, \n index=[m[\"movie\"] for m in movie_actor_list],\n columns=[index_to_actor_ids[i] for i in range(len(known_actors))]\n)\ndf", "_____no_output_____" ], [ "top_k_actors = 1000", "_____no_output_____" ], [ "# Extract the most frequent actors, so we can deal with a reasonable dataset size\nactor_df = df.sum(axis=0)\ntop_actors = set(actor_df.sort_values().tail(top_k_actors).index)", "_____no_output_____" ], [ "# Restrict the data frame to just the movies containing\n#. 
the top k actors\nreduced_df = df[top_actors] # restrict to just these top actors\n\n# throw away movies that don't have any of these actors\nreduced_df = reduced_df.loc[reduced_df.sum(axis=1) > 0] \n\nreduced_df", "_____no_output_____" ] ], [ [ "## Apply SVD to Feature Matrix", "_____no_output_____" ] ], [ [ "# https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.TruncatedSVD.html\nfrom sklearn.decomposition import TruncatedSVD ", "_____no_output_____" ], [ "matrix_dense = reduced_df.to_numpy()\nreduced_df", "_____no_output_____" ], [ "svd = TruncatedSVD(n_components=2)", "_____no_output_____" ], [ "svd.fit(matrix_dense)", "_____no_output_____" ], [ "matrix_reduced = svd.transform(matrix_dense)", "_____no_output_____" ], [ "np.mean(matrix_reduced, axis=0)", "_____no_output_____" ], [ "plt.scatter(matrix_reduced[:,0], matrix_reduced[:,1])", "_____no_output_____" ], [ "counter = 0\nfor index in np.argwhere((matrix_reduced[:,0] > 1.0) & (matrix_reduced[:,1] > 0.8)):\n movie_title = reduced_df.iloc[index[0]].name\n for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]:\n\n print(this_movie[\"movie\"])\n print(\"\\tGenres:\", \", \".join(this_movie[\"genres\"]))\n print(\"\\tActors:\", \", \".join([actor_id_to_name_map[actor] for actor in this_movie[\"actors\"]]))\n\n counter += 1\n if counter > 10:\n print(\"...\")\n break ", "The Alternate\n\tGenres: Action, Drama\n\tActors: Ice-T, Eric Roberts, Bryan Genesse, Michael Madsen\nLuck of the Draw\n\tGenres: Crime, Drama, Thriller\n\tActors: Michael Madsen, Dennis Hopper, Eric Roberts, James Marshall\nSkin Traffik\n\tGenres: Action, Crime\n\tActors: Michael Madsen, Eric Roberts, Mickey Rourke\nMission: The Prophet\n\tGenres: Action, Thriller\n\tActors: Aleksey Chadov, Oded Fehr, Alexander Nevsky, Eric Roberts, Stephen Baldwin, Michael Madsen, Casper Van Dien\nBeyond the Game\n\tGenres: Action, Drama\n\tActors: Michael Madsen, Eric Roberts, Mark Dacascos, Danny Trejo\n" ], [ "counter = 0\nfor index in np.argwhere((matrix_reduced[:,0] < 0.1) & (matrix_reduced[:,1] < 0.1)):\n movie_title = reduced_df.iloc[index[0]].name\n for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]:\n\n print(this_movie[\"movie\"])\n print(\"\\tGenres:\", \", \".join(this_movie[\"genres\"]))\n print(\"\\tActors:\", \", \".join([actor_id_to_name_map[actor] for actor in this_movie[\"actors\"]]))\n\n counter += 1\n if counter > 10:\n print(\"...\")\n break ", "Grizzly II: Revenge\n\tGenres: Horror, Music, Thriller\n\tActors: George Clooney, Charlie Sheen\nCrime and Punishment\n\tGenres: Drama\n\tActors: John Hurt, Crispin Glover\nFor the Cause\n\tGenres: Action, Adventure, Drama\n\tActors: Dean Cain, Justin Whalin, Thomas Ian Griffith\nFor the Cause\n\tGenres: Drama\n\tActors: Eugene Parker, Jerod Haynes, Anthony Lemay\nFor the Cause\n\tGenres: Comedy\n\tActors: Abdelghani Sannak, Ramzi Maqdisi, Jeremy Banster\nGang\n\tGenres: Action, Crime, Drama\n\tActors: Jackie Shroff, Kumar Gaurav, Nana Patekar, Jaaved Jaaferi\nGang\n\tGenres: Action\n\tActors: Ji-Hyuk Cha\nIn the Mood for Love\n\tGenres: Drama, Romance\n\tActors: Tony Chiu-Wai Leung, Siu Ping-Lam, Tung Cho 'Joe' Cheung\nChinese Coffee\n\tGenres: Drama\n\tActors: Jerry Orbach, Al Pacino\nFandango\n\tGenres: \\N\n\tActors: Moritz Bleibtreu, Lars Rudolph, Richy Müller\nFandango\n\tGenres: Drama\n\tActors: Arturo Meseguer, Martín Zapata\nThe Dancer Upstairs\n\tGenres: Crime, Drama, Thriller\n\tActors: Juan Diego Botto, Javier Bardem\nDon's Plum\n\tGenres: 
Comedy, Drama\n\tActors: Kevin Connolly, Tobey Maguire, Scott Bloom, Leonardo DiCaprio\nHeavy Metal 2000\n\tGenres: Action, Adventure, Animation\n\tActors: Pier Paquette, Michael Ironside, Billy Idol\nThe Sorcerer's Apprentice\n\tGenres: Adventure, Family, Fantasy\n\tActors: Robert Davi, Byron Taylor\nThe Sorcerer's Apprentice\n\tGenres: Action, Adventure, Family\n\tActors: Alfred Molina, Nicolas Cage, Jay Baruchel\n...\n" ], [ "comp1_genre_map = {}\ncomp1_actor_map = {}\n\ncomp1_counter = 0\nfor index in np.argwhere((matrix_reduced[:,0] > 1.0) & (matrix_reduced[:,1] < 0.2)):\n movie_title = reduced_df.iloc[index[0]].name\n for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]:\n for g in this_movie[\"genres\"]:\n comp1_genre_map[g] = comp1_genre_map.get(g, 0) + 1\n for a in [actor_id_to_name_map[actor] for actor in this_movie[\"actors\"]]:\n comp1_actor_map[a] = comp1_actor_map.get(a, 0) + 1\n\n comp1_counter += 1\n \nprint(\"Movies in Component 1:\", comp1_counter)\n\nprint(\"Genres:\")\nfor g in sorted(comp1_genre_map, key=comp1_genre_map.get, reverse=True)[:10]:\n print(\"\\t\", g, comp1_genre_map[g])\nprint(\"Actors:\")\nfor a in sorted(comp1_actor_map, key=comp1_actor_map.get, reverse=True)[:10]:\n print(\"\\t\", a, comp1_actor_map[a])", "Movies in Component 1: 47\nGenres:\n\t Drama 29\n\t Action 23\n\t Thriller 22\n\t Horror 17\n\t Crime 10\n\t Mystery 9\n\t Fantasy 5\n\t Adventure 4\n\t Comedy 4\n\t Sci-Fi 3\nActors:\n\t Eric Roberts 47\n\t Vernon Wells 4\n\t Armand Assante 4\n\t Martin Kove 4\n\t Gary Daniels 3\n\t Dean Cain 2\n\t David A.R. White 2\n\t John Savage 2\n\t Aaron Groben 2\n\t Noel Gugliemi 2\n" ], [ "comp2_genre_map = {}\ncomp2_actor_map = {}\n\ncomp2_counter = 0\nfor index in np.argwhere((matrix_reduced[:,0] < 0.1) & (matrix_reduced[:,1] < 0.1)):\n movie_title = reduced_df.iloc[index[0]].name\n\n for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]:\n for g in this_movie[\"genres\"]:\n comp2_genre_map[g] = comp2_genre_map.get(g, 0) + 1\n for a in [actor_id_to_name_map[actor] for actor in this_movie[\"actors\"]]:\n comp2_actor_map[a] = comp2_actor_map.get(a, 0) + 1\n\n comp2_counter += 1\n \nprint(\"Movies in Component 2:\", comp2_counter)\n\nprint(\"Genres:\")\nfor g in sorted(comp2_genre_map, key=comp2_genre_map.get, reverse=True)[:10]:\n print(\"\\t\", g, comp2_genre_map[g])\nprint(\"Actors:\")\nfor a in sorted(comp2_actor_map, key=comp2_actor_map.get, reverse=True)[:10]:\n print(\"\\t\", a, comp2_actor_map[a])", "Movies in Component 2: 18069\nGenres:\n\t Drama 17274\n\t Comedy 7327\n\t Thriller 5586\n\t Action 5152\n\t Crime 3959\n\t Romance 3727\n\t Horror 3165\n\t Mystery 2093\n\t Adventure 2032\n\t Sci-Fi 1190\nActors:\n\t Joe Hammerstone 91\n\t Tony Devon 85\n\t Louis Koo 85\n\t Brahmanandam 79\n\t Nicolas Cage 69\n\t James Franco 68\n\t Prakash Raj 65\n\t Samuel L. 
Jackson 63\n\t Willem Dafoe 63\n\t Simon Yam 62\n" ] ], [ [ "## Find Similar Movies in Reduced Dimensional Space", "_____no_output_____" ] ], [ [ "query_idx = [idx for idx,m in enumerate(reduced_df.index) if m == \"The Lord of the Rings: The Fellowship of the Ring\"][0]\n# query_idx = [idx for idx,m in enumerate(reduced_df.index) if m == \"Heavy Metal 2000\"][0]\n# query_idx = [idx for idx,m in enumerate(reduced_df.index) if m == \"Casino Royale\"][0]\n# query_idx = [idx for idx,m in enumerate(reduced_df.index) if m == \"Star Wars: Episode II - Attack of the Clones\"][0]\nquery_idx", "_____no_output_____" ], [ "query_v = matrix_reduced[query_idx,:]", "_____no_output_____" ], [ "query_v", "_____no_output_____" ], [ "# get distances between all films and query film\ndistances = pairwise_distances(matrix_reduced, [query_v], metric='euclidean')\n", "_____no_output_____" ], [ "distances_df = pd.DataFrame(distances, columns=[\"distance\"])\nfor idx,row in distances_df.sort_values(by=\"distance\", ascending=True).head(20).iterrows():\n print(idx, reduced_df.iloc[idx].name, row[\"distance\"])", "18 The Lord of the Rings: The Fellowship of the Ring 0.0\n11487 Days and Nights 1.2209541406754873e-05\n18457 The Tomorrow War 1.2776674116332596e-05\n6383 My Own Love Song 1.3171993015743094e-05\n10102 Best Man Down 1.639756956937894e-05\n15541 Kingsglaive: Final Fantasy XV 1.8152531475848637e-05\n11126 Mystery Road 2.189655804804624e-05\n16715 The Professor 2.5540080923330028e-05\n1529 Daddy Day Care 2.589325207709699e-05\n8976 Jupiter Ascending 3.269693367234955e-05\n5179 Gulabo Sitabo 3.298438040957619e-05\n245 Chain of Fools 3.3382949677633855e-05\n11309 Guys and a Cop 4.018040821326311e-05\n13328 Raped by an Angel 5 4.018040821326311e-05\n17381 Film Fanatic 4.018040821326311e-05\n5881 Sheep Without a Shepherd 4.018040821326311e-05\n7919 Shadows 4.018040821326311e-05\n8868 Fox Ghost 4.018040821326311e-05\n10860 Tortured Sex Goddess of Ming Dynasty 4.018040821326311e-05\n18485 Girl Dorm 4.018040821326311e-05\n" ] ], [ [ "## SVD and Column Feature Space\n\nAbove, we focused on the *movies* in the reduced feature/\"concept\" space. 
Here, we will use SVD to map the *actors* into the reduced \"concept\" space.", "_____no_output_____" ] ], [ [ "# See that the shape of this matrix is *reduced space* X original features\nsvd.components_.shape", "_____no_output_____" ] ], [ [ "We will use this reduced space to inspect the associations with a given actor and the concept set of concepts (i.e., the reduced space)", "_____no_output_____" ] ], [ [ "# query_actor = [idx for idx,name in actor_id_to_name_map.items() if name == \"Ewan McGregor\"][0]\n# query_actor = [idx for idx,name in actor_id_to_name_map.items() if name == \"Eric Roberts\"][0]\n# query_actor = [idx for idx,name in actor_id_to_name_map.items() if name == \"Jason Statham\"][0]\n# query_actor = [idx for idx,name in actor_id_to_name_map.items() if name == \"Leonardo DiCaprio\"][0]\nquery_actor = [idx for idx,name in actor_id_to_name_map.items() if name == \"George Clooney\"][0]\nquery_actor", "_____no_output_____" ], [ "query_actor_index = np.argwhere(reduced_df.columns == query_actor)[0,0]\nquery_actor_index", "_____no_output_____" ], [ "# Show the actor strengths across these concepts\nsvd.components_.T[query_actor_index,:] ", "_____no_output_____" ], [ "# And you can use this method to evaluate distances between actors in the concept space\ndistances = pairwise_distances(svd.components_.T, [svd.components_.T[query_actor_index,:]], metric='euclidean')\n\ndistances_df = pd.DataFrame(distances, columns=[\"distance\"])\nfor idx,row in distances_df.sort_values(by=\"distance\", ascending=True).head(20).iterrows():\n print(idx, actor_id_to_name_map[reduced_df.columns[idx]], row[\"distance\"])", "397 George Clooney 0.0\n854 Lalu Alex 2.1684959239110375e-05\n552 Steve Carell 5.765535961270728e-05\n950 Liam Neeson 8.58474608863118e-05\n494 Tanikella Bharani 0.00011361512397967896\n522 Gérard Depardieu 0.00014417276880813264\n926 Siddique 0.0001534176616126892\n888 Shawn Yue 0.00015625326486224262\n584 Kunchacko Boban 0.00022063353354574957\n22 Guy Pearce 0.00024232499554002016\n794 Robert Downey Jr. 
0.0002447877571351057\n12 Paul Giamatti 0.00028857145979086976\n0 Tony Ka Fai Leung 0.00029564349774853754\n900 Mark Wahlberg 0.00029966525624988997\n521 Nassar 0.0003054147725516829\n9 Christian Bale 0.00030644785930321463\n234 Danny Glover 0.000533473514063691\n737 Morgan Freeman 0.0005355495134715015\n209 Luke Wilson 0.0005536288382481004\n236 Jagapathi Babu 0.000554439235632182\n" ] ], [ [ "## SVD is more scalable than PCA", "_____no_output_____" ] ], [ [ "from sklearn.decomposition import PCA", "_____no_output_____" ], [ "matrix_sparse.shape", "_____no_output_____" ], [ "# This will fail\npca = PCA(n_components=2)\npca.fit(matrix_sparse)", "_____no_output_____" ], [ "svd = TruncatedSVD(n_components=2)\nsvd.fit(matrix_sparse)", "_____no_output_____" ], [ "matrix_reduced = svd.transform(matrix_sparse)", "_____no_output_____" ], [ "print(np.mean(matrix_reduced, axis=0))\nplt.scatter(matrix_reduced[:,0], matrix_reduced[:,1])", "[0.00230072 0.00068161]\n" ], [ "comp1_genre_map = {}\ncomp1_actor_map = {}\n\ncomp1_counter = 0\nfor index in np.argwhere((matrix_reduced[:,0] > 1.0) & (matrix_reduced[:,1] < 0.2)):\n movie_title = df.iloc[index[0]].name\n for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]:\n for g in this_movie[\"genres\"]:\n comp1_genre_map[g] = comp1_genre_map.get(g, 0) + 1\n for a in [actor_id_to_name_map[actor] for actor in this_movie[\"actors\"]]:\n comp1_actor_map[a] = comp1_actor_map.get(a, 0) + 1\n\n comp1_counter += 1\n \nprint(\"Movies in Component 1:\", comp1_counter)\n\nprint(\"Genres:\")\nfor g in sorted(comp1_genre_map, key=comp1_genre_map.get, reverse=True)[:10]:\n print(\"\\t\", g, comp1_genre_map[g])\nprint(\"Actors:\")\nfor a in sorted(comp1_actor_map, key=comp1_actor_map.get, reverse=True)[:10]:\n print(\"\\t\", a, comp1_actor_map[a])", "Movies in Component 1: 100\nGenres:\n\t Drama 72\n\t Thriller 49\n\t Action 39\n\t Horror 31\n\t Crime 27\n\t Mystery 17\n\t Comedy 13\n\t Sci-Fi 8\n\t Romance 8\n\t Adventure 7\nActors:\n\t Eric Roberts 100\n\t Michael Madsen 5\n\t Tom Sizemore 5\n\t Vernon Wells 4\n\t Armand Assante 4\n\t Martin Kove 4\n\t Danny Trejo 3\n\t Gary Daniels 3\n\t Dean Cain 2\n\t Casper Van Dien 2\n" ], [ "comp2_genre_map = {}\ncomp2_actor_map = {}\n\ncomp2_counter = 0\nfor index in np.argwhere((matrix_reduced[:,0] < 0.1) & (matrix_reduced[:,1] < 0.1)):\n movie_title = df.iloc[index[0]].name\n\n for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]:\n for g in this_movie[\"genres\"]:\n comp2_genre_map[g] = comp2_genre_map.get(g, 0) + 1\n for a in [actor_id_to_name_map[actor] for actor in this_movie[\"actors\"]]:\n comp2_actor_map[a] = comp2_actor_map.get(a, 0) + 1\n\n comp2_counter += 1\n \nprint(\"Movies in Component 2:\", comp2_counter)\n\nprint(\"Genres:\")\nfor g in sorted(comp2_genre_map, key=comp2_genre_map.get, reverse=True)[:10]:\n print(\"\\t\", g, comp2_genre_map[g])\nprint(\"Actors:\")\nfor a in sorted(comp2_actor_map, key=comp2_actor_map.get, reverse=True)[:10]:\n print(\"\\t\", a, comp2_actor_map[a])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a77c756e07d3d2ba8247bbd6f89297651de472
54,646
ipynb
Jupyter Notebook
docs/notebooks/04 Data -- Natural Language.ipynb
orabhan/twip
5c0411d2acfbe5b421841072814c9152591c03f7
[ "MIT" ]
19
2015-12-05T00:57:45.000Z
2020-01-08T10:38:45.000Z
docs/notebooks/04 Data -- Natural Language.ipynb
orabhan/twip
5c0411d2acfbe5b421841072814c9152591c03f7
[ "MIT" ]
null
null
null
docs/notebooks/04 Data -- Natural Language.ipynb
orabhan/twip
5c0411d2acfbe5b421841072814c9152591c03f7
[ "MIT" ]
21
2016-04-24T15:12:25.000Z
2021-04-24T17:16:22.000Z
45.95963
173
0.311496
[ [ [ "from setup import *\nimport sys\nif DATA_PATH not in sys.path: sys.path.append(DATA_PATH)\nfrom constants import *\n\n%matplotlib inline\ndisplay(HTML(\"<style>.container { width:100% !important; }</style>\"))\npd.set_option('display.max_rows', 4)\npd.set_option('display.max_columns', 200)\n", "_____no_output_____" ] ], [ [ "Let's look at some tweets", "_____no_output_____" ] ], [ [ "print('Loading previously \"cleaned\" tweets (could take a minute or so)...')\ndf = pd.read_csv(os.path.join(DATA_PATH, 'deduped_tweets.csv.gz'), index_col='id', compression='gzip',\n quotechar='\"', quoting=pd.io.common.csv.QUOTE_NONNUMERIC, low_memory=False)\nprint('Loaded {} tweets.'.format(len(df)))\ndf", "Loading previously \"cleaned\" tweets (could take a minute or so)...\nLoaded 183070 tweets.\n" ], [ "text = df.text.iloc[:10]\nfor tweet in text:\n print()\n print(tweet)\n # print(repr(tweet))", "\n#python never stop learning what you enjoy doing. https://t.co/IH5ZSKnU8K\n\nWatching Boa vs. Python — https://t.co/Pivpk02s2A\n\nMonty Python - The silly walk https://t.co/C0Ja8UHL4t via @YouTube\n\nSenior Software Engineer Full Stack Python Django And Php Jobs #jobs #jobsearch https://t.co/EuO3Et4JIT\n\nArchitect Django Solr Platform Engineer With Python 230k Jobs in Manhattan, NY #Manhattan #NY #jobs #jobsearch https://t.co/ge0RzBDoSP\n\npeaceful rain? Python - inevitability\n\n#How to make while loops in Python? [on hold]\n#Tech #Queations\nhttps://t.co/QKwHsSbmGC\n\n#How to make while loops in Python? [on hold]\n#Tech #Internet #Programming\nhttps://t.co/CcSYhYicJ1\n\n#How to make while loops in Python? [on hold]\n#HowTo #Question #Tech\nhttps://t.co/P09bSI7dGD\n\nRT @PythonWeekly: serialplot - A python 2.7 application that plots serial data in real time. https://t.co/LqHfjxhoLd #python\n" ] ], [ [ "So Even after subtracting \"-Monty\" in our search query, there are still a lot more meanings for Python than we intended \nThis is one of the key challenges of natural language procesing, \"ambiguity\" \nThere are a lot of names for dimension reduction techniques that attempt to determing meaning (semantics) from bag of words statistics (words used near each other) \n \n- Word2Vec\n- LSI: Latent Semantic Indexing\n- PCA: Principal Component Analysis\n- SVD: Singular Value Decomposition\n\n- LDA: Linear Discriminant Analysis\n- LDA: Latent Dirichlet Allocation\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0a78b213fbf688f5cc66e61c0ecc0149ab4332d
20,072
ipynb
Jupyter Notebook
notebooks/01-workflow-introduction.ipynb
kasiarachuta-zz/jupytercon2017-holoviews-tutorial
a799f21a96ed7e760734f22f241ecd44de621edb
[ "BSD-3-Clause" ]
null
null
null
notebooks/01-workflow-introduction.ipynb
kasiarachuta-zz/jupytercon2017-holoviews-tutorial
a799f21a96ed7e760734f22f241ecd44de621edb
[ "BSD-3-Clause" ]
null
null
null
notebooks/01-workflow-introduction.ipynb
kasiarachuta-zz/jupytercon2017-holoviews-tutorial
a799f21a96ed7e760734f22f241ecd44de621edb
[ "BSD-3-Clause" ]
1
2020-04-27T22:46:07.000Z
2020-04-27T22:46:07.000Z
34.020339
358
0.610403
[ [ [ "# 1. Workflow for building and deploying interactive dashboards", "_____no_output_____" ], [ "**Let's say you want to make it easy to explore some dataset. That is, you want to:** \n\n* Make a visualization of the data\n* Maybe add some custom widgets to see the effects of some variables\n* Then deploy the result as a web app.\n\n**You can definitely do that in Python, but you would expect to:**\n* Spend days of effort to get some initial prototype working in a Jupyter notebook\n* Work hard to tame the resulting opaque mishmash of domain-specific, widget, and plotting code\n* Start over nearly from scratch whenever you need to:\n - Deploy in a standalone server\n - Visualize different aspects of your data\n - Scale up to larger (>100K) datasets", "_____no_output_____" ], [ "# Step-by-step data-science workflow\n\nHere we'll show a simple, flexible, powerful, step-by-step workflow, explaining which open-source tools solve each of the problems involved:\n\n- Step 1: Get some data\n- Step 2: Prototype a plot in a notebook\n- Step 3: Define your domain model\n- Step 4: Get a widget-based UI for free\n- Step 5: Link your domain model to your visualization\n- Step 6: Widgets now control your interactive plots\n- Step 7: Deploy your dashboard", "_____no_output_____" ] ], [ [ "import holoviews as hv\nimport geoviews as gv\nimport param, paramnb, parambokeh\nimport dask.dataframe as dd\n\nfrom colorcet import cm\nfrom bokeh.models import WMTSTileSource\nfrom holoviews.operation.datashader import datashade\nfrom holoviews.operation import decimate\nfrom holoviews.streams import RangeXY, PlotSize", "_____no_output_____" ] ], [ [ "## Step 1: Get some data\n\n* Here we'll use a subset of the often-studied NYC Taxi dataset\n* About 12 million points of GPS locations from taxis\n* Stored in the efficient Parquet format for easy access\n* Loaded into a Dask dataframe for multi-core<br>(and if needed, out-of-core or distributed) computation", "_____no_output_____" ], [ "<div class=\"alert alert-warning\" role=\"alert\">\n <strong>Warning!</strong> If you are low on memory (less than 8 GB) load only a subset of the data by changing the line below to:\n <br>\n <code>df = dd.read_parquet('../data/nyc_taxi_hours.parq/')[:10000].persist()</code>\n</div>", "_____no_output_____" ] ], [ [ "%time df = dd.read_parquet('../data/nyc_taxi_hours.parq/').persist()\nprint(len(df))\ndf.head(2)", "_____no_output_____" ] ], [ [ "## Step 2: Prototype a plot in a notebook\n\n* A text-based representation isn't very useful for big datasets like this, so we need to build a plot\n* But we don't want to start a software project, so we use HoloViews:\n - Simple, declarative way to annotate your data for visualization\n - Large library of Elements with associated visual representation\n - Elements combine (lay out or overlay) easily\n* And we'll want live interactivity, so we'll use a Bokeh plotting extension\n* Result:", "_____no_output_____" ] ], [ [ "hv.extension('bokeh')", "_____no_output_____" ], [ "points = hv.Points(df, kdims=['pickup_x', 'pickup_y'])\ndecimate(points)", "_____no_output_____" ] ], [ [ "Here ``Points`` declares an object wrapping `df` and visualized as a scatterplot, and `decimate` limits the number of points that will be sent to the browser to avoid crashing it.\n\nAs you can see, HoloViews makes it very simple to pop up a visualization of your data, getting *something* on screen with only a few characters of typing. 
But it's not particularly pretty, so let's customize it a bit:", "_____no_output_____" ] ], [ [ "options = dict(width=700, height=600, xaxis=None, yaxis=None, bgcolor='black')\npoints = points.opts(plot=options)\ndecimate(points)", "_____no_output_____" ] ], [ [ "That looks a bit better, but it's still decimating the data nearly beyond recognition, so let's try using Datashader to rasterize it into a fixed-size image to send to the browser:", "_____no_output_____" ] ], [ [ "taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=cm['fire']).opts(plot=options)\ntaxi_trips", "_____no_output_____" ] ], [ [ "Ok, that looks good now; there's clearly lots to explore in this dataset. To put it in context, let's overlay that on a map:", "_____no_output_____" ] ], [ [ "taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=cm['fire']).opts(plot=options)\nwmts = WMTSTileSource(url='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg')\ntiles = gv.WMTS(wmts)\ntiles * taxi_trips", "_____no_output_____" ] ], [ [ "We could add lots more visual elements (laying out additional plots left and right, overlaying annotations, etc.), but let's say that this is our basic visualization we'll want to share. To sum up what we've done so far, here are the complete 11 lines of code required to generate this geo-located interactive plot of millions of datapoints in Jupyter:", "_____no_output_____" ], [ "```\nimport holoviews as hv, geoviews as gv, dask.dataframe as dd\nfrom colorcet import cm\nfrom holoviews.operation.datashader import datashade\nfrom bokeh.models import WMTSTileSource\n\nhv.extension('bokeh')\ndf = dd.read_parquet('../data/nyc_taxi_hours.parq/').persist()\noptions = dict(width=700, height=600, xaxis=None, yaxis=None, bgcolor='black')\npoints = hv.Points(df, kdims=['pickup_x', 'pickup_y'])\ntaxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=cm['fire']).opts(plot=options)\nwmts = WMTSTileSource(url='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg')\ngv.WMTS(wmts) * taxi_trips\n```", "_____no_output_____" ], [ "## Step 3: Define your domain model\n\nNow that we've prototyped a nice plot, we could keep editing the code above to explore whatever aspects of the data we wished. However, often at this point we will want to start sharing our workflow with people who aren't familar with how to program visualizations in this way. \n\nSo the next step: figure out what we want our intended user to be able to change, and declare those variables or parameters with:\n\n - type and range checking\n - documentation strings\n - default values\n \nThe Param library allows declaring Python attributes having these features (and more, such as dynamic values and inheritance), letting you set up a well-defined space for a user (or you!) 
to explore.", "_____no_output_____" ], [ "## NYC Taxi Parameters", "_____no_output_____" ] ], [ [ "class NYCTaxiExplorer(hv.streams.Stream):\n alpha = param.Magnitude(default=0.75, doc=\"Alpha value for the map opacity\")\n plot = param.ObjectSelector(default=\"pickup\", objects=[\"pickup\",\"dropoff\"])\n colormap = param.ObjectSelector(default=cm[\"fire\"], objects=cm.values())\n passengers = param.Range(default=(0, 10), bounds=(0, 10), doc=\"\"\"\n Filter for taxi trips by number of passengers\"\"\")", "_____no_output_____" ] ], [ [ "Each Parameter is a normal Python attribute, but with special checks and functions run automatically when getting or setting.\n\nParameters capture your goals and your knowledge about your domain, declaratively.", "_____no_output_____" ], [ "### Class level parameters", "_____no_output_____" ] ], [ [ "NYCTaxiExplorer.alpha", "_____no_output_____" ], [ "NYCTaxiExplorer.alpha = 0.5\nNYCTaxiExplorer.alpha", "_____no_output_____" ] ], [ [ "### Validation", "_____no_output_____" ] ], [ [ "try:\n NYCTaxiExplorer.alpha = '0'\nexcept Exception as e:\n print(e) ", "_____no_output_____" ] ], [ [ "### Instance parameters", "_____no_output_____" ] ], [ [ "explorer = NYCTaxiExplorer(alpha=0.6)\nexplorer.alpha", "_____no_output_____" ], [ "NYCTaxiExplorer.alpha", "_____no_output_____" ] ], [ [ "## Step 4: Get a widget-based UI for free\n\n* Parameters are purely declarative and independent of any widget toolkit, but contain all the information needed to build interactive widgets\n* ParamNB generates UIs in Jupyter from Parameters, using ipywidgets", "_____no_output_____" ] ], [ [ "paramnb.Widgets(NYCTaxiExplorer)", "_____no_output_____" ], [ "NYCTaxiExplorer.passengers", "_____no_output_____" ] ], [ [ "* ipywidgets work with Jupyter Dashboards Server for deployment", "_____no_output_____" ], [ "* Declaration of parameters is independent of the UI library used\n* ParamBokeh generates UIs from the same Parameters, using Bokeh widgets, either in Jupyter or in Bokeh Server", "_____no_output_____" ] ], [ [ "parambokeh.Widgets(NYCTaxiExplorer)", "_____no_output_____" ] ], [ [ "## Step 5: Link your domain model to your visualization\n\nWe've now defined the space that's available for exploration, and the next step is to link up the parameter space with the code that specifies the plot:", "_____no_output_____" ] ], [ [ "class NYCTaxiExplorer(hv.streams.Stream):\n alpha = param.Magnitude(default=0.75, doc=\"Alpha value for the map opacity\")\n colormap = param.ObjectSelector(default=cm[\"fire\"], objects=cm.values())\n plot = param.ObjectSelector(default=\"pickup\", objects=[\"pickup\",\"dropoff\"])\n passengers = param.Range(default=(0, 10), bounds=(0, 10))\n\n def make_view(self, x_range=None, y_range=None, **kwargs):\n map_tiles = tiles.opts(style=dict(alpha=self.alpha), plot=options) \n\n points = hv.Points(df, kdims=[self.plot+'_x', self.plot+'_y'], vdims=['passenger_count'])\n selected = points.select(passenger_count=self.passengers)\n taxi_trips = datashade(selected, x_sampling=1, y_sampling=1, cmap=self.colormap,\n dynamic=False, x_range=x_range, y_range=y_range,\n width=800, height=475)\n return map_tiles * taxi_trips", "_____no_output_____" ] ], [ [ "Note that the `NYCTaxiExplorer` class is entirely declarative (no widgets), and can be used \"by hand\" to provide range-checked and type-checked plotting for values from the declared parameter space:", "_____no_output_____" ] ], [ [ "explorer = NYCTaxiExplorer(alpha=0.2, plot=\"dropoff\")\nexplorer.make_view()", 
"_____no_output_____" ] ], [ [ "## Step 6: Widgets now control your interactive plots\n\nBut in practice, why not pop up the widgets to make it fully interactive?", "_____no_output_____" ] ], [ [ "explorer = NYCTaxiExplorer()\nparamnb.Widgets(explorer, callback=explorer.event)\nhv.DynamicMap(explorer.make_view, streams=[explorer, RangeXY()])", "_____no_output_____" ], [ "explorer = NYCTaxiExplorer()\nparambokeh.Widgets(explorer, callback=explorer.event)\nhv.DynamicMap(explorer.make_view, streams=[explorer, RangeXY()])", "_____no_output_____" ] ], [ [ "## Step 7: Deploy your dashboard\n\nOk, now you've got something worth sharing, running inside Jupyter. But if you want to share your work with people who don't use Python, you'll now want to run a server with this same code.\n\n* If you used **ParamBokeh**, deploy with **Bokeh Server**:\n - Write the above code to a file ``nyc_parambokeh.py``\n - Add ``, mode='server'`` to the ``Widgets()`` call to declare which object should be served\n - ``bokeh serve nyc_parambokeh.py``", "_____no_output_____" ], [ "* If you used **ParamNB**, deploy with **Jupyter Dashboard Server**:\n - Use [Jupyter Dashboards Extension](https://github.com/jupyter/dashboards) to select cells from the notebook to display\n - Use preview mode to see layout\n - Use [Jupyter Dashboards Server](https://github.com/jupyter-incubator/dashboards_server) to deploy\n - Note various caveats below", "_____no_output_____" ], [ "# Complete dashboard code\n\n```\nimport holoviews as hv, geoviews as gv, param, parambokeh, dask.dataframe as dd\n\nfrom colorcet import cm\nfrom bokeh.models import WMTSTileSource\nfrom holoviews.operation.datashader import datashade\nfrom holoviews.streams import RangeXY, PlotSize\n\nhv.extension('bokeh')\n\ndf = dd.read_parquet('./data/nyc_taxi.parq/').persist()\nurl='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg'\ntiles = gv.WMTS(WMTSTileSource(url=url))\ntile_options = dict(width=800,height=475,xaxis=None,yaxis=None,bgcolor='black',show_grid=False)\n\npassenger_counts = (0, df.passenger_count.max().compute()+1)\n\nclass NYCTaxiExplorer(hv.streams.Stream):\n alpha = param.Magnitude(default=0.75, doc=\"Alpha value for the map opacity\")\n colormap = param.ObjectSelector(default=cm[\"fire\"], objects=[cm[k] for k in cm.keys() if not '_' in k])\n plot = param.ObjectSelector(default=\"pickup\", objects=[\"pickup\",\"dropoff\"])\n passengers = param.Range(default=passenger_counts, bounds=passenger_counts)\n output = parambokeh.view.Plot()\n\n def make_view(self, x_range, y_range, alpha, colormap, plot, passengers, **kwargs):\n map_tiles = tiles(style=dict(alpha=alpha), plot=tile_options)\n points = hv.Points(df, kdims=[plot+'_x', plot+'_y'], vdims=['passenger_count'])\n if passengers != passenger_counts: points = points.select(passenger_count=passengers)\n taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=colormap,\n dynamic=False, x_range=x_range, y_range=y_range)\n return map_tiles * taxi_trips\n\nselector = NYCTaxiExplorer(name=\"NYC Taxi Trips\")\nselector.output = hv.DynamicMap(selector.make_view, streams=[selector, RangeXY(), PlotSize()])\n\nparambokeh.Widgets(selector, view_position='right', callback=selector.event, mode='server')\n```", "_____no_output_____" ], [ "# Branching out\n\nThe other sections in this tutorial will expand on steps in this workflow, providing more step-by-step instructions for each of the major tasks. 
These techniques can create much more ambitious apps with very little additional code or effort:\n\n* Adding additional linked or separate subplots of any type; see [2 - Annotating your data](./02-annotating-data.ipynb) and [4 - Exploration with containers](./04-exploration-with-containers.ipynb).\n* Declaring code that runs for clicking or selecting *within* the Bokeh plot; see [7 - Custom interactivity](./07-custom-interactivity.ipynb).\n* Using multiple sets of widgets of many different types; see [ParamNB](https://github.com/ioam/paramnb) and [ParamBokeh](https://github.com/ioam/parambokeh).\n* Using datasets too big for any one machine, with [Dask.Distributed](https://distributed.readthedocs.io).", "_____no_output_____" ], [ "# Future work\n\n* Jupyter Dashboards Server not currently maintained; requires older ipywidgets version\n* Bokeh Server is mature and well supported, but does not currently support drag-and-drop layout like Jupyter Dashboards does\n* ParamBokeh and ParamNB still need some polishing and work to make them ready for widespread use\n* E.g. ParamNB and ParamBokeh should provide more flexible widget layouts", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0a79f7139a2f272db7fec15dfecef43a2691879
4,269
ipynb
Jupyter Notebook
Newbie Bot.ipynb
andrewmaurer/newbie-bot
d6227d14b2bdf256dd21734c2e5a393e651bf928
[ "MIT" ]
null
null
null
Newbie Bot.ipynb
andrewmaurer/newbie-bot
d6227d14b2bdf256dd21734c2e5a393e651bf928
[ "MIT" ]
null
null
null
Newbie Bot.ipynb
andrewmaurer/newbie-bot
d6227d14b2bdf256dd21734c2e5a393e651bf928
[ "MIT" ]
null
null
null
33.880952
891
0.596861
[ [ [ "# Newbie Bot\n\nScrape subreddits and use NLP to answer frequently asked questions.", "_____no_output_____" ] ], [ [ "import sys\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nimport joblib\nfrom joblib import dump, load\n\npackages = [joblib, np, pandas]\n\nprint(f'python: {sys.version.spl}')\nfor package in packages:\n print(f'{package.__name__}: {package.__version__}')", "_____no_output_____" ] ], [ [ "# Scrape Reddit Data", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0a7b4b4665ac20355eb111218f96fb5109399d5
62,424
ipynb
Jupyter Notebook
examples/models/sklearn_spacy_text/sklearn_spacy_text_classifier_example.ipynb
markusgay/seldon-core
b9ebfdfd63e5f7b23311b81ba78e36aa08e87640
[ "Apache-2.0" ]
null
null
null
examples/models/sklearn_spacy_text/sklearn_spacy_text_classifier_example.ipynb
markusgay/seldon-core
b9ebfdfd63e5f7b23311b81ba78e36aa08e87640
[ "Apache-2.0" ]
null
null
null
examples/models/sklearn_spacy_text/sklearn_spacy_text_classifier_example.ipynb
markusgay/seldon-core
b9ebfdfd63e5f7b23311b81ba78e36aa08e87640
[ "Apache-2.0" ]
null
null
null
45.202028
6,108
0.611191
[ [ [ "# SKLearn Spacy Reddit Text Classification Example\n\nIn this example we will be buiding a text classifier using the reddit content moderation dataset.\n\nFor this, we will be using SpaCy for the word tokenization and lemmatization. \n\nThe classification will be done with a Logistic Regression binary classifier.\n\nThe steps in this tutorial include:\n\n1) Train and build your NLP model\n\n2) Build your containerized model\n\n3) Test your model as a docker container\n\n4) Run Seldon in your kubernetes cluster\n\n5) Deploy your model with Seldon\n\n6) Interact with your model through API\n\n7) Clean your environment\n\n\n### Before you start\nMake sure you install the following dependencies, as they are critical for this example to work:\n\n* Helm v2.13.1+\n* A Kubernetes cluster running v1.13 or above (minkube / docker-for-windows work well if enough RAM)\n* kubectl v1.14+\n* Python 3.6+\n* Python DEV requirements (we'll install them below)\n\nLet's get started! 🚀🔥\n\n## 1) Train and build your NLP model", "_____no_output_____" ] ], [ [ "# Let's first install any dependencies\n!pip install -r requirements.txt", "_____no_output_____" ], [ "import pandas as pd \nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom seldon_core.seldon_client import SeldonClient\nimport dill\nimport sys, os\n\n# This import may take a while as it will download the Spacy ENGLISH model\nfrom ml_utils import CleanTextTransformer, SpacyTokenTransformer", "_____no_output_____" ], [ "df_cols = [\"prev_idx\", \"parent_idx\", \"body\", \"removed\"]\n\nTEXT_COLUMN = \"body\" \nCLEAN_COLUMN = \"clean_body\"\nTOKEN_COLUMN = \"token_body\"\n\n# Downloading the 50k reddit dataset of moderated comments\ndf = pd.read_csv(\"https://raw.githubusercontent.com/axsauze/reddit-classification-exploration/master/data/reddit_train.csv\", \n names=df_cols, skiprows=1, encoding=\"ISO-8859-1\")\n\ndf.head()", "_____no_output_____" ], [ "# Let's see how many examples we have of each class\ndf[\"removed\"].value_counts().plot.bar()", "_____no_output_____" ], [ "x = df[\"body\"].values\ny = df[\"removed\"].values\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, \n stratify=y, \n random_state=42, \n test_size=0.1, shuffle=True)", "_____no_output_____" ], [ "# Clean the text\nclean_text_transformer = CleanTextTransformer()\nx_train_clean = clean_text_transformer.transform(x_train)", "_____no_output_____" ], [ "# Tokenize the text and get the lemmas\nspacy_tokenizer = SpacyTokenTransformer()\nx_train_tokenized = spacy_tokenizer.transform(x_train_clean)", "_____no_output_____" ], [ "# Build tfidf vectorizer\ntfidf_vectorizer = TfidfVectorizer(\n max_features=10000,\n preprocessor=lambda x: x, \n tokenizer=lambda x: x, \n token_pattern=None,\n ngram_range=(1, 3))\n\ntfidf_vectorizer.fit(x_train_tokenized)", "_____no_output_____" ], [ "# Transform our tokens to tfidf vectors\nx_train_tfidf = tfidf_vectorizer.transform(\n x_train_tokenized)", "_____no_output_____" ], [ "# Train logistic regression classifier\nlr = LogisticRegression(C=0.1, solver='sag')\nlr.fit(x_train_tfidf, y_train)", "_____no_output_____" ], [ "# These are the models we'll deploy\nwith open('tfidf_vectorizer.model', 'wb') as model_file:\n dill.dump(tfidf_vectorizer, model_file)\nwith open('lr.model', 'wb') as model_file:\n dill.dump(lr, model_file)", "_____no_output_____" ] ], [ [ "## 2) Build your containerized model", 
"_____no_output_____" ] ], [ [ "# This is the class we will use to deploy\n!cat RedditClassifier.py", "import dill\r\n\r\nfrom ml_utils import CleanTextTransformer, SpacyTokenTransformer\r\n\r\nclass RedditClassifier(object):\r\n def __init__(self):\r\n \r\n self._clean_text_transformer = CleanTextTransformer()\r\n self._spacy_tokenizer = SpacyTokenTransformer()\r\n \r\n with open('tfidf_vectorizer.model', 'rb') as model_file:\r\n self._tfidf_vectorizer = dill.load(model_file)\r\n \r\n with open('lr.model', 'rb') as model_file:\r\n self._lr_model = dill.load(model_file)\r\n\r\n def predict(self, X, feature_names):\r\n clean_text = self._clean_text_transformer.transform(X)\r\n spacy_tokens = self._spacy_tokenizer.transform(clean_text)\r\n tfidf_features = self._tfidf_vectorizer.transform(spacy_tokens)\r\n predictions = self._lr_model.predict_proba(tfidf_features)\r\n return predictions\r\n\r\n" ], [ "# test that our model works\nfrom RedditClassifier import RedditClassifier\n# With one sample\nsample = x_test[0:1]\nprint(sample)\nprint(RedditClassifier().predict(sample, [\"feature_name\"]))", "['This is the study that the article is based on:\\r\\n\\r\\nhttps://www.nature.com/articles/nature25778.epdf']\n[[0.82767095 0.17232905]]\n" ] ], [ [ "### Create Docker Image with the S2i utility\nUsing the S2I command line interface we wrap our current model to seve it through the Seldon interface", "_____no_output_____" ] ], [ [ "# To create a docker image we need to create the .s2i folder configuration as below:\n!cat .s2i/environment", "MODEL_NAME=RedditClassifier\r\nAPI_TYPE=REST\r\nSERVICE_TYPE=MODEL\r\nPERSISTENCE=0\r\n" ], [ "# As well as a requirements.txt file with all the relevant dependencies\n!cat requirements.txt", "scipy>= 0.13.3\r\nscikit-learn>=0.18\r\nspacy==2.0.18\r\ndill==0.2.9\r\nseldon-core==0.2.7\r\n" ], [ "!s2i build . seldonio/seldon-core-s2i-python3:0.11 reddit-classifier:0.1", "---> Installing application source...\n---> Installing dependencies ...\nLooking in links: /whl\nCollecting scipy>=0.13.3 (from -r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/72/4c/5f81e7264b0a7a8bd570810f48cd346ba36faedbd2ba255c873ad556de76/scipy-1.3.0-cp36-cp36m-manylinux1_x86_64.whl (25.2MB)\nCollecting scikit-learn>=0.18 (from -r requirements.txt (line 2))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/85/04/49633f490f726da6e454fddc8e938bbb5bfed2001681118d3814c219b723/scikit_learn-0.21.2-cp36-cp36m-manylinux1_x86_64.whl (6.7MB)\nCollecting spacy==2.0.18 (from -r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/ae/6e/a89da6b5c83f8811e46e3a9270c1aed90e9b9ee6c60faf52b7239e5d3d69/spacy-2.0.18-cp36-cp36m-manylinux1_x86_64.whl (25.2MB)\nCollecting dill==0.2.9 (from -r requirements.txt (line 4))\n WARNING: Url '/whl' is ignored. 
It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/fe/42/bfe2e0857bc284cbe6a011d93f2a9ad58a22cb894461b199ae72cfef0f29/dill-0.2.9.tar.gz (150kB)\nRequirement already satisfied: seldon-core==0.2.7 in /usr/local/lib/python3.6/site-packages (from -r requirements.txt (line 5)) (0.2.7)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.6/site-packages (from scipy>=0.13.3->-r requirements.txt (line 1)) (1.16.3)\nCollecting joblib>=0.11 (from scikit-learn>=0.18->-r requirements.txt (line 2))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/cd/c1/50a758e8247561e58cb87305b1e90b171b8c767b15b12a1734001f41d356/joblib-0.13.2-py2.py3-none-any.whl (278kB)\nCollecting murmurhash<1.1.0,>=0.28.0 (from spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/a6/e6/63f160a4fdf0e875d16b28f972083606d8d54f56cd30cb8929f9a1ee700e/murmurhash-1.0.2-cp36-cp36m-manylinux1_x86_64.whl\nCollecting thinc<6.13.0,>=6.12.1 (from spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/db/a7/46640a46fd707aeb204aa4257a70974b6a22a0204ba703164d803215776f/thinc-6.12.1-cp36-cp36m-manylinux1_x86_64.whl (1.9MB)\nCollecting plac<1.0.0,>=0.9.6 (from spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/9e/9b/62c60d2f5bc135d2aa1d8c8a86aaf84edb719a59c7f11a4316259e61a298/plac-0.9.6-py2.py3-none-any.whl\nCollecting ujson>=1.35 (from spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/16/c4/79f3409bc710559015464e5f49b9879430d8f87498ecdc335899732e5377/ujson-1.35.tar.gz (192kB)\nCollecting regex==2018.01.10 (from spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/76/f4/7146c3812f96fcaaf2d06ff6862582302626a59011ccb6f2833bb38d80f7/regex-2018.01.10.tar.gz (612kB)\nRequirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.6/site-packages (from spacy==2.0.18->-r requirements.txt (line 3)) (2.21.0)\nCollecting preshed<2.1.0,>=2.0.1 (from spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/20/93/f222fb957764a283203525ef20e62008675fd0a14ffff8cc1b1490147c63/preshed-2.0.1-cp36-cp36m-manylinux1_x86_64.whl (83kB)\nCollecting cymem<2.1.0,>=2.0.2 (from spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. 
It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/3d/61/9b0520c28eb199a4b1ca667d96dd625bba003c14c75230195f9691975f85/cymem-2.0.2-cp36-cp36m-manylinux1_x86_64.whl\nRequirement already satisfied: jaeger-client in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (4.0.0)\nRequirement already satisfied: Flask-OpenTracing==0.2.0 in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (0.2.0)\nRequirement already satisfied: grpcio in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (1.20.1)\nRequirement already satisfied: tensorflow in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (1.13.1)\nRequirement already satisfied: grpcio-opentracing in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (1.1.4)\nRequirement already satisfied: flatbuffers in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (1.11)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (5.1)\nRequirement already satisfied: protobuf in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (3.7.1)\nRequirement already satisfied: opentracing<2,>=1.2.2 in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (1.3.0)\nRequirement already satisfied: flask-cors in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (3.0.7)\nRequirement already satisfied: flask in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (1.0.2)\nRequirement already satisfied: redis in /usr/local/lib/python3.6/site-packages (from seldon-core==0.2.7->-r requirements.txt (line 5)) (3.2.1)\nCollecting tqdm<5.0.0,>=4.10.0 (from thinc<6.13.0,>=6.12.1->spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/45/af/685bf3ce889ea191f3b916557f5677cc95a5e87b2fa120d74b5dd6d049d0/tqdm-4.32.1-py2.py3-none-any.whl (49kB)\nCollecting msgpack<0.6.0,>=0.5.6 (from thinc<6.13.0,>=6.12.1->spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/22/4e/dcf124fd97e5f5611123d6ad9f40ffd6eb979d1efdc1049e28a795672fcd/msgpack-0.5.6-cp36-cp36m-manylinux1_x86_64.whl (315kB)\nRequirement already satisfied: six<2.0.0,>=1.10.0 in /usr/local/lib/python3.6/site-packages (from thinc<6.13.0,>=6.12.1->spacy==2.0.18->-r requirements.txt (line 3)) (1.12.0)\nCollecting wrapt<1.11.0,>=1.10.0 (from thinc<6.13.0,>=6.12.1->spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/a0/47/66897906448185fcb77fc3c2b1bc20ed0ecca81a0f2f88eda3fc5a34fc3d/wrapt-1.10.11.tar.gz\nCollecting msgpack-numpy<0.4.4 (from thinc<6.13.0,>=6.12.1->spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. 
It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/ad/45/464be6da85b5ca893cfcbd5de3b31a6710f636ccb8521b17bd4110a08d94/msgpack_numpy-0.4.3.2-py2.py3-none-any.whl\nCollecting cytoolz<0.10,>=0.9.0 (from thinc<6.13.0,>=6.12.1->spacy==2.0.18->-r requirements.txt (line 3))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/36/f4/9728ba01ccb2f55df9a5af029b48ba0aaca1081bbd7823ea2ee223ba7a42/cytoolz-0.9.0.1.tar.gz (443kB)\n" ] ], [ [ "## 3) Test your model as a docker container", "_____no_output_____" ] ], [ [ "# Remove previously deployed containers for this model\n!docker rm -f reddit_predictor", "Error: No such container: reddit_predictor\r\n" ], [ "!docker run --name \"reddit_predictor\" -d --rm -p 5001:5000 reddit-classifier:0.1", "be29c6a00adec0f708dc5a1c83613e0656fddc06daba4ca02d93b5a7ece9b92b\r\n" ] ], [ [ "### Make sure you wait for language model\nSpaCy will download the English language model, so you have to make sure the container finishes downloading it before it can be used. You can view this by running the logs until you see \"Linking successful\".", "_____no_output_____" ] ], [ [ "# Here we need to wait until we see \"Linking successful\", as it's downloading the Spacy English model\n# You can hit stop when this happens\n!docker logs -t -f reddit_predictor", "2019-05-27T13:50:12.739381600Z starting microservice\n2019-05-27T13:50:14.023399000Z 2019-05-27 13:50:14,023 - seldon_core.microservice:main:154 - INFO: Starting microservice.py:main\n2019-05-27T13:50:14.024836400Z 2019-05-27 13:50:14,024 - seldon_core.microservice:main:185 - INFO: Annotations: {}\n2019-05-27T13:50:14.686919400Z Collecting en_core_web_sm==2.0.0 from https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.0.0/en_core_web_sm-2.0.0.tar.gz#egg=en_core_web_sm==2.0.0\n2019-05-27T13:50:15.402484400Z Downloading https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.0.0/en_core_web_sm-2.0.0.tar.gz (37.4MB)\n2019-05-27T13:50:47.771818100Z Building wheels for collected packages: en-core-web-sm\n2019-05-27T13:50:47.772287600Z Building wheel for en-core-web-sm (setup.py): started\n2019-05-27T13:50:49.845376700Z Building wheel for en-core-web-sm (setup.py): finished with status 'done'\n2019-05-27T13:50:49.845641500Z Stored in directory: /tmp/pip-ephem-wheel-cache-wszfsf1z/wheels/54/7c/d8/f86364af8fbba7258e14adae115f18dd2c91552406edc3fdaa\n2019-05-27T13:50:50.163985100Z Successfully built en-core-web-sm\n2019-05-27T13:50:50.164057000Z Installing collected packages: en-core-web-sm\n2019-05-27T13:50:50.242852700Z Successfully installed en-core-web-sm-2.0.0\n2019-05-27T13:50:50.400850200Z WARNING: You are using pip version 19.1, however version 19.1.1 is available.\n2019-05-27T13:50:50.400901100Z You should consider upgrading via the 'pip install --upgrade pip' command.\n2019-05-27T13:50:51.728895100Z --- Logging error ---\n2019-05-27T13:50:51.728944900Z Traceback (most recent call last):\n2019-05-27T13:50:51.728954200Z File \"/usr/local/lib/python3.6/logging/__init__.py\", line 994, in emit\n2019-05-27T13:50:51.728958900Z msg = self.format(record)\n2019-05-27T13:50:51.728963000Z File \"/usr/local/lib/python3.6/logging/__init__.py\", line 840, in format\n2019-05-27T13:50:51.728966900Z return fmt.format(record)\n2019-05-27T13:50:51.728970500Z File \"/usr/local/lib/python3.6/logging/__init__.py\", line 577, in 
format\n2019-05-27T13:50:51.728974300Z record.message = record.getMessage()\n2019-05-27T13:50:51.728977900Z File \"/usr/local/lib/python3.6/logging/__init__.py\", line 338, in getMessage\n2019-05-27T13:50:51.728981600Z msg = msg % self.args\n2019-05-27T13:50:51.728985100Z TypeError: not all arguments converted during string formatting\n2019-05-27T13:50:51.728988800Z Call stack:\n2019-05-27T13:50:51.728992300Z File \"/usr/local/bin/seldon-core-microservice\", line 10, in <module>\n2019-05-27T13:50:51.728996500Z sys.exit(main())\n2019-05-27T13:50:51.729000000Z File \"/usr/local/lib/python3.6/site-packages/seldon_core/microservice.py\", line 189, in main\n2019-05-27T13:50:51.729004000Z logger.info(\"Importing \",args.interface_name)\n2019-05-27T13:50:51.729007800Z Message: 'Importing '\n2019-05-27T13:50:51.729011400Z Arguments: ('RedditClassifier',)\n2019-05-27T13:50:51.729025900Z /usr/local/lib/python3.6/site-packages/sklearn/base.py:306: UserWarning: Trying to unpickle estimator TfidfTransformer from version 0.20.3 when using version 0.21.2. This might lead to breaking code or invalid results. Use at your own risk.\n2019-05-27T13:50:51.729030000Z UserWarning)\n2019-05-27T13:50:51.729033400Z /usr/local/lib/python3.6/site-packages/sklearn/base.py:306: UserWarning: Trying to unpickle estimator TfidfVectorizer from version 0.20.3 when using version 0.21.2. This might lead to breaking code or invalid results. Use at your own risk.\n2019-05-27T13:50:51.729036900Z UserWarning)\n2019-05-27T13:50:51.729040100Z /usr/local/lib/python3.6/site-packages/sklearn/base.py:306: UserWarning: Trying to unpickle estimator LogisticRegression from version 0.20.3 when using version 0.21.2. This might lead to breaking code or invalid results. Use at your own risk.\n2019-05-27T13:50:51.729044000Z UserWarning)\n2019-05-27T13:50:51.729047500Z 2019-05-27 13:50:51,727 - seldon_core.microservice:main:226 - INFO: REST microservice running on port 5000\n2019-05-27T13:50:51.729051200Z 2019-05-27 13:50:51,728 - seldon_core.microservice:main:260 - INFO: Starting servers\n2019-05-27T13:50:51.730423900Z \n2019-05-27T13:50:51.730464700Z \u001b[93m Linking successful\u001b[0m\n2019-05-27T13:50:51.730473700Z /usr/local/lib/python3.6/site-packages/en_core_web_sm -->\n2019-05-27T13:50:51.730477700Z /usr/local/lib/python3.6/site-packages/spacy/data/en_core_web_sm\n2019-05-27T13:50:51.730481100Z \n2019-05-27T13:50:51.730484300Z You can now load the model via spacy.load('en_core_web_sm')\n2019-05-27T13:50:51.730487600Z \n2019-05-27T13:50:51.743475000Z * Serving Flask app \"seldon_core.wrapper\" (lazy loading)\n2019-05-27T13:50:51.743530400Z * Environment: production\n2019-05-27T13:50:51.743538900Z WARNING: Do not use the development server in a production environment.\n2019-05-27T13:50:51.743542800Z Use a production WSGI server instead.\n2019-05-27T13:50:51.743546000Z * Debug mode: off\n2019-05-27T13:50:51.760002000Z 2019-05-27 13:50:51,759 - werkzeug:_log:122 - INFO: * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)\n^C\n" ], [ "# We now test the REST endpoint expecting the same result\nendpoint = \"0.0.0.0:5001\"\nbatch = sample\npayload_type = \"ndarray\"\n\nsc = SeldonClient(microservice_endpoint=endpoint)\nresponse = sc.microservice(\n data=batch,\n method=\"predict\",\n payload_type=payload_type,\n names=[\"tfidf\"])\n\nprint(response)", "Success:True message:\nRequest:\ndata {\n names: \"tfidf\"\n ndarray {\n values {\n string_value: \"This is the study that the article is based 
on:\\r\\n\\r\\nhttps://www.nature.com/articles/nature25778.epdf\"\n }\n }\n}\n\nResponse:\nmeta {\n}\ndata {\n names: \"t:0\"\n names: \"t:1\"\n ndarray {\n values {\n list_value {\n values {\n number_value: 0.8276709475641506\n }\n values {\n number_value: 0.1723290524358494\n }\n }\n }\n }\n}\n\n" ], [ "# We now stop it to run it in docker\n!docker stop reddit_predictor", "reddit_predictor\r\n" ] ], [ [ "## 4) Run Seldon in your kubernetes cluster\nIn order to run Seldon we need to make sure that Helm is initialised and Tiller is running. \n\nFor this we can run the following initialisation and waiting commands.", "_____no_output_____" ] ], [ [ "# If not running you can install it\n# First initialise helm\n!kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default\n!helm init\n!kubectl rollout status deploy/tiller-deploy -n kube-system", "clusterrolebinding.rbac.authorization.k8s.io/kube-system-cluster-admin created\n$HELM_HOME has been configured at /home/alejandro/.helm.\nWarning: Tiller is already installed in the cluster.\n(Use --client-only to suppress this message, or --upgrade to upgrade Tiller to the current version.)\nHappy Helming!\ndeployment \"tiller-deploy\" successfully rolled out\n" ] ], [ [ "Now we can install run the Seldon Operator using the latest Helm charts", "_____no_output_____" ] ], [ [ "!helm install seldon-core-operator --name seldon-core-operator --repo https://storage.googleapis.com/seldon-charts", "NAME: seldon-core-operator\nLAST DEPLOYED: Mon May 27 15:04:30 2019\nNAMESPACE: default\nSTATUS: DEPLOYED\n\nRESOURCES:\n==> v1/ClusterRole\nNAME AGE\nseldon-operator-manager-role 0s\n\n==> v1/ClusterRoleBinding\nNAME AGE\nseldon-operator-manager-rolebinding 0s\n\n==> v1/Pod(related)\nNAME READY STATUS RESTARTS AGE\nseldon-operator-controller-manager-0 0/1 ContainerCreating 0 0s\n\n==> v1/Secret\nNAME TYPE DATA AGE\nseldon-operator-webhook-server-secret Opaque 0 0s\n\n==> v1/Service\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nseldon-operator-controller-manager-service ClusterIP 10.101.147.136 <none> 443/TCP 0s\n\n==> v1/StatefulSet\nNAME READY AGE\nseldon-operator-controller-manager 0/1 0s\n\n==> v1beta1/CustomResourceDefinition\nNAME AGE\nseldondeployments.machinelearning.seldon.io 0s\n\n\nNOTES:\nNOTES: TODO\n\n\n" ] ], [ [ "And we can make sure that it is actually running with the following command ", "_____no_output_____" ] ], [ [ "!kubectl get pod | grep seldon", "seldon-operator-controller-manager-0 1/1 Running 1 12s\r\n" ] ], [ [ "In order for us to be able to reach the model, we will need to set up an ingress. 
For this we will use ambassador:", "_____no_output_____" ] ], [ [ "!helm install stable/ambassador --name ambassador --set crds.keep=false", "NAME: ambassador\nLAST DEPLOYED: Mon May 27 15:04:50 2019\nNAMESPACE: default\nSTATUS: DEPLOYED\n\nRESOURCES:\n==> v1/Deployment\nNAME READY UP-TO-DATE AVAILABLE AGE\nambassador 0/3 3 0 0s\n\n==> v1/Pod(related)\nNAME READY STATUS RESTARTS AGE\nambassador-7bfc87f865-jkxs8 0/1 ContainerCreating 0 0s\nambassador-7bfc87f865-nr7bn 0/1 ContainerCreating 0 0s\nambassador-7bfc87f865-q4lng 0/1 ContainerCreating 0 0s\n\n==> v1/Service\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nambassador LoadBalancer 10.101.89.32 localhost 80:30004/TCP,443:31350/TCP 0s\nambassador-admins ClusterIP 10.98.228.159 <none> 8877/TCP 0s\n\n==> v1/ServiceAccount\nNAME SECRETS AGE\nambassador 1 1s\n\n==> v1beta1/ClusterRole\nNAME AGE\nambassador 1s\n\n==> v1beta1/ClusterRoleBinding\nNAME AGE\nambassador 1s\n\n\nNOTES:\nCongratuations! You've successfully installed Ambassador.\n\nFor help, visit our Slack at https://d6e.co/slack or view the documentation online at https://www.getambassador.io.\n\nTo get the IP address of Ambassador, run the following commands:\nNOTE: It may take a few minutes for the LoadBalancer IP to be available.\n You can watch the status of by running 'kubectl get svc -w --namespace default ambassador'\n\n On GKE/Azure:\n export SERVICE_IP=$(kubectl get svc --namespace default ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}')\n\n On AWS:\n export SERVICE_IP=$(kubectl get svc --namespace default ambassador -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')\n\n echo http://$SERVICE_IP:\n\n" ] ], [ [ "We can now see the ambassador service is running. In our case we can reach it through the external IP which is our localhost, but if you are using a cloud provider, make sure you have access to the ambassador endpoint.", "_____no_output_____" ] ], [ [ "!kubectl get svc ambassador", "NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\r\nambassador LoadBalancer 10.101.89.32 localhost 80:30004/TCP,443:31350/TCP 2m43s\r\n" ] ], [ [ "## 5) Deploy your model with Seldon\nWe can now deploy our model by using the Seldon graph definition:", "_____no_output_____" ] ], [ [ "# We'll use our seldon deployment file\n!cat reddit_clf.json", "{\r\n \"apiVersion\": \"machinelearning.seldon.io/v1alpha2\",\r\n \"kind\": \"SeldonDeployment\",\r\n \"metadata\": {\r\n \"labels\": {\r\n \"app\": \"seldon\"\r\n },\r\n \"name\": \"reddit-classifier\"\r\n },\r\n \"spec\": {\r\n \"annotations\": {\r\n \"project_name\": \"Reddit classifier\",\r\n \"deployment_version\": \"v1\"\r\n },\r\n \"name\": \"reddit-classifier\",\r\n \"oauth_key\": \"oauth-key\",\r\n \"oauth_secret\": \"oauth-secret\",\r\n \"predictors\": [\r\n {\r\n \"componentSpecs\": [{\r\n \"spec\": {\r\n \"containers\": [\r\n {\r\n \"image\": \"reddit-classifier:0.1\",\r\n \"imagePullPolicy\": \"IfNotPresent\",\r\n \"name\": \"classifier\",\r\n \"resources\": {\r\n \"requests\": {\r\n \"memory\": \"1Mi\"\r\n }\r\n }\r\n }\r\n ],\r\n \"terminationGracePeriodSeconds\": 20\r\n }\r\n }],\r\n \"graph\": {\r\n \"children\": [],\r\n \"name\": \"classifier\",\r\n \"endpoint\": {\r\n \"type\" : \"REST\"\r\n },\r\n \"type\": \"MODEL\"\r\n },\r\n \"name\": \"single-model\",\r\n \"replicas\": 1,\r\n \"annotations\": {\r\n \"predictor_version\" : \"v1\"\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n" ], [ "!kubectl apply -f reddit_clf.json", "seldondeployment.machinelearning.seldon.io/reddit-classifier created\r\n" ], [ "!kubectl get pods ", 
"NAME READY STATUS RESTARTS AGE\r\nambassador-7bfc87f865-jkxs8 1/1 Running 0 5m2s\r\nambassador-7bfc87f865-nr7bn 1/1 Running 0 5m2s\r\nambassador-7bfc87f865-q4lng 1/1 Running 0 5m2s\r\nreddit-classifier-single-model-9199e4b-bcc5cdcc-g8j2q 2/2 Running 1 77s\r\nseldon-operator-controller-manager-0 1/1 Running 1 5m23s\r\n" ] ], [ [ "## 6) Interact with your model through API\nNow that our Seldon Deployment is live, we are able to interact with it through its API.\n\nThere are two options in which we can interact with our new model. These are:\n\na) Using CURL from the CLI (or another rest client like Postman)\n\nb) Using the Python SeldonClient\n\n#### a) Using CURL from the CLI", "_____no_output_____" ] ], [ [ "%%bash\ncurl -X POST -H 'Content-Type: application/json' \\\n -d \"{'data': {'names': ['text'], 'ndarray': ['Hello world this is a test']}}\" \\\n http://127.0.0.1/seldon/default/reddit-classifier/api/v0.1/predictions", "{\n \"meta\": {\n \"puid\": \"bvj1rjiq3vvnieo0oir4h7bf6f\",\n \"tags\": {\n },\n \"routing\": {\n },\n \"requestPath\": {\n \"classifier\": \"reddit-classifier:0.1\"\n },\n \"metrics\": []\n },\n \"data\": {\n \"names\": [\"t:0\", \"t:1\"],\n \"ndarray\": [[0.6815614604065544, 0.3184385395934456]]\n }\n}" ] ], [ [ "#### b) Using the Python SeldonClient", "_____no_output_____" ] ], [ [ "from seldon_core.seldon_client import SeldonClient\nimport numpy as np\n\nhost = \"localhost\"\nport = \"80\" # Make sure you use the port above\nbatch = np.array([\"Hello world this is a test\"])\npayload_type = \"ndarray\"\ndeployment_name=\"reddit-classifier\"\ntransport=\"rest\"\nnamespace=\"default\"\n\nsc = SeldonClient(\n gateway=\"ambassador\", \n ambassador_endpoint=host + \":\" + port,\n namespace=namespace)\n\nclient_prediction = sc.predict(\n data=batch, \n deployment_name=deployment_name,\n names=[\"text\"],\n payload_type=payload_type,\n transport=\"rest\")\n\nprint(client_prediction)", "Success:True message:\nRequest:\ndata {\n names: \"text\"\n ndarray {\n values {\n string_value: \"Hello world this is a test\"\n }\n }\n}\n\nResponse:\nmeta {\n puid: \"uld2famhfrb97vd7regu0q7k32\"\n requestPath {\n key: \"classifier\"\n value: \"reddit-classifier:0.1\"\n }\n}\ndata {\n names: \"t:0\"\n names: \"t:1\"\n ndarray {\n values {\n list_value {\n values {\n number_value: 0.6815614604065544\n }\n values {\n number_value: 0.3184385395934456\n }\n }\n }\n }\n}\n\n" ] ], [ [ "## 7) Clean your environment", "_____no_output_____" ] ], [ [ "!kubectl delete -f reddit_clf.json", "_____no_output_____" ], [ "!helm del --purge ambassador", "release \"ambassador\" deleted\r\n" ], [ "!helm del --purge seldon-core-operator", "release \"seldon-core-operator\" deleted\r\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0a7bb80e9c684e91a861206d7f2ef913c817adf
14,583
ipynb
Jupyter Notebook
Image-Segmentation/lesson3-camvid-tiramisu.ipynb
dustinvanstee/aicoc-ai-immersion
ab73cda69a6b630ef7b535ab3c52daccbdc03505
[ "Apache-2.0" ]
3
2020-10-07T16:19:23.000Z
2020-10-07T19:28:01.000Z
Image-Segmentation/lesson3-camvid-tiramisu.ipynb
dustinvanstee/aicoc-ai-immersion
ab73cda69a6b630ef7b535ab3c52daccbdc03505
[ "Apache-2.0" ]
null
null
null
Image-Segmentation/lesson3-camvid-tiramisu.ipynb
dustinvanstee/aicoc-ai-immersion
ab73cda69a6b630ef7b535ab3c52daccbdc03505
[ "Apache-2.0" ]
2
2020-10-07T18:07:33.000Z
2020-10-07T18:21:44.000Z
24.97089
1,469
0.55695
[ [ [ "## Image segmentation with CamVid", "_____no_output_____" ] ], [ [ "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "from fastai import *\nfrom fastai.vision import *\nfrom fastai.callbacks.hooks import *", "_____no_output_____" ] ], [ [ "The One Hundred Layer Tiramisu paper used a modified version of Camvid, with smaller images and few classes. You can get it from the CamVid directory of this repo:\n\n git clone https://github.com/alexgkendall/SegNet-Tutorial.git", "_____no_output_____" ] ], [ [ "path = Path('./data/camvid-tiramisu')", "_____no_output_____" ], [ "path.ls()", "_____no_output_____" ] ], [ [ "## Data", "_____no_output_____" ] ], [ [ "fnames = get_image_files(path/'val')\nfnames[:3]", "_____no_output_____" ], [ "lbl_names = get_image_files(path/'valannot')\nlbl_names[:3]", "_____no_output_____" ], [ "img_f = fnames[0]\nimg = open_image(img_f)\nimg.show(figsize=(5,5))", "_____no_output_____" ], [ "def get_y_fn(x): return Path(str(x.parent)+'annot')/x.name\n\ncodes = array(['Sky', 'Building', 'Pole', 'Road', 'Sidewalk', 'Tree',\n 'Sign', 'Fence', 'Car', 'Pedestrian', 'Cyclist', 'Void'])", "_____no_output_____" ], [ "mask = open_mask(get_y_fn(img_f))\nmask.show(figsize=(5,5), alpha=1)", "_____no_output_____" ], [ "src_size = np.array(mask.shape[1:])\nsrc_size,mask.data", "_____no_output_____" ] ], [ [ "## Datasets", "_____no_output_____" ] ], [ [ "bs = 8", "_____no_output_____" ], [ "src = (SegmentationItemList.from_folder(path)\n .split_by_folder(valid='val')\n .label_from_func(get_y_fn, classes=codes))", "_____no_output_____" ], [ "data = (src.transform(get_transforms(), tfm_y=True)\n .databunch(bs=bs)\n .normalize(imagenet_stats))", "_____no_output_____" ], [ "data.show_batch(2, figsize=(10,7))", "_____no_output_____" ] ], [ [ "## Model", "_____no_output_____" ] ], [ [ "name2id = {v:k for k,v in enumerate(codes)}\nvoid_code = name2id['Void']\n\ndef acc_camvid(input, target):\n target = target.squeeze(1)\n mask = target != void_code\n return (input.argmax(dim=1)[mask]==target[mask]).float().mean()", "_____no_output_____" ], [ "metrics=acc_camvid\nwd=1e-2", "_____no_output_____" ], [ "learn = unet_learner(data, models.resnet34, metrics=metrics, wd=wd, bottle=True)", "_____no_output_____" ], [ "lr_find(learn)\nlearn.recorder.plot()", "_____no_output_____" ], [ "lr=2e-3", "_____no_output_____" ], [ "learn.fit_one_cycle(10, slice(lr), pct_start=0.8)", "_____no_output_____" ], [ "learn.save('stage-1')", "_____no_output_____" ], [ "learn.load('stage-1');", "_____no_output_____" ], [ "learn.unfreeze()", "_____no_output_____" ], [ "lrs = slice(lr/100,lr)", "_____no_output_____" ], [ "learn.fit_one_cycle(12, lrs, pct_start=0.8)", "_____no_output_____" ], [ "learn.save('stage-2');", "_____no_output_____" ] ], [ [ "## Go big", "_____no_output_____" ] ], [ [ "learn=None\ngc.collect()", "_____no_output_____" ] ], [ [ "You may have to restart your kernel and come back to this stage if you run out of memory, and may also need to decrease `bs`.", "_____no_output_____" ] ], [ [ "size = src_size\nbs=8", "_____no_output_____" ], [ "data = (src.transform(get_transforms(), size=size, tfm_y=True)\n .databunch(bs=bs)\n .normalize(imagenet_stats))", "_____no_output_____" ], [ "learn = unet_learner(data, models.resnet34, metrics=metrics, wd=wd, bottle=True).load('stage-2');", "_____no_output_____" ], [ "lr_find(learn)\nlearn.recorder.plot()", "_____no_output_____" ], [ "lr=1e-3", "_____no_output_____" ], [ "learn.fit_one_cycle(10, slice(lr), 
pct_start=0.8)", "_____no_output_____" ], [ "learn.save('stage-1-big')", "_____no_output_____" ], [ "learn.load('stage-1-big');", "_____no_output_____" ], [ "learn.unfreeze()", "_____no_output_____" ], [ "lrs = slice(lr/1000,lr/10)", "_____no_output_____" ], [ "learn.fit_one_cycle(10, lrs)", "_____no_output_____" ], [ "learn.save('stage-2-big')", "_____no_output_____" ], [ "learn.load('stage-2-big');", "_____no_output_____" ], [ "learn.show_results(rows=3, figsize=(9,11))", "_____no_output_____" ] ], [ [ "## fin", "_____no_output_____" ] ], [ [ "# start: 480x360", "_____no_output_____" ], [ "print(learn.summary())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0a7c6941f73b7c3ce4006b5587d3236b989fe4a
23,686
ipynb
Jupyter Notebook
lab2_grupo4.ipynb
StifhBarrage/Porgramacion_2022-hbarraganl
e61602739f2897a821e2cad98f9b9c0a74e220ba
[ "MIT" ]
null
null
null
lab2_grupo4.ipynb
StifhBarrage/Porgramacion_2022-hbarraganl
e61602739f2897a821e2cad98f9b9c0a74e220ba
[ "MIT" ]
null
null
null
lab2_grupo4.ipynb
StifhBarrage/Porgramacion_2022-hbarraganl
e61602739f2897a821e2cad98f9b9c0a74e220ba
[ "MIT" ]
null
null
null
50.828326
6,754
0.669974
[ [ [ "##Laboratorio 2\n#cumplir con cada uno de los 9 retos en sus grupos de trabajo\n#y subir el Colab a el repositorio de un seleccionado,\nfor: 30/05/2022 12:59", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "##1. Define a procedure histogram () that takes a list of whole numbers and prints a histogram on the screen. Example: procedure ([4, 9, 7]) should print the following:\n\n#****\n#*********\n#*******", "_____no_output_____" ] ], [ [ "def histogram(lista):\n for i in lista:\n if i != lista[0]: \n print((i-lista[0])*'*')\n else:\n print(lista[0]*'*')\n\nhistogram([4,9,7])\n# profe esto fue lo que yo entendi ya que en el ejemplo lo mostraba de esa forma,\n# si se puede corregir lo corrijo, gracias!!", "****\n*****\n***\n" ] ], [ [ "#2. Write a longer_long () function that takes a list of words and returns the longest.", "_____no_output_____" ] ], [ [ "def longer_long(lista):\n la_mas_larga = 0\n index_del_mas_largo = 0 \n for i in list(enumerate(lista)):\n if len(i[1]) > la_mas_larga:\n la_mas_larga = len(i[1])\n index_del_mas_largo = i[0]\n print(lista[index_del_mas_largo])\n\nlonger_long(['uno', 'dos', 'tres', 'larga', 'cuatro', 'demasiado', 'cinco'])", "demasiado\n" ] ], [ [ "#3. Write a program that tells the user to enter a string. The program has to evaluate the string and say how many capital letters it has.", "_____no_output_____" ] ], [ [ "def filt_words(string):\n num_of_capital = 0\n for i in string:\n if i.isupper() == True:\n num_of_capital = num_of_capital + 1\n return f'This string have {num_of_capital} capital letters' \nfilt_words(str(input('Write a string for be processed: ')))\n", "Write a string for be processed: Hola mundo \n" ] ], [ [ "#4.Build a small program that converts binary numbers to integers. Example:\n#![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAALsAAABTCAYAAADHh3aFAAAF/UlEQVR4Ae2aa47cOAyE+xJzgPmzx9p77HlyyPnZCwb4ggohWZKt9rMCNPim6FKZPUnm9fPz8/bHGDyBA68nPKSf0S9zcMBk9zfbY77ZTXaT3WT3V7+/+u/GAW92b3Zv9ru91X4ef1N5s3uze7OfbRP++vp6x+dsc/XOc/X5e5/zzHmX2exLZCGGrAEe8Vos/GvjnIssnbEUK+XbN//HrtuQHXLUCNsi25a4nqk6M4Vs9ddc6/OJHpg2yf56vX5vw5DoehnZpzY1Jak9tuglcmUfNjKfhx85Ei/VlHy5p+3PEHoJ1y6yZwJrQ42FX21Ijp8YUvus1TOxws4+etf8W+K559L5nGO5P9ED8y6y6+Vkoi7ZGlvSI5Y/euaSDtmUZPhyXc1P3po4NSFVp6flMcQu4X442UtDjfiUZNRBOmxkzb8lPnI+51ge8wIcTva80bF7CVEicMkX/Wp+zloTL9WUfJxheQzRA/epZM9EDZvLrenEt0gll+q551IsctfGtU71fL7t44ge2G8m++8m8i81NVLX/DMIEATTT+6pMXTNwadyS1xrrR9LcMW/SXZNtn6ei/NdjN+Fye7fjfnzo+bdXyCT3WQ32e/+lvv5xn8MuDpm3uze7N7sV3+LPf/zNnfrzr3Zvdm92VtviePenFfjgDe7N7s3+8y39r9//3nHZ2bPWi/9n9pazt7+mOlTc9G7t39v3iyMWue14rPmiD67bPatZO99UdYAFzX6mQkuveiP/QnZ8+w9OUfMttdcu5B9K4A9ZF8LWK7L9tbZ96pvzd2Kf3rO1vmt+Iz5mmSHaGxnbA7HJo4fiZ88/Ej85OEPiS9LzUFvgRXxUk6Pj1okZyLxI/GHxBdS/TmmeUu5uQd2qYYYZ6mNTh3n40cSL9nUlCT5yNwHP7IVJ2+L7CI7hIyDIB6HlmxiKrVH9mtMdfJKPmIhW0BpXPVareaEvmTXeuh8tRztnXWtz+drDF1z8CFbMY2rHvVLdsSIZ52zVZKrPtVbcc1do3eRPTdW8qkeedmmttdfyiv56BtyBKScm+3cb01cZ0Mf6VPKXeqTZyYXOdIv5y7ZGqvpzNCasSeuvdbo08leG6JG2OzPdvQr+fQcBVr96BHXD/6QpVr1aZ3q9NBcfCVZylNfTWfGiPPp7U+e9saHzLERW3NrOueE1Bz1o7fi5K2Vtyd7BrBlB5Cao3oJ5FacmlKe+nr0PBu9l/yjMZ2jVKvxHr13xtJZWjtDHyZ73rLZrg1Vy8v+bEe/kk/PUdDVH7rGQlc7x2t2rtEzSj01jl7qob5eXfPoHbLmH43lPmqHnm1mqPmJt+boiWuvNXoX2YNsfPIhLSJSlyV9cn22NY8e+FQq2OoPnUtCahwfUmPoxFQSC6n+0Jdi5FJHrtapTh51rRh59EXmupq/lKc9Nd6j187Bj9Re+GbLLrLPPvQT/fYA6xNz79XzaHxa57fiM3C6DdkDjD0AmwH6UT2Owqd1bis+C69bkd2Eb/8m5l7EgqCt81px+syQTbLPOMQ92iQ0Rp/HyGT3r/j+9RfqO790JrvJbrLf+Q33s33+R4YzYuzN7s3uzX7GN9MzPXMjz7p3b3Zvdm/2WW+T+3gbn4UD3uze7N7sZ3kbPYe/GWZxwJvdm92bfdbb5D7ezGfhwOvtP0bgIQiY7A+5aD/m+22ymwWPQcBkf
8xV+0FNdnPgMQiY7I+5aj+oyW4OPAYBk/0xV+0HNdnNgccgYLI/5qr9oCa7OfAYBEz2x1y1H7Sb7N/f31W0IrYUrxY6YAR2RKCL7C0yt+I7Po+PMgJVBJpkZ2Mjcyf8yBy3bQTOgkCT7AxaInP48CPJtzQCZ0NgM9l5IJMdJCzPisBqsmdyZ/usD+y5novAJrIHwUuf58LpJz8zAqvJnh/Kmz0jYvtsCDTJ3ru5TfazXa3nyQg0yZ4LbBuBqyJgsl/15jz3MAIm+zBkLrgqAib7VW/Ocw8jYLIPQ+aCqyJgsl/15jz3MAIm+zBkLrgqAib7VW/Ocw8jYLIPQ+aCqyJgsl/15jz3MAL/A3KAUM/EuUhhAAAAAElFTkSuQmCC)", "_____no_output_____" ] ], [ [ "def aDecimal(num_bin):\n num_deci = 0 \n for position, digit_string in enumerate(num_bin[::-1]):\n num_deci += int(digit_string) * 2 ** position\n return num_deci\n\nprint(aDecimal('01110'))", "14\n" ] ], [ [ "#5.Create a function count_vols (), which receives a word and counts how many letters \"a\" it has, how many letters \"e\" it has, and so on until all the vowels are completed.\nYou can make the user who chooses the word.\nonly in lower letters", "_____no_output_____" ] ], [ [ "def contar_vocales(cadena):\n dicc = {'a': 0, 'e': 0, 'i': 0, 'o': 0, 'u': 0}\n n = 1\n for i in cadena:\n for x in dicc.keys():\n if i.lower() == x and i.lower() in dicc and dicc[x] != 0:\n dicc[x] = dicc.get(x) + 1\n elif i.lower() == x:\n dicc[x] = 1\n elif i.lower() == x:\n dicc[x]\n return f'vocal ___ cantidad\\na\\t|\\t{dicc[\"a\"]}\\ne\\t|\\t{dicc[\"e\"]}\\ni\\t|\\t{dicc[\"i\"]}\\no\\t|\\t{dicc[\"o\"]}\\nu\\t|\\t{dicc[\"u\"]}'\nprint(contar_vocales('hlas sdaliruw sowlasail'))", "vocal ___ cantidad\na\t|\t4\ne\t|\t0\ni\t|\t2\no\t|\t1\nu\t|\t1\n" ] ], [ [ "#6.Write a function is leap () that determines if a given year is a leap year \nA leap year is divisible by 4, but not by 100. It is also divisible by 400", "_____no_output_____" ] ], [ [ "agno = int(input())\nif (agno % 4 == 0 and agno % 100 != 0) or (agno % 100 == 0 and agno % 400 == 0):\n print(\"Es un año bisiesto\")\nelif agno % 100 == 0 and agno % 400 != 0:\n print(\"No es un año bisiesto\")\nelse:\n print(\"No es un año bisiesto\")", "2040\nEs un año bisiesto\n" ] ], [ [ "#7. Write a program that asks for two words and tells whether or not they rhyme. If the last three letters match, you have to say that they rhyme. 
If only the last two coincide, he has to say that they rhyme a bit and if not, that they do not rhyme.\nExample:\n![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAATsAAACKCAYAAADVPe2jAAATbklEQVR4Ae1cS64kNw58l/ABvJljzT3mPHOKvoiXRi9n1dte1oBoBDocoEQpK5WfynhAgp8gKSnEpLPt1/76/v37S5+fP3++/JgD94B74JN64EsHXdifdECfxS+se8A9ED3gYeevWP/DzT3wiB7wsHOjP6LR/XXnrzsPOw87Dzv3wCN6wMPOjf6IRveXnb/sPOw87Dzs3AOP6IFTht1//v2vVzxH/NP26+trep3I2ZKXnadV579//PGKJ8tZ7Tti7dVrrK6/+g7eqX/E2VtrtPoZ56lwxJ0hbznsRgflVuIjb2suX2KvRquZOH+VfsTaq9dYXb/Hfazdw1djR5y9t0avr+PsFb6an1b9U4ZdazOj/pFhdzbh1fq9Zhrl4cpxq8+3un6P21i7h38CVvFb9XeFn8HR9LDDoAmJhzeuOGOhIwdxGc5xjHMu6xwDvUU2/CGhIyck/BXGca1Yrjur95oNGKTWhh+ScfhCsp91jtE42IjhvCvouj/Y2Bvs3v6BQXIufCyBQzIWOvwhYSOGMejAIOFHfuZvxUSsYlyHsRk963nOr3COPUrfNOx4UGHoYMOZDYwl11A/Y6wjLvMBC9kjOjDGWa9qcK7qVS7jlc4NyjryMh+wkBXei4lczh+xee2z9ZH96vl4z1V+jztgWr+yeX3UUJ/aXJOxav8ZzvkzeuvdQY0KR9xRctOw083x8GE94tRG7qg/i8t8qBuyR7JiaqNO5mdfS6/WR/1RqU2tttapcMS34jI/+1iPWmqj/lky2w/7WM/2r/hoDM5b5Ste2airUvOAZ372sR45aqPOiOR3IIuv8CxnpW/5sGttvjWw1K921Mt8vE6P5B5W1eDclh41GOOao3o0ID+cVzVnhaNWKy7zs4/1qKU26p8ls/2wj/Vsj4FnD8f2amS5HM961KxsXpd1zQOW+dnHerY+6ozIqs8rfGSNPWM87Bq/Y5VdFPtaelwOY7OXVTWj4lq/whHfisv87GM9aqmN+mfJbD/sYz3bY4VXZ67yFa/sbI+9PWg9jVVc7dZ6mb/q8wrPaq70vT3s9CtL7dbmW3HqVzvqZT5ep0dyD6tqcG5LjxqMcc0RnZsvdLYjP/Nx3QpHrNZlP2NajzHsB7kzMjh6h6fWWtX+FNc6el7Fw+7VqPI1N7PVN7MHXT+zud7IWhzPenV/Fc61jtA3DbsYNnh0k9UgQp5K1NF8tTkONeBj2SK65Udu4NkTOOe2dK4DfVaiQSE1H37IGRw5KrlGhWks26M6OB6NH43r7T1qBF7V0hpZDsdoPcagI0ZrqY09Ik9x9rOO+iP5Gsv2qM79n+VUeJaz2rdp2K3e1B71zyb77PX34HB1jRUc6XBYfYan1q/ursLP4O1jh12QeTbhZ69/RkONrBm8rOLGw279X/iv7q7CR3pkRcxHD7sg7Gziz15/RdNcuaaH3dphV/VzhZ/ZO9PD7szNeu21jWx+ze8n94CHXeNXTz750n02D7Un9oCHnYdd+V8nn/hi+Myf9w8EDzsPOw8798AjesDDzo3+iEb3l9rnfanN3qmHnYedh5174BE9cMqw6/3Nh9lpXcVv+U/hkbMlL9tLqw5++z3LuYNv9f5X178yx0ecvbVGq1/BV4Uj7orylsOu9VfIlOCtFxN5W3N5D70arWbj/FV6rP1u7dX7X12/d/49+OnVr7Ajzt5bo9e3sfcKr853Fn7KsHv3sCPD7uwLqdbvNdu7/FT5e7zMq/e/un6Poz346dW/AlbxW/VvhV/hjLqH6WGHQYM/isJGYdjA4YeEH3HwQ8KPOPhDwqeSY6C3LgP+kNCRExL+CuO4VizXndH1ZctsNGuGxVrAeV34VHIMdMTAvoqMfcVesD/Y2B9s4PCzBAYJDLZK4JA9PLCIQwxyWAKDbGHsZx15kIqFnWEcV+lZT3NOhXPsVfRNww4DKQ6BwYMDZTYwllxD/YyxjrjMByxk7yICY5z1qgbnql7lMl7p0agcw7Y28YjNtULneoopXsVm+St9I+flPbOOs7FP6yGmdQaNH7G1Fq+vGOxWzMh6nMs6ao/K1ruB/ApH3FXkpmGnm+fhw3rEqY3cUX8Wl/lQN2TvEhRTG3UyP/taerU+6vdk1tCIz5qXfaxHjtotH+qrzPI15kg72w/7WI99VfZoDM6o9TRf8cpGXZWaBzzzs4913RtqjEru8SynwrOcM33Lh13rcK2BpX61o17m43V6l9DDqhqc29KjBmNcc0ZH00IiN+zsYRx6SM1v+TSH12DsbL06T4bznvlcrGsM26xzDuuICR/0kJXNsaxrHrDMzz7Ws/VRZ0RWfVzhI2scGeNh1/gdq+wi2dfS4/IY23qZaFpI1FEbfkjF1Y64zDeTj9gzZLZ39rGe7a/CI6cX08OyXI1XO9tjVgdxWT77WO/VQb2erPq4wnu1z8DeHnb6laV261CtOPWrHfUyH6/Tu4QeVtXg3JYeNRjjmjM6mhYSuWGrD1hIxdTOYlr51Vqcp3pwsAcPWlfPU9lZvuZkMeqDXXGitTNbfajNshUTfsYye6QOx7T06v4qvFX3LP+mYRfDBo9uvBpEyFOJOpqvNsehBnwsWxfR8iM38OwJnHNbOteBvlVyU3MNNDhL4JqjNschHz5I+CHhn5HgcCZnJBZ7gtSc8KtPbeSy7MX0MNRAjK6vdsQhBxK5GTYSo/k9m7Gezv2dxVV4lnO2b9OwO3vTI+uffRlnrz/C0eqYFRxkw2P1OZ5Yv7q7Cr8iZx877ILssy/k7PXParg496qze9it/wv91d1V+Fl9V6370cPOA2/9i1E12N64h93aO60GWYXvfd971psednsu7lprG9f8ml/3wO8e8LBr/OqJm+R3k5gLc/EJPTA17H78+PHy0+bg27dvr95j7trcmZtrcxN/fI2nd0+93g+sl3sE5mG34wCPC/3r+1/pc4XLPqKhvMa1h9bW+xkddlfu/03D7hM+aVecIQba3//7O30CW7Gma/qPmEf0AIZdb62r97+H3Y7/zu7ql91rVGMemr0e8LDbOCh6f/OhR/gWLC5pNm/kYrOa2bCLWvG1p1922W/GZzXVh7wzfwUDe9C9HWmv3sPq+iu5WrF3fSeyNaLH0e/6Jxz0f+Arz96rfcqX3bvDrvVXyPSgW4nVi9W6LVuHHV88Lhu5WbMAG5GRPxK3Iqba+xF7q/bw7rlX1393f738FXvXdyJbA/3PfY+hx/0feG//q7BTht27hxkZdmcQisuOC9YL58uO82fNMsNL5M/EHxl7xN7e5a/iY3X9av2r4dHP/E5l/Mz0P9c66qzTww6DBl9nsLFh2MDhh4QfcfBDwo84+EPCp5JjoLfIhD8kdOSEhL/COA
6xM5fNa2Z61kwcFzjbrCOXZQ9XLGzkMsb+bH3kqEQN9bONGF0jYhi7io69Z/sBBqkx8EMyHr6wMwx+SMRqfi8XmMpWDayBfuc41Wf6H++M1lhpbxp2GEixMQwebDKzgbHkGupnjHXEZT5gIXtE6qW1YjM/56oe6+KyA8PnO2RgvMeejgaLGNY5Z9SvcWGzb8TmdaFzDfggWxivpTrncj7HIeZsqfvT/TCeYYzr+TKba1Q4YnkN9sHPdeCLOPazzf2OeirR/+h5fg+y/g9ca6y0Nw073RAPH9YjTm3kjvqzuMyHuiF7JCqmNupkfvZlOi47MFw4ZHbZWKsnuRE5btSvcWpHTfaxrtjI+qM5vE5Lx3qMw3cVqXtTW/eZ4exjPXJnbayneVqL8ZbOtaKnueeBsUT/o+f5Pcj6v6rHtffQlw+71iZbA0v9ake9zMfr9EjsYVUNzs10XHZguHDI7LJ5Pdaj+fhhDDo3KHwh1V/ZmqPxXJv1XlwLY/+IjvU4Fr4zZeyHH95LtVfOYx01RvIR25NZHfaN6KgfsdHT3PPAWKL/0fP8HmT9X9Xj2nvoHnaNX5/JLoJ9mY7LDgwXDplddnaB3ISBq42cnj8wPIiHzPLYxzpyMtmLa2HsH9GxLsfCd5bUvVS27lPj98ZRL1uHfSM614qe5p4HxhL9j57n9yDr/6oe195Df3vY6VeW2q1NtuLUr3bUy3y8To/EHlbV4NxMx2UHhguHzC6b14OuTcg2YkLO+pEbeZyb2YjtSa6hcS2M/T1dMbZ1rZYdd8B31Iqb9fNeQmc7amU+XmME53jVdT3FYWdx7OvpioU9wif6Hz3P70HW/yvuB+fP5KZhF8MGjxatBhHyVKKO5qvNcagBH8sWkS0/cgPPnsA5N9P5sgPHpYfMLhtrqozm4odx9rOOGPZBBwYJPyT8IcPHturIUdmLA8a1W3rEVrVRrydxh72YrVi1v1m8x4XukWMVC1vXhg0MOVyHdcQhD9gInzP9z+8P9rRabhp2qze1R/0zyJy57D3OmNVAczKW+Rj/VP2MHvhkLis+Z/q/qrWCx48ddkHW0YTyZcfXXKyPr7vAVlyg1swGW+bTvE+yg/ej7/6T+MvOMsIp+p/7Puv/s+7mo4ddXNqRxOKyccE88I4adnHmGG78ZM1rn//i/0wPjA67bNDFe4D+P/J91PNND7ut/z+sJ+TFhV75/+f1hDvwGdf8//Qw7Hr8Xr3/Pex2/p93xoW3nl6jGFvzkprXfXgdHXat3g//2XcxNez0s9C2/yjkHnAP3KUHPOwav1R8lwv0Pj1s3ANjPeBh52F3yH8l9gs59kKap3U8edh52HnYuQce0QMedm70RzS6v5jWfTHdhVsPOw87Dzv3wCN6wMPOjf6IRr/L14f3ue4L1MPOw87Dzj3wiB7wsHOjP6LR/cW07ovpLtx62HnYedi5Bx7RA18v/5gBM2AGHsCAh90DLtlHNANm4PXysHMXmAEz8AgGPOwecc0+pBkwAx527gEzYAYewYCH3SOu2Yc0A2bAw849YAbMwCMY8LB7xDX7kGbADHjYuQfMgBl4BAMedo+4Zh/SDJgBDzv3gBkwA49gYPdh9+eff77i8Y8ZMANm4EoMTA07DDLI7CA9LIu3zwyYATNwBAPTw4435S84ZsO6GTADV2Zg12GHr7psCDKmuhIEPPOHr4VrvG0zYAbMABjYddihaAwj/eEBpTrHci7rEcN5sDnXuhkwA2agxcD0sMPA0UHEC2QY+1o61wid40ZszbdtBsyAGQAD08MOiTqI4A+ZYexr6cgNHE+vLtfhOOtmwAyYAWXgrWHXGjaZn30jemyU40ZsPZxtM2AGzAAY2DzsooAOIxTN/Owb1TkuW09xrG9pBsyAGVAG3h52PHBCz55YVOOwEfYjjmsgDljPZsy6GTADZoAZmBp2nGjdDJgBM3AnBjzs7nRb3qsZMAObGfCw20ydE82AGbgTAx52d7ot79UMmIHNDHjYbabOiWbADNyJAQ+7O92W92oGzMBmBjzsNlPnRDNgBu7EwO2GHX4H704ke69mwAycz8DUsMsGjf5S8OojZXtYvabrmwEzcH8Gbjfs7k+5T2AGzMAZDEwPu9gkf82xDgxfX4pVB0Q88jUefsQxzpjqHBc68MzfwzXethkwA/dhYNdhp0NE7YoWjQ87+8n8nKs61+Bc1iOG82BzrnUzYAbuy8Duw06p0IGiONsaqzZiMz/7WjryITkufJWNPEszYAbux8CmYRfHxGCAZB/TwDj7M300NotjX0vHHgPHw/vgPMQybt0MmIH7MvCoYVcNswq/7zV752bADGwedkGdfh1VdkW3DptWfBbHvlGd43AeXlNxxqybATNwLwZ2HXZx9BgQ/MzQUQ0Xrss61sVaXId1xGlulodYYJZmwAzcm4GpYXfvo3r3ZsAMPJkBD7sn377PbgYexICH3YMu20c1A09mwMPuybfvs5uBBzHgYfegy/ZRzcCTGfCwe/Lt++xm4EEMeNg96LJ9VDPwZAZ2H3b4HbYnk+qzmwEzcD0GpoYdBhlkdpwelsXbZwbMgBk4goHpYcebisHmHzNgBszAHRjYddjhqy4bgoyprkQBz/zha+Ear3bk8U/Lfrc+8rV+rM3YCpzPZ90MmIHfDOw67FC29xJHDF546MhTW+twnsZyjZae1ePYPerzGlm9Hl6dqarHZ7FuBszAPxmYHnZ44fil/WfJX8Os5+Nc1ns5gWms2pqvtsbP2lpPba0XOPtYRy77WAfOMsMzH+dYNwNm4BcD08MOxPVesgxjX0uP2oHxg/WA9WzGMp3XHamn8VlN9mXx7GMdeexjHTjLDM98nGPdDJiBXwy8NexaL1rmZ9+IHtvjuBG7utTZeho/W1/3nNVjH+vZWhme+bJc+8zA0xnYPOyCuNaLlvnZN6pzXLae4tVlcnzobO9dv1WP19Q9qK3nUVxtjbdtBszAbwbeHnbxwuEHL5/KwDWOc6AjjvMV69mMtXTUDpz3NGK3asKP2pDwswQGyVjo8EPO4hpv2wyYgV8MTA07k9ZnIAaUf8yAGbgmAx52O96Lh92OZLqUGdiZAQ+7HQn1sNuRTJcyAzsz4GG3M6EuZwbMwDUZ8LC75r14V2bADOzMgIfdzoS6nBkwA9dkwMPumvfiXZkBM7AzAx52OxPqcmbADFyTAQ+7a96Ld2UGzMDODHjY7Uyoy5kBM3BNBjzsrnkv3pUZMAM7M+BhtzOhLmcGzMA1GfCwu+a9eFdmwAzszMD/AdRzit3Macm8AAAAAElFTkSuQmCC)", "_____no_output_____" ] ], [ [ "def rim(uno,dos):\n if uno[-1] == dos[-1] and uno[-2] == dos[-2] and uno[-3] == dos[-3]:\n return 'Riman'\n elif uno[-1] == dos[-1] and uno[-2] == dos[-2]:\n return 'Riman un poco'\n else:\n return 'No 
riman'\nprint(rim('accion', 'pretencion'))\nprint(rim('venta', 'pretencion'))\nprint(rim('algoritmo', 'acerrimo'))\n", "Riman\nNo riman\nRiman un poco\n" ] ], [ [ "#8.It has a program that asks the user for a dollar amount, an interest rate, and a number of years. It shows on the screen how much the initial capital will have been converted after those years if the interest rate entered is applied each year.\nRemember that a capital C dollars at an interest of x percent for n years becomes C * (1 + x / 100) raised to n (years). Test the program knowing that an amount of $ 10,000 at 4.5% annual interest becomes $ 24,117.14 after 20 years.", "_____no_output_____" ] ], [ [ "def calculo(dinero, inte, cant_anos):\n resultado = dinero * (1 + inte / 100)**cant_anos\n print(f'los intereses de sus {dinero} USD$ a {cant_anos} años son {resultado:.2f} USD$')\ncalculo(10000, 4.5, 20)", "los intereses de sus 10000 USD$ a 20 años son 24117.14 USD$\n" ] ], [ [ "", "_____no_output_____" ], [ "##9. Create an algorithm that generates 10 files iteratively, call this files 'tabla_del_n.txt' been n the (0 to 9) numbers with the multiplication tables from (0 to 9)\nfollowing the format:\n![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAACCCAYAAACO9sDAAAAD0UlEQVR4nO2cXY7bIBRGcdXHzgKa/a8tXUD6Tp88cRFguPzE5jtHGikTJ0Di44ttPmXz3nsHsvz49ADgsyCAOAggDgKI01WAbduy2/a/UZS2PWMsPZgxvp+lHbdeLOzvv8KXfqWx5PDeu23bmr/7HFEBjl8QV4lrExXgjOOREwqS22Zts/R9Z9t6yJxrM7VtP5BSn9Eyzl6fzSRAOPiwc0sFOWszRvi645cSa6O1ou3tx9o46y98nGpzPzfZ/z+bBlqrdXMFSA2oZ5tWRrSZ+3yl30vqgNkfl467R0WrFiB31FkZ0aZzfb6gK/Q38mSw6TLwSkdt7H2zx9dj7LOvTKorwFmJKjnRCee9mrKXGkvsJCt2rR++Ljevp/qLfcZcf9Y2Z7CxGngfRkwD3Aq+EZc7B4D7gwDimO4DXIE/f9Pbfv+a29/j6763zKkA4iCAOAggTlSAq6+TQz+SeYDcTYezbWFbI3l8/d/f87VWf6MZMgV476embp4v/70jwh20Qn8jqboMTC1atK5Jx9bEj/+neHxtU4/A2f3NIFkBYgs0xyN7f2yZJmJ9Hd8XPldCzc45hkKt4dBVZBhyI8hSCcKVvRpqd0breckqO9+5EwEsQYSWacCyLGzZGS2p55V2vnOD7wPUZPt2rNkA58p3znH6yk1jvfq7MmYBYvNnGGwsbef43vD5HMcz8Vln/zP7m8HpOUBsGsgdMZa5v6WN2Ufg3Y/4kKIKcNeVLjiHtQBxEEAcQqHi3DYRNJvZCaRZMAWIgwDiIIA4JILEuX0iyLqCaIVEUAGzEkGf+gkbEkHuGomg2Tt/hdW/kGUSQTWQCHqzRCKoFhJBb26fCLJAIujN0FvBLYmgkVWgV9sryGAWoOZnUkrasfykm/WnXqw8X36pNJBzN08EfeoScCVIBInDWoA4CCAOAoiDAOIggDgIIA6BEHGWCYTM6Ms5AiFFzA6E7H+zKheBEEcgZBUIhBAI6c8nAiFW4SyssvOdWyQQYhljCIGQAcwIhFirTQ9WkGGJQMisS0ECIYfncq+vgUDIZyEQIg5rAeIggDj8RIw4VABxEEAcBBAHAcRBAHEQQBwEEAcBxEEAcRBAnKQA1qwc3AsqgDjJQAhrRBpQAcRBAHEQQBwEEAcBxEEAcRBAHAQQBwHEQQBxEEAcBBAHAcRBAHEIhIhDBRCHQIg4VABxEEAcBBAHAcRBAHEQQBwEEAcBxEEAcRBAHAQQBwHEQQBxEEAcAiHiUAHEIRAiDhVAHAQQBwHEQQBxEEAcBBAHAcRBAHEQQBwEEAcBxEEAcf4BB2cTktECnSoAAAAASUVORK5CYII=)", "_____no_output_____" ] ], [ [ "def tablas():\n for i in range(10):\n archivo = open(f'/content/drive/MyDrive/tabla_del_{i}.txt', 'w')\n archivo.write(f'Tabla del {i}')\n for x in range(10):\n archivo.write(f'\\n{i} x {x} = {i*x}')\ntablas()\n", "_____no_output_____" ] ], [ [ "##On the same file system, make an algorithm that asks the user for a number n and a number m, so that it shows the user of the file table_del_n.txt, the multiplication 'm'.", "_____no_output_____" ] ], [ [ "def tabla_request(n,m):\n archivo = open(f'/content/drive/MyDrive/tabla_del_{n}.txt', 'r')\n linea = archivo.readlines()\n print(linea[m+1])\ntabla_request(6,6)", "6 x 6 = 36\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0a7d3c1877cbb7489f0bf4679459d5299e7ce15
28,497
ipynb
Jupyter Notebook
courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb
riyaj5246/skin-cancer-with-tflite
2a3fbd58deb0984c1224464638ec7f45f7e1fd9b
[ "Apache-2.0" ]
6,484
2019-02-13T21:32:29.000Z
2022-03-31T20:50:20.000Z
courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb
riyaj5246/skin-cancer-with-tflite
2a3fbd58deb0984c1224464638ec7f45f7e1fd9b
[ "Apache-2.0" ]
288
2019-02-13T22:56:03.000Z
2022-03-24T11:15:19.000Z
courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb
riyaj5246/skin-cancer-with-tflite
2a3fbd58deb0984c1224464638ec7f45f7e1fd9b
[ "Apache-2.0" ]
7,222
2019-02-13T21:39:34.000Z
2022-03-31T22:23:54.000Z
37.348624
573
0.531845
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Image Classification using tf.keras", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "In this Colab you will classify images of flowers. You will build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`.\n", "_____no_output_____" ], [ "# Importing Packages", "_____no_output_____" ], [ "Let's start by importing required packages. **os** package is used to read files and directory structure, **numpy** is used to convert python list to numpy array and to perform required matrix operations and **matplotlib.pyplot** is used to plot the graph and display images in our training and validation data.", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np\nimport glob\nimport shutil\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "### TODO: Import TensorFlow and Keras Layers\n\nIn the cell below, import Tensorflow and the Keras layers and models you will use to build your CNN. Also, import the `ImageDataGenerator` from Keras so that you can perform image augmentation.", "_____no_output_____" ] ], [ [ "import tensorflow as tf", "_____no_output_____" ], [ "from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator", "_____no_output_____" ] ], [ [ "# Data Loading", "_____no_output_____" ], [ "In order to build our image classifier, we can begin by downloading the flowers dataset. We first need to download the archive version of the dataset and after the download we are storing it to \"/tmp/\" directory.", "_____no_output_____" ], [ "After downloading the dataset, we need to extract its contents.", "_____no_output_____" ] ], [ [ "_URL = \"https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz\"\n\nzip_file = tf.keras.utils.get_file(origin=_URL,\n fname=\"flower_photos.tgz\",\n extract=True)\n\nbase_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos')", "_____no_output_____" ] ], [ [ "The dataset we downloaded contains images of 5 types of flowers:\n\n1. 
Rose\n2. Daisy\n3. Dandelion\n4. Sunflowers\n5. Tulips\n\nSo, let's create the labels for these 5 classes: ", "_____no_output_____" ] ], [ [ "classes = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips']", "_____no_output_____" ] ], [ [ "Also, the dataset we have downloaded has following directory structure. n\n<pre style=\"font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;\" >\n<b>flower_photos</b>\n|__ <b>daisy</b>\n|__ <b>dandelion</b>\n|__ <b>roses</b>\n|__ <b>sunflowers</b>\n|__ <b>tulips</b>\n</pre>\n\nAs you can see there are no folders containing training and validation data. Therefore, we will have to create our own training and validation set. Let's write some code that will do this.\n\n\nThe code below creates a `train` and a `val` folder each containing 5 folders (one for each type of flower). It then moves the images from the original folders to these new folders such that 80% of the images go to the training set and 20% of the images go into the validation set. In the end our directory will have the following structure:\n\n\n<pre style=\"font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;\" >\n<b>flower_photos</b>\n|__ <b>daisy</b>\n|__ <b>dandelion</b>\n|__ <b>roses</b>\n|__ <b>sunflowers</b>\n|__ <b>tulips</b>\n|__ <b>train</b>\n |______ <b>daisy</b>: [1.jpg, 2.jpg, 3.jpg ....]\n |______ <b>dandelion</b>: [1.jpg, 2.jpg, 3.jpg ....]\n |______ <b>roses</b>: [1.jpg, 2.jpg, 3.jpg ....]\n |______ <b>sunflowers</b>: [1.jpg, 2.jpg, 3.jpg ....]\n |______ <b>tulips</b>: [1.jpg, 2.jpg, 3.jpg ....]\n |__ <b>val</b>\n |______ <b>daisy</b>: [507.jpg, 508.jpg, 509.jpg ....]\n |______ <b>dandelion</b>: [719.jpg, 720.jpg, 721.jpg ....]\n |______ <b>roses</b>: [514.jpg, 515.jpg, 516.jpg ....]\n |______ <b>sunflowers</b>: [560.jpg, 561.jpg, 562.jpg .....]\n |______ <b>tulips</b>: [640.jpg, 641.jpg, 642.jpg ....]\n</pre>\n\nSince we don't delete the original folders, they will still be in our `flower_photos` directory, but they will be empty. The code below also prints the total number of flower images we have for each type of flower. ", "_____no_output_____" ] ], [ [ "for cl in classes:\n img_path = os.path.join(base_dir, cl)\n images = glob.glob(img_path + '/*.jpg')\n print(\"{}: {} Images\".format(cl, len(images)))\n num_train = int(round(len(images)*0.8))\n train, val = images[:num_train], images[num_train:]\n\n for t in train:\n if not os.path.exists(os.path.join(base_dir, 'train', cl)):\n os.makedirs(os.path.join(base_dir, 'train', cl))\n shutil.move(t, os.path.join(base_dir, 'train', cl))\n\n for v in val:\n if not os.path.exists(os.path.join(base_dir, 'val', cl)):\n os.makedirs(os.path.join(base_dir, 'val', cl))\n shutil.move(v, os.path.join(base_dir, 'val', cl))", "_____no_output_____" ], [ "round(len(images)*0.8)", "_____no_output_____" ] ], [ [ "For convenience, let us set up the path for the training and validation sets", "_____no_output_____" ] ], [ [ "train_dir = os.path.join(base_dir, 'train')\nval_dir = os.path.join(base_dir, 'val')", "_____no_output_____" ] ], [ [ "# Data Augmentation", "_____no_output_____" ], [ "Overfitting generally occurs when we have small number of training examples. One way to fix this problem is to augment our dataset so that it has sufficient number of training examples. Data augmentation takes the approach of generating more training data from existing training samples, by augmenting the samples via a number of random transformations that yield believable-looking images. 
The goal is that at training time, your model will never see the exact same picture twice. This helps expose the model to more aspects of the data and generalize better.\n\nIn **tf.keras** we can implement this using the same **ImageDataGenerator** class we used before. We can simply pass different transformations we would want to our dataset as a form of arguments and it will take care of applying it to the dataset during our training process. ", "_____no_output_____" ], [ "## Experiment with Various Image Transformations\n\nIn this section you will get some practice doing some basic image transformations. Before we begin making transformations let's define our `batch_size` and our image size. Remember that the input to our CNN are images of the same size. We therefore have to resize the images in our dataset to the same size.\n\n### TODO: Set Batch and Image Size\n\nIn the cell below, create a `batch_size` of 100 images and set a value to `IMG_SHAPE` such that our training data consists of images with width of 150 pixels and height of 150 pixels.", "_____no_output_____" ] ], [ [ "batch_size = 100\nIMG_SHAPE = 150 ", "_____no_output_____" ] ], [ [ "### TODO: Apply Random Horizontal Flip\n\nIn the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random horizontal flip. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images. ", "_____no_output_____" ] ], [ [ "image_gen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)\n\ntrain_data_gen = image_gen.flow_from_directory(\n batch_size=batch_size,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_SHAPE,IMG_SHAPE)\n )", "_____no_output_____" ] ], [ [ "Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action.", "_____no_output_____" ] ], [ [ "# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.\ndef plotImages(images_arr):\n fig, axes = plt.subplots(1, 5, figsize=(20,20))\n axes = axes.flatten()\n for img, ax in zip( images_arr, axes):\n ax.imshow(img)\n plt.tight_layout()\n plt.show()\n\n\naugmented_images = [train_data_gen[0][0][0] for i in range(5)]\nplotImages(augmented_images)", "_____no_output_____" ] ], [ [ "### TODO: Apply Random Rotation\n\nIn the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random 45 degree rotation. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images. 
", "_____no_output_____" ] ], [ [ "image_gen = ImageDataGenerator(rescale=1./255, rotation_range=45)\n\ntrain_data_gen = image_gen.flow_from_directory(batch_size=batch_size,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_SHAPE, IMG_SHAPE))", "_____no_output_____" ] ], [ [ "Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action.", "_____no_output_____" ] ], [ [ "augmented_images = [train_data_gen[0][0][0] for i in range(5)]\nplotImages(augmented_images)", "_____no_output_____" ] ], [ [ "### TODO: Apply Random Zoom\n\nIn the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random zoom of up to 50%. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images. ", "_____no_output_____" ] ], [ [ "image_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5)\n\ntrain_data_gen = image_gen.flow_from_directory(\n batch_size=batch_size,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_SHAPE, IMG_SHAPE)\n )", "_____no_output_____" ] ], [ [ "Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action.", "_____no_output_____" ] ], [ [ "augmented_images = [train_data_gen[0][0][0] for i in range(5)]\nplotImages(augmented_images)", "_____no_output_____" ] ], [ [ "### TODO: Put It All Together\n\nIn the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and that applies:\n\n- random 45 degree rotation\n- random zoom of up to 50%\n- random horizontal flip\n- width shift of 0.15\n- height shift of 0.15\n\nThen use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, to shuffle the images, and to set the class mode to `sparse`.", "_____no_output_____" ] ], [ [ "image_gen_train = ImageDataGenerator(\n rescale=1./255,\n rotation_range=45,\n width_shift_range=.15,\n height_shift_range=.15,\n horizontal_flip=True,\n zoom_range=0.5\n )\n\n\ntrain_data_gen = image_gen_train.flow_from_directory(\n batch_size=batch_size,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_SHAPE,IMG_SHAPE),\n class_mode='sparse'\n )", "_____no_output_____" ] ], [ [ "Let's visualize how a single image would look like 5 different times, when we pass these augmentations randomly to our dataset. ", "_____no_output_____" ] ], [ [ "augmented_images = [train_data_gen[0][0][0] for i in range(5)]\nplotImages(augmented_images)", "_____no_output_____" ] ], [ [ "### TODO: Create a Data Generator for the Validation Set\n\nGenerally, we only apply data augmentation to our training examples. So, in the cell below, use ImageDataGenerator to create a transformation that only rescales the images by 255. Then use the `.flow_from_directory` method to apply the above transformation to the images in our validation set. Make sure you indicate the batch size, the path to the directory of the validation images, the target size for the images, and to set the class mode to `sparse`. 
Remember that it is not necessary to shuffle the images in the validation set. ", "_____no_output_____" ] ], [ [ "image_gen_val = ImageDataGenerator(rescale=1./255)\n\nval_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size,\n directory=val_dir,\n target_size=(IMG_SHAPE, IMG_SHAPE),\n class_mode='sparse')", "_____no_output_____" ] ], [ [ "# TODO: Create the CNN\n\nIn the cell below, create a convolutional neural network that consists of 3 convolution blocks. Each convolutional block contains a `Conv2D` layer followed by a max pool layer. The first convolutional block should have 16 filters, the second one should have 32 filters, and the third one should have 64 filters. All convolutional filters should be 3 x 3. All max pool layers should have a `pool_size` of `(2, 2)`.\n\nAfter the 3 convolutional blocks you should have a flatten layer followed by a fully connected layer with 512 units. The CNN should output class probabilities based on 5 classes which is done by the **softmax** activation function. All other layers should use a **relu** activation function. You should also add Dropout layers with a probability of 20%, where appropriate.", "_____no_output_____" ] ], [ [ "model = Sequential()\n\nmodel.add(Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_SHAPE,IMG_SHAPE, 3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, 3, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, 3, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dropout(0.2))\nmodel.add(Dense(512, activation='relu'))\n\nmodel.add(Dropout(0.2))\nmodel.add(Dense(5))", "_____no_output_____" ] ], [ [ "# TODO: Compile the Model\n\nIn the cell below, compile your model using the ADAM optimizer, the sparse cross entropy function as a loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so make sure you also pass the metrics argument.", "_____no_output_____" ] ], [ [ "model.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "# TODO: Train the Model\n\nIn the cell below, train your model using the **fit_generator** function instead of the usual **fit** function. We have to use the `fit_generator` function because we are using the **ImageDataGenerator** class to generate batches of training and validation data for our model. 
Train the model for 80 epochs and make sure you use the proper parameters in the `fit_generator` function.", "_____no_output_____" ] ], [ [ "epochs = 80\n\nhistory = model.fit_generator(\n train_data_gen,\n steps_per_epoch=int(np.ceil(train_data_gen.n / float(batch_size))),\n epochs=epochs,\n validation_data=val_data_gen,\n validation_steps=int(np.ceil(val_data_gen.n / float(batch_size)))\n)", "_____no_output_____" ] ], [ [ "# TODO: Plot Training and Validation Graphs.\n\nIn the cell below, plot the training and validation accuracy/loss graphs.", "_____no_output_____" ] ], [ [ "acc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0a7e225b6c15018ae2b98e6a0f0cd4b2a921d33
75,758
ipynb
Jupyter Notebook
Computerprogrameren TUDelft/Practicum nummeriek/Deel 1/Phase 2 practicum 1 v4 (einde dag 31-5-2017).ipynb
Pietervanhalem/Pieters-Personal-Repository
c31e3c86b1d42f29876455e8553f350d4d527ee5
[ "MIT" ]
2
2020-02-26T13:02:44.000Z
2020-03-06T07:09:10.000Z
Computerprogrameren TUDelft/Practicum nummeriek/Deel 1/Phase 2 practicum 1 v4 (einde dag 31-5-2017).ipynb
Pietervanhalem/Pieters-Personal-Repository
c31e3c86b1d42f29876455e8553f350d4d527ee5
[ "MIT" ]
11
2020-03-06T07:17:10.000Z
2022-02-26T22:32:59.000Z
Computerprogrameren TUDelft/Practicum nummeriek/Deel 1/Phase 2 practicum 1 v4 (einde dag 31-5-2017).ipynb
Pietervanhalem/Personal-Code-Examples
c31e3c86b1d42f29876455e8553f350d4d527ee5
[ "MIT" ]
null
null
null
157.829167
24,108
0.864397
[ [ [ "# Equilibrium analysis Chemical reaction\nNumber (code) of assignment: 2N4\n\nDescription of activity:\n\nReport on behalf of:\n\nname : Pieter van Halem\nstudent number : 4597591\n\nname : Dennis Dane\nstudent number :4592239\n\n\n\nData of student taking the role of contact person:\n\nname :\n\nemail address :", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "# Function definitons:\n\nIn the following block the function that are used for the numerical analysis are definend. These are functions for calculation the various time steps, functions for plotting tables ands functions for plotting graphs.", "_____no_output_____" ] ], [ [ "def f(t,y,a,b,i):\n if (t>round(i,2)):\n a = 0\n du = a-(b+1)*y[0,0]+y[0,0]**2*y[0,1]\n dv = b*y[0,0]-y[0,0]**2*y[0,1] \n return np.matrix([du,dv])\n\ndef FE(t,y,h,a,b,i):\n f1 = f(t,y,a,b,i)\n pred = y + f1*h\n corr = y + (h/2)*(f(t,pred,a,b,i) + f1)\n return corr\n\ndef Integrate(y0, t0, tend, N,a,b,i):\n h = (tend-t0)/N\n\n t_arr = np.zeros(N+1)\n t_arr[0] = t0\n\n w_arr = np.zeros((2,N+1))\n w_arr[:,0] = y0\n\n t = t0\n y = y0\n for k in range(1,N+1):\n y = FE(t,y,h,a,b,i)\n w_arr[:,k] = y\n t = round(t + h,4)\n t_arr[k] = t\n \n return t_arr, w_arr\n\ndef PrintTable(t_arr, w_arr):\n print (\"%6s %6s: %17s %17s\" % (\"index\", \"t\", \"u(t)\", \"v(t)\"))\n for k in range(0,N+1):\n print (\"{:6d} {:6.2f}: {:17.7e} {:17.7e}\".format(k,t_arr[k],\n w_arr[0,k],w_arr[1,k])) \n\ndef PlotGraphs(t_arr, w_arr):\n plt.figure(\"Initial value problem\")\n plt.plot(t_arr,w_arr[0,:],'r',t_arr,w_arr[1,:],'--')\n plt.legend((\"$u(t)$\", \"$v(t)$\"),loc=\"best\", shadow=True)\n plt.xlabel(\"$t$\")\n plt.ylabel(\"$u$ and $v$\")\n plt.title(\"Graphs of $u(t)$ and $v(t)$\")\n plt.show()\n \ndef PlotGraphs2(t_arr, w_arr):\n plt.figure(\"Initial value problem\")\n plt.plot(w_arr[0,:],w_arr[1,:],'g')\n plt.legend((\"$u,v$\",\"\"),loc=\"best\", shadow=True)\n plt.xlabel(\"$u(t)$\")\n plt.ylabel(\"$v(t)$\")\n plt.title(\"$Phase$ $plane$ $(u,v)$\")\n plt.axis(\"scaled\")\n plt.show()\n \ndef PlotGraphs3(t_arr, w_arr,t_arr2, w_arr2):\n plt.figure(\"Initial value problem\")\n plt.plot(t_arr,w_arr[0,:],'r',t_arr,w_arr[1,:],'b--')\n plt.plot(t_arr2,w_arr2[0,:],'r',t_arr2,w_arr2[1,:],'b--')\n #plt.plot([t_array[80],t_array2[0]],[w_array[0,80],w_array2[0,0]],'r')\n plt.legend((\"$u(t)$\", \"$v(t)$\"),loc=\"best\", shadow=True)\n plt.xlabel(\"$t$\")\n plt.ylabel(\"$u$ and $v$\")\n plt.title(\"Graphs of $u(t)$ and $v(t)$\")\n plt.show()\n\ndef PlotGraphs4(t_arr, w_arr,t_arr2, w_arr2):\n #plt.figure(\"Initial value problem\")\n plt.plot(w_arr[0,:],w_arr[1,:],'g')\n plt.plot(w_arr2[0,:],w_arr2[1,:],'g')\n #plt.legend((\"$u,v$\",\"\"),loc=\"best\", shadow=True)\n plt.xlabel(\"$u(t)$\")\n plt.ylabel(\"$v(t)$\")\n plt.title(\"$Phase$ $plane$ $(u,v)$\")\n plt.axis(\"scaled\")\n plt.show()", "_____no_output_____" ] ], [ [ "# Assignment 2.9\nIntegrate the system with Modified Euler and time step h = 0.15. Make a table of u and v on the time interval 0 ≤ t ≤ 1.5. 
The table needs to give u and v in an 8-digit floating-point format.", "_____no_output_____" ] ], [ [ "y0 = np.matrix([0.0,0.0])\nt0 = 0.0\ntend = 1.5\nN = 10\n\nt_array, w_array = Integrate(y0, t0, tend, N,2,4.5,11)\nprint(\"The integrated system using Modified Euler with time step h = 0.15 is shown in the following table: \\n\")\nPrintTable(t_array, w_array)\n\n", "The integrated system using Modified Euler with time step h = 0.15 is shown in the following table: \n\n index t: u(t) v(t)\n 0 0.00: 0.0000000e+00 0.0000000e+00\n 1 0.15: 1.7625000e-01 1.0125000e-01\n 2 0.30: 2.6892423e-01 2.7050835e-01\n 3 0.45: 3.1921278e-01 4.7380077e-01\n 4 0.60: 3.4812212e-01 6.9371767e-01\n 5 0.75: 3.6633149e-01 9.2138423e-01\n 6 0.90: 3.7927846e-01 1.1522633e+00\n 7 1.05: 3.8975403e-01 1.3839990e+00\n 8 1.20: 3.9921620e-01 1.6153246e+00\n 9 1.35: 4.0845332e-01 1.8455103e+00\n 10 1.50: 4.1792028e-01 2.0740846e+00\n" ] ], [ [ "# Assignmet 2.10\nIntegrate the system with Modified Euler and time step h = 0.05 for the interval [0,20]. Make plots of u and v as functions of t (put them in one figure). Also make a plot of u and v in the phase plane (u,v-plane). Do your plots correspond to your results of part 2?", "_____no_output_____" ] ], [ [ "y0 = np.matrix([0.0,0.0])\nt0 = 0.0\ntend = 20\nN = 400\n\nt_array, w_array = Integrate(y0, t0, tend, N,2,4.5, 25)\n\nprint(\"In this assignment the system has to be integrated using Modified Euler with a time step of h = 0.05 on \\na interval of [0,20].\")\nprint(\"The first graph is u(t) and v(t) against time (t).\")\nPlotGraphs(t_array, w_array)\nprint(\"The second graph shows the u-v plane\")\nPlotGraphs2(t_array, w_array)\nprint(\"\\n The system is stable and a spiral. Therefor is consistent with the conclusion from assignment 1.3.\")", "In this assignment the system has to be integrated using Modified Euler with a time step of h = 0.05 on \na interval of [0,20].\nThe first graph is u(t) and v(t) against time (t).\n" ] ], [ [ "# Assignment 2.11\nUsing the formula derived in question 7, estimate the accuracy of u and v computed with h = 0.05 at t = 8. Hence, integrate once more with time step h = 0.1.", "_____no_output_____" ], [ "The error can be estimated with Richardsons methode. were we use α = 1/3 found in assignment 7. here the estimated error is: E ≈ α( w(h) - w(2h) ).", "_____no_output_____" ] ], [ [ "y0 = np.matrix([0.0,0.0])\nt0 = 0.0\ntend = 20\nN = 400\n\nt_array, w_array = Integrate(y0, t0, tend, N, 2, 4.5,25)\n\ny0 = np.matrix([0.0,0.0])\nt0 = 0.0\ntend = 20\nN = 200\n\nt_array2, w_array2 = Integrate(y0, t0, tend, N, 2, 4.5, 25)\n\nprint(\"The value for u and v at t = 8 with h = 0.05 is:\",t_array[160], w_array[:,160])\nprint(\"The value for u and v at t = 8 with h = 0.10 is:\",t_array2[80], w_array2[:,80])", "The value for u and v at t = 8 with h = 0.05 is: 8.0 [ 2.22947774 2.14650174]\nThe value for u and v at t = 8 with h = 0.10 is: 8.0 [ 2.21871063 2.15839871]\n" ], [ "E1 = (w_array[0,160]-w_array2[0,80])*(1/3)\nE2 = (w_array[1,160]-w_array2[1,80])*(1/3)\nprint(\"The estimated acuracy for u is:\", E1)\nprint(\"The estimated acuracy for v is:\", E2)", "The estimated acuracy for u is: 0.0035890365757\nThe estimated acuracy for v is: -0.00396565753436\n" ] ], [ [ "# Assignment 2.12\nApply Modified Euler with h = 0.05. For 0 ≤ t ≤ t1 it holds that a = 2. At t = t1 the supply of materials A fails, and therefore a = 0 for t > t1. Take t1 = 4.0. 
Make plot of u and v as a function of t on the intervals [0, 10] in one figure and a plot of u and v in the uv-plane. Evaluate your results by comparing them to your findings form part 8.", "_____no_output_____" ] ], [ [ "y0 = np.matrix([0.0,0.0])\nt0 = 0.0\ntend = 10.0\nN = 200\n\nt_array, w_array = Integrate(y0, t0, tend, N, 2, 4.5, 4)\n\n\nPlotGraphs(t_array, w_array)", "_____no_output_____" ] ], [ [ "The first plot shows that u and v indeed converges to a certain value, as predicted in assignment 8. The phase plane shows that uv goes to a point on the u-axis. This was as well predicted in assignment 8.\nThe first plot shows a corner in the u and v graph (a discontinuity in the first derivative). This does not contradict the theory because the system of differential equations changes the first derivatives does not have to be continuous. The line itself is continuous because it is given in the initial values. \n", "_____no_output_____" ], [ "# Assignment 2.13\nTake t1 = 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0. Make a table of the value of v-tilda and t-tilda. Evaluate your rsults.", "_____no_output_____" ] ], [ [ "for i in np.arange(3.0,6.5,0.50):\n t0 = 0.0\n tend = 10.0\n N = 200\n t_array2, w_array2 = Integrate(y0, t0, tend, N, 2.0, 4.5,i)\n indices = np.nonzero(w_array2[0,:] >= 0.01)\n index = np.max(indices[0])\n t_tilde = t_array2[index+1]\n v_tilde = w_array2[1,N]\n if i == 3: \n print(\"%6s %17s: %17s \" % (\"t1\", \"t_tilde\", \"v_tilde\"))\n print(\"{:6.2f} {:17.2f} {:17.7e}\".format(i,t_tilde,v_tilde)) \n", " t1 t_tilde: v_tilde \n 3.00 3.95 4.6881922e+00\n 3.50 4.60 5.2621261e+00\n 4.00 6.20 3.1598768e+00\n 4.50 6.30 3.0999928e+00\n 5.00 6.60 3.1165068e+00\n 5.50 6.95 3.1994637e+00\n 6.00 7.35 3.3579235e+00\n" ] ], [ [ "lkksdndglnsldkgknlsdagn", "_____no_output_____" ], [ "De waarde moeten zijn: t1 = 6.0 v-tilde = 3.34762, t-tilde = 7.35", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0a7eb9f440607a2713c2a5b62dcc6a739286921
39,857
ipynb
Jupyter Notebook
examples/notebooks/Creating Models/2-a-pde-model.ipynb
YannickNoelStephanKuhn/PyBaMM
d90636a755b7b77bbc75ae7bc2728c8ee2fa730a
[ "BSD-3-Clause" ]
1
2020-06-22T10:11:40.000Z
2020-06-22T10:11:40.000Z
examples/notebooks/Creating Models/2-a-pde-model.ipynb
YannickNoelStephanKuhn/PyBaMM
d90636a755b7b77bbc75ae7bc2728c8ee2fa730a
[ "BSD-3-Clause" ]
1
2021-01-23T08:54:49.000Z
2021-01-23T08:54:49.000Z
examples/notebooks/Creating Models/2-a-pde-model.ipynb
YannickNoelStephanKuhn/PyBaMM
d90636a755b7b77bbc75ae7bc2728c8ee2fa730a
[ "BSD-3-Clause" ]
2
2020-05-21T23:16:29.000Z
2020-06-22T10:11:40.000Z
114.203438
27,664
0.862659
[ [ [ "# Creating a simple PDE model", "_____no_output_____" ], [ "In the [previous notebook](./1-an-ode-model.ipynb) we show how to create, discretise and solve an ODE model in pybamm. In this notebook we show how to create and solve a PDE problem, which will require meshing of the spatial domain.\n\nAs an example, we consider the problem of linear diffusion on a unit sphere,\n\\begin{equation*}\n \\frac{\\partial c}{\\partial t} = \\nabla \\cdot (\\nabla c),\n\\end{equation*}\nwith the following boundary and initial conditions:\n\\begin{equation*}\n \\left.\\frac{\\partial c}{\\partial r}\\right\\vert_{r=0} = 0, \\quad \\left.\\frac{\\partial c}{\\partial r}\\right\\vert_{r=1} = 2, \\quad \\left.c\\right\\vert_{t=0} = 1.\n\\end{equation*}\n\nAs before, we begin by importing the pybamm library into this notebook, along with any other packages we require:\n", "_____no_output_____" ] ], [ [ "%pip install pybamm -q # install PyBaMM if it is not installed\nimport pybamm\nimport numpy as np\nimport matplotlib.pyplot as plt", "Note: you may need to restart the kernel to use updated packages.\n" ] ], [ [ "## Setting up the model", "_____no_output_____" ], [ "As in the previous example, we start with a `pybamm.BaseModel` object and define our model variables. Since we are now solving a PDE we need to tell pybamm the domain each variable belongs to so that it can be discretised in space in the correct way. This is done by passing the keyword argument `domain`, and in this example we choose the domain \"negative particle\".", "_____no_output_____" ] ], [ [ "model = pybamm.BaseModel()\n\nc = pybamm.Variable(\"Concentration\", domain=\"negative particle\")", "_____no_output_____" ] ], [ [ "Note that we have given our variable the (useful) name \"Concentration\", but the symbol representing this variable is simply `c`.", "_____no_output_____" ], [ "We then state out governing equations. Sometime it is useful to define intermediate quantities in order to express the governing equations more easily. In this example we define the flux, then define the rhs to be minus the divergence of the flux. The equation is then added to the dictionary `model.rhs`", "_____no_output_____" ] ], [ [ "N = -pybamm.grad(c) # define the flux\ndcdt = -pybamm.div(N) # define the rhs equation\n\nmodel.rhs = {c: dcdt} # add the equation to rhs dictionary", "_____no_output_____" ] ], [ [ "Unlike ODE models, PDE models require both initial and boundary conditions. Similar to initial conditions, boundary conditions can be added using the dictionary `model.boundary_conditions`. 
Boundary conditions for each variable are provided as a dictionary of the form `{side: (value, type)`, where, in 1D, side can be \"left\" or \"right\", value is the value of the boundary conditions, and type is the type of boundary condition (at present, this can be \"Dirichlet\" or \"Neumann\").", "_____no_output_____" ] ], [ [ "# initial conditions\nmodel.initial_conditions = {c: pybamm.Scalar(1)}\n\n# boundary conditions\nlbc = pybamm.Scalar(0)\nrbc = pybamm.Scalar(2)\nmodel.boundary_conditions = {c: {\"left\": (lbc, \"Neumann\"), \"right\": (rbc, \"Neumann\")}}", "_____no_output_____" ] ], [ [ "Note that in our example the boundary conditions take constant values, but the value can be any valid pybamm expression.\n\nFinally, we add any variables of interest to the dictionary `model.variables`", "_____no_output_____" ] ], [ [ "model.variables = {\"Concentration\": c, \"Flux\": N}", "_____no_output_____" ] ], [ [ "## Using the model", "_____no_output_____" ], [ "Now the model is now completely defined all that remains is to discretise and solve. Since this model is a PDE we need to define the geometry on which it will be solved, and choose how to mesh the geometry and discretise in space.", "_____no_output_____" ], [ "### Defining a geometry and mesh\n\nWe can define spatial variables in a similar way to how we defined model variables, providing a domain and a coordinate system. The geometry on which we wish to solve the model is defined using a nested dictionary. The first key is the domain name (here \"negative particle\") and the entry is a dictionary giving the limits of the domain.", "_____no_output_____" ] ], [ [ "# define geometry\nr = pybamm.SpatialVariable(\n \"r\", domain=[\"negative particle\"], coord_sys=\"spherical polar\"\n)\ngeometry = {\"negative particle\": {r: {\"min\": pybamm.Scalar(0), \"max\": pybamm.Scalar(1)}}}", "_____no_output_____" ] ], [ [ "We then create a mesh using the `pybamm.MeshGenerator` class. As inputs this class takes the type of mesh and any parameters required by the mesh. In this case we choose a uniform one-dimensional mesh which doesn't require any parameters. ", "_____no_output_____" ] ], [ [ "# mesh and discretise\nsubmesh_types = {\"negative particle\": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh)}\nvar_pts = {r: 20}\nmesh = pybamm.Mesh(geometry, submesh_types, var_pts)", "_____no_output_____" ] ], [ [ "Example of meshes that do require parameters include the `pybamm.Exponential1DSubMesh` which clusters points close to one or both boundaries using an exponential rule. It takes a parameter which sets how closely the points are clustered together, and also lets the users select the side on which more points should be clustered. For example, to create a mesh with more nodes clustered to the right (i.e. the surface in the particle problem), using a stretch factor of 2, we pass an instance of the exponential submesh class and a dictionary of parameters into the `MeshGenerator` class as follows: `pybamm.MeshGenerator(pybamm.Exponential1DSubMesh, submesh_params={\"side\": \"right\", \"stretch\": 2})`", "_____no_output_____" ], [ "After defining a mesh we choose a spatial method. Here we choose the Finite Volume Method. We then set up a discretisation by passing the mesh and spatial methods to the class `pybamm.Discretisation`. 
The model is then processed, turning the variables into (slices of) a statevector, spatial variables into vector and spatial operators into matrix-vector multiplications.", "_____no_output_____" ] ], [ [ "spatial_methods = {\"negative particle\": pybamm.FiniteVolume()}\ndisc = pybamm.Discretisation(mesh, spatial_methods)\ndisc.process_model(model);", "_____no_output_____" ] ], [ [ "Now that the model has been discretised we are ready to solve. ", "_____no_output_____" ], [ "### Solving the model", "_____no_output_____" ], [ "As before, we choose a solver and times at which we want the solution returned. We then solve, extract the variables we are interested in, and plot the result.", "_____no_output_____" ] ], [ [ "# solve\nsolver = pybamm.ScipySolver()\nt = np.linspace(0, 1, 100)\nsolution = solver.solve(model, t)\n\n# post-process, so that the solution can be called at any time t or space r\n# (using interpolation)\nc = solution[\"Concentration\"]\n\n# plot\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 4))\n\nax1.plot(solution.t, c(solution.t, r=1))\nax1.set_xlabel(\"t\")\nax1.set_ylabel(\"Surface concentration\")\nr = np.linspace(0, 1, 100)\nax2.plot(r, c(t=0.5, r=r))\nax2.set_xlabel(\"r\")\nax2.set_ylabel(\"Concentration at t=0.5\")\nplt.tight_layout()\nplt.show()", "2021-01-24 19:28:37,091 - [WARNING] processed_variable.get_spatial_scale(518): No length scale set for negative particle. Using default of 1 [m].\n" ] ], [ [ "In the [next notebook](./3-negative-particle-problem.ipynb) we build on the example here to to solve the problem of diffusion in the negative electrode particle within the single particle model. In doing so we will also cover how to include parameters in a model. ", "_____no_output_____" ], [ "## References\n\nThe relevant papers for this notebook are:", "_____no_output_____" ] ], [ [ "pybamm.print_citations()", "[1] Joel A. E. Andersson, Joris Gillis, Greg Horn, James B. Rawlings, and Moritz Diehl. CasADi – A software framework for nonlinear optimization and optimal control. Mathematical Programming Computation, 11(1):1–36, 2019. doi:10.1007/s12532-018-0139-4.\n[2] Charles R. Harris, K. Jarrod Millman, Stéfan J. van der Walt, Ralf Gommers, Pauli Virtanen, David Cournapeau, Eric Wieser, Julian Taylor, Sebastian Berg, Nathaniel J. Smith, and others. Array programming with NumPy. Nature, 585(7825):357–362, 2020. doi:10.1038/s41586-020-2649-2.\n[3] Valentin Sulzer, Scott G. Marquis, Robert Timms, Martin Robinson, and S. Jon Chapman. Python Battery Mathematical Modelling (PyBaMM). ECSarXiv. February, 2020. doi:10.1149/osf.io/67ckj.\n[4] Pauli Virtanen, Ralf Gommers, Travis E. Oliphant, Matt Haberland, Tyler Reddy, David Cournapeau, Evgeni Burovski, Pearu Peterson, Warren Weckesser, Jonathan Bright, and others. SciPy 1.0: fundamental algorithms for scientific computing in Python. Nature Methods, 17(3):261–272, 2020. doi:10.1038/s41592-019-0686-2.\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d0a7f3db4283cd48562f203883c68bbfe7f64d12
13,280
ipynb
Jupyter Notebook
02b_Procedural_Python/python_functions.ipynb
martaw22/LectureNotes
a30a766ec2d371f0132fe9a10be5c6ea7a0fea83
[ "BSD-2-Clause" ]
1
2021-09-23T21:49:27.000Z
2021-09-23T21:49:27.000Z
02b_Procedural_Python/python_functions.ipynb
smturbev/LectureNotes
ca50d829cbb2b59a235bbfb45e4183ba2b564106
[ "BSD-2-Clause" ]
null
null
null
02b_Procedural_Python/python_functions.ipynb
smturbev/LectureNotes
ca50d829cbb2b59a235bbfb45e4183ba2b564106
[ "BSD-2-Clause" ]
1
2020-05-28T20:13:56.000Z
2020-05-28T20:13:56.000Z
35.891892
1,322
0.570181
[ [ [ "# Writing Functions\n\nThis lecture discusses the mechanics of writing functions and how to encapsulate scripts as functions.", "_____no_output_____" ] ], [ [ "# Example: We're going to use Pandas dataframes to create a gradebook for this course\n\nimport pandas as pd\n\n# Student Rosters:\nstudents = ['Hao', 'Jennifer', 'Alex']\n\n# Gradebook columns:\ncolumns = ['raw_grade', 'did_extra_credit', 'final_grade']\n\n# Let's create two dataframes, one for each class section\ngradebook = pd.DataFrame(index=students, columns=columns)\n\nprint(\"Gradebook:\")\nprint(gradebook)", "Gradebook:\n raw_grade did_extra_credit final_grade\nHao NaN NaN NaN\nJennifer NaN NaN NaN\nAlex NaN NaN NaN\n" ], [ "# Now let's add some data\n# (in real life we might load this from a CSV or other file)\ngradebook.loc['Hao']['raw_grade'] = 80\ngradebook.loc['Hao']['did_extra_credit'] = True # python supports boolean (True/False) values\ngradebook.loc['Jennifer']['raw_grade'] = 98\ngradebook.loc['Jennifer']['did_extra_credit'] = False\ngradebook.loc['Alex']['raw_grade'] = 85\ngradebook.loc['Alex']['did_extra_credit'] = True\n \nprint(\"Gradebook:\")\nprint(gradebook)", "Gradebook:\n raw_grade did_extra_credit final_grade\nHao 80 True NaN\nJennifer 98 False NaN\nAlex 85 True NaN\n" ] ], [ [ "## Copying and pasting code can introduce bugs: \nYou might forget to change a variable name. \n\nIf you later make a change (like making extra credit worth 10 points instead of 5), you need to remember to change it in multiple places.\n\nIf we put the code in a function, we can avoid these problems!", "_____no_output_____" ] ], [ [ "# Let's put our extra credit code in a function!\n\ndef add_final_grades(student, grade):\n print(\"in add_final_grades\")\n gradebook.loc[student, 'final_grade'] = grade\n\nadd_final_grades('Jennifer', 99)\nprint(gradebook)", "in add_final_grades\n raw_grade did_extra_credit final_grade\nHao 80 True NaN\nJennifer 98 False 99\nAlex 85 True 90\n" ] ], [ [ "## Why write functions?\n1. Easily reuse code (without introducing bugs)\n2. Easy testing of components\n <ul>\n <li>Later in the course we will learn about writing unit tests. You will create a set of input values for a function representing potential scenarios, and will test that the function is generating the expected output.\n </ul>\n3. Better readability\n <ul>\n <li>Functions encapsulate your code into components with meaningful names. You can get a high-level view of what the code is doing, then dive into the function definitions if you need more detail. \n </ul>", "_____no_output_____" ], [ "## A function should have one task\n\nFunctions should usually be pretty short. \n\nIt's good to think about functions as trying to do one single thing.", "_____no_output_____" ], [ "## Mechanics of Writing a Function\n- Function definition line - How python knows that this is a function\n- Function body - Code that does the computation of the function\n- Arguments - the values passed to a function\n- Formal parameters - the values accepted by the function\n (the arguments become the formal parameters once they are inside the function)\n- Return values - value returned to the caller\n\n\nIf you are familiar with other languages like Java, you may have needed to declare the types of the parameters and return value. 
This is not necessary in Python.", "_____no_output_____" ] ], [ [ "\ndef example_addition_function(num_1, num_2):\n \"\"\"\n This function adds two numbers.\n \n example_addition_function is the function name\n\n Parameters:\n num_1: This is the first formal parameter\n num_2: This is the second formal parameter\n\n Returns:\n sum of num_1 and num_2\n \n \"\"\" \n added_value = num_1 + num_2\n return added_value\n\narg_1 = 5\narg_2 = 10\nresult_value = example_addition_function(arg_1, arg_2) # arg_1 and arg_2 are the arguments to the function\n ", "_____no_output_____" ] ], [ [ "# Variable names and scope\n\nIn Python, variables have a scope (a context in which they are valid). \n\nVariables created in a function cannot be referenced outside of a function", "_____no_output_____" ] ], [ [ "# Let's put our extra credit code in a function!\nsection = \"Section 1\"\ndef add_final_grades(student, grade):\n print(\"in add_final_grades %s\" % section)\n gradebook.loc[student, 'final_grade'] = grade\n\nadd_final_grades('Jennifer', 99)\nprint(gradebook)", "in add_final_grades Section 1\n raw_grade did_extra_credit final_grade\nHao 80 True NaN\nJennifer 98 False 99\nAlex 85 True 90\n" ], [ "# Let's put our extra credit code in a function!\nsection = \"Section 1\"\ndef add_final_grades(student, grade):\n print(\"in add_final_grades %s\" % section)\n gradebook.loc[student, 'final_grade'] = grade\n if False:\n section = \"new\"\n\nadd_final_grades('Jennifer', 99)\nprint(gradebook)", "_____no_output_____" ], [ "def print_message(message):\n message_to_print = \"Here is your message: \" + message\n print(message_to_print)\n \nmy_message = \"Hello, class!\"\nprint_message(my_message)\n\n#print(message_to_print) # this will cause an error. This variable only exists within the function.", "_____no_output_____" ] ], [ [ "If you modify an object (like a list or a dataframe) inside of a function, the modifications will affect its value outside of the function", "_____no_output_____" ] ], [ [ "def add_name_to_list(name_list, new_name):\n name_list.append(new_name)\n \nteachers = [\"Bernease\", \"Dave\", \"Joe\"]\nprint(teachers)\nadd_name_to_list(teachers, \"Colin\")\nprint(teachers)", "_____no_output_____" ] ], [ [ "## Exercise: Write a function to determine if a number is prime\n\nBelow is some code that checks if a number is prime. The code has a bug in it!\n", "_____no_output_____" ] ], [ [ "\n# Determine if num is prime\n# This code has a bug. What is it?\n# Also, the efficiency of the code can be improved. How?\n\nnum = 3\nis_prime = True\n\nfor integer in range(1, num):\n if num % integer == 0: \n # The \"==\" operator checks for equality and returns True or False. \n # Note the difference between \"==\" and \"=\", which assigns a value to a variable.\n #\n # The \"%\" operator calculates the remainder of a division operation\n # if the remainder is zero, integer is a divisor of num, so num is not prime\n print(\"Not prime!\")\n is_prime = False\n\nif is_prime:\n print(\"Is prime!\")\n", "_____no_output_____" ] ], [ [ "Once you've identified the bug in the above code, take the code and turn it into a function that takes a number as input and returns True if the number is prime and False if it is not.\n\nSee if you can find any ways to make the code more efficient.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0a80389261e39e2ca3808ce4c61125f14a4b451
87,401
ipynb
Jupyter Notebook
Sesion2/1relaciones_2variables.ipynb
esjimenezro/pda_course
8129a31722e131c9274f7e28f6c07edb9b10fc34
[ "MIT" ]
null
null
null
Sesion2/1relaciones_2variables.ipynb
esjimenezro/pda_course
8129a31722e131c9274f7e28f6c07edb9b10fc34
[ "MIT" ]
null
null
null
Sesion2/1relaciones_2variables.ipynb
esjimenezro/pda_course
8129a31722e131c9274f7e28f6c07edb9b10fc34
[ "MIT" ]
null
null
null
151.212803
15,668
0.898765
[ [ [ "# Identificando y modelando relaciones entre pares de variables\n\n![correlation](https://static.thenounproject.com/png/1569699-200.png)\n\n> En la sesión anterior introdujimos el lenguaje de programación Python, y la librería de análisis de datos para Python **Pandas**. Con Pandas, aprendimos a:\n - Cargar datos desde archivos.\n - Manipular los datos de manera básica:\n - Obtener ciertos registros.\n - Obtener ciertas columnas.\n - Obtener registros basados en condiciones.\n - Calcular estadísticas agregadas de cada variable (media, mediana, desviación estándar).\n \n> Con las estadísticas de cada variable, podemos darnos una idea del comportamiento de cada variable en particular; sin embargo, no podemos inferir relaciones de una variable con otra. En esta sesión, revisaremos métodos prácticos para identificar relaciones entre variables, y además estudiaremos un método para modelar dichas relaciones de manera lineal.\n___", "_____no_output_____" ], [ "# 1. Coeficiente de correlación\n\nEl coeficiente de correlación mide qué tan fuerte es la relación entre dos variables. Dejaremos de lado la forma de calcularlo y nos concentraremos más en sus propiedades y la interpretación que podemos darle:\n\n- La correlación entre dos variables cualesquiera $x,y$ satisface: $$-1\\leq\\rho_{xy}\\leq 1.$$\n- La correlación de una variable con ella misma es igual a uno (1): $$\\rho_{xx}=1.$$\n- La correlación es simétrica: $$\\rho_{xy} = \\rho_{yx}.$$", "_____no_output_____" ] ], [ [ "# Importar numpy\nimport numpy as np", "_____no_output_____" ], [ "# Importar pyplot\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "- Una correlación positiva $\\rho_{xy}>0$ indica que los movimientos relativos entre las variables $x,y$ van en la misma dirección.", "_____no_output_____" ] ], [ [ "# Variables con correlación positiva\nx = np.random.rand(100)\ny = x + 0.5 * np.random.rand(100)\nplt.scatter(x, y)\nplt.xlabel('x')\nplt.ylabel('y')", "_____no_output_____" ], [ "# Correlación\nnp.corrcoef(x, y)", "_____no_output_____" ] ], [ [ "- Una correlación negativa $\\rho_{xy}<0$ indica que los movimientos relativos entre las variables $x,y$ van en direcciones opuestas.", "_____no_output_____" ] ], [ [ "# Variables con correlación negativa\nx = np.random.rand(100)\ny = 1 - x + 0.5 * np.random.rand(100)\nplt.scatter(x, y)\nplt.xlabel('x')\nplt.ylabel('y')", "_____no_output_____" ], [ "# Correlación\nnp.corrcoef(x, y)", "_____no_output_____" ] ], [ [ "- Una correlación nula $\\rho_{xy}=0$ indica que no hay una relación aparente entre las variables $x, y$.", "_____no_output_____" ] ], [ [ "# Variables con correlación nula\nx = np.random.rand(100)\ny = 0.5 * np.random.rand(100)\nplt.scatter(x, y)\nplt.xlabel('x')\nplt.ylabel('y')", "_____no_output_____" ], [ "# Correlación\nnp.corrcoef(x, y)", "_____no_output_____" ] ], [ [ "Ya que entendimos el coeficiente de correlación, utilicémoslo para descubrir la relación que hay entre el precio de las casas y su tamaño:", "_____no_output_____" ] ], [ [ "# Importar pandas\nimport pandas as pd", "_____no_output_____" ], [ "# Cargamos datos de las casas\ndata = pd.read_csv(\"house_pricing.csv\")", "_____no_output_____" ], [ "# Gráfico precio vs. tamaño\nplt.scatter(data['size'], data['price'])\nplt.xlabel('Tamaño ($ft^2$)')\nplt.ylabel('Precio (USD)')", "_____no_output_____" ], [ "# Correlación\nnp.corrcoef(data['size'], data['price'])", "_____no_output_____" ] ], [ [ "# 2. 
Regresión lineal con una variable\n\nUna vez hemos identificado la relación entre dos variables, ¿Cómo podemos modelar esta relación?\n\nLa respuesta es: **regresión lineal**.", "_____no_output_____" ], [ "En términos simples, el objetivo de la regresión lineal es encontrar un modelo de una recta:\n\n$$\ny = m x + b\n$$\n\nque **\"mejor\" (en el sentido de mínimos cuadrados) se ajuste a los puntos**. En otras palabras, el objetivo es encontrar el modelo de una recta que \"aprenda\" los datos.", "_____no_output_____" ], [ "Matemáticamente, si los puntos son\n\n$$\n\\{(x_1, y_1), (x_2, y_2), \\dots, (x_n, y_n)\\},\n$$\n\nlo que queremos es estimar los valores de los parámetros $m$ y $b$ que minimizan la siguiente función:\n\n$$\nJ(m, b) = \\sum_{i=1}^{n}(y_i - (m x_i + b))^2\n$$", "_____no_output_____" ], [ "### ¿Cómo hacemos esto en python?\n\nMediante la clase `LinearRegression` de la librería `sklearn`:", "_____no_output_____" ] ], [ [ "# Importar sklearn.linear_model.LinearRegression\nfrom sklearn.linear_model import LinearRegression", "_____no_output_____" ], [ "# Ajustar la mejor recta a los datos de tamaños y precios\nmodel = LinearRegression().fit(data[['size']], data['price'])", "_____no_output_____" ], [ "# Obtener parámetros ajustados\nm = model.coef_\nb = model.intercept_\nm, b", "_____no_output_____" ], [ "# Gráfico de datos y de modelo ajustado\nplt.plot(data['size'], model.predict(data[['size']]))\nplt.scatter(data['size'], data['price'])\nplt.xlabel('Tamaño ($ft^2$)')\nplt.ylabel('Precio (USD)')", "_____no_output_____" ] ], [ [ "### ¿Y esto para qué nos sirve? Sistema automático de avalúos\n\nSupongamos que un amigo nuestro tiene una casa de $2000$ pies cuadrados en Portland, y desea saber a qué precio la podría vender. Se acaba de enterar que justo acabamos de generar un sistema automático que determina el precio de las casas de Portland usando el tamaño de las mismas.\n\n¿A qué precio podría vender nuestro amigo la casa?", "_____no_output_____" ] ], [ [ "# Determinar el precio\nmodel.predict([[2000]])", "_____no_output_____" ], [ "# Gráfico\nplt.plot(data['size'], model.predict(data[['size']]))\nplt.plot(2000, model.predict([[2000]]), '*r', ms=10)\nplt.scatter(data['size'], data['price'])\nplt.xlabel('Tamaño ($ft^2$)')\nplt.ylabel('Precio (USD)')", "_____no_output_____" ] ], [ [ "<script>\n $(document).ready(function(){\n $('div.prompt').hide();\n $('div.back-to-top').hide();\n $('nav#menubar').hide();\n $('.breadcrumb').hide();\n $('.hidden-print').hide();\n });\n</script>\n\n<footer id=\"attribution\" style=\"float:right; color:#808080; background:#fff;\">\nCreated with Jupyter by Esteban Jiménez Rodríguez.\n</footer>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0a8072bf214f94f72f7f9fe4505035aec112cc8
63,747
ipynb
Jupyter Notebook
d2l/tensorflow/chapter_linear-networks/linear-regression.ipynb
541979210/xdf
ab99d242fbabe56a7b4d7723605cc17aab7888b2
[ "Apache-2.0" ]
2
2021-12-11T07:19:34.000Z
2022-03-11T09:29:49.000Z
d2l/tensorflow/chapter_linear-networks/linear-regression.ipynb
541979210/xdf
ab99d242fbabe56a7b4d7723605cc17aab7888b2
[ "Apache-2.0" ]
null
null
null
d2l/tensorflow/chapter_linear-networks/linear-regression.ipynb
541979210/xdf
ab99d242fbabe56a7b4d7723605cc17aab7888b2
[ "Apache-2.0" ]
null
null
null
38.217626
598
0.499867
[ [ [ "# 线性回归\n:label:`sec_linear_regression`\n\n*回归*(regression)是能为一个或多个自变量与因变量之间关系建模的一类方法。\n在自然科学和社会科学领域,回归经常用来表示输入和输出之间的关系。\n\n在机器学习领域中的大多数任务通常都与*预测*(prediction)有关。\n当我们想预测一个数值时,就会涉及到回归问题。\n常见的例子包括:预测价格(房屋、股票等)、预测住院时间(针对住院病人等)、\n预测需求(零售销量等)。\n但不是所有的*预测*都是回归问题。\n在后面的章节中,我们将介绍分类问题。分类问题的目标是预测数据属于一组类别中的哪一个。\n\n## 线性回归的基本元素\n\n*线性回归*(linear regression)可以追溯到19世纪初,\n它在回归的各种标准工具中最简单而且最流行。\n线性回归基于几个简单的假设:\n首先,假设自变量$\\mathbf{x}$和因变量$y$之间的关系是线性的,\n即$y$可以表示为$\\mathbf{x}$中元素的加权和,这里通常允许包含观测值的一些噪声;\n其次,我们假设任何噪声都比较正常,如噪声遵循正态分布。\n\n为了解释*线性回归*,我们举一个实际的例子:\n我们希望根据房屋的面积(平方英尺)和房龄(年)来估算房屋价格(美元)。\n为了开发一个能预测房价的模型,我们需要收集一个真实的数据集。\n这个数据集包括了房屋的销售价格、面积和房龄。\n在机器学习的术语中,该数据集称为*训练数据集*(training data set)\n或*训练集*(training set)。\n每行数据(比如一次房屋交易相对应的数据)称为*样本*(sample),\n也可以称为*数据点*(data point)或*数据样本*(data instance)。\n我们把试图预测的目标(比如预测房屋价格)称为*标签*(label)或*目标*(target)。\n预测所依据的自变量(面积和房龄)称为*特征*(feature)或*协变量*(covariate)。\n\n通常,我们使用$n$来表示数据集中的样本数。\n对索引为$i$的样本,其输入表示为$\\mathbf{x}^{(i)} = [x_1^{(i)}, x_2^{(i)}]^\\top$,\n其对应的标签是$y^{(i)}$。\n\n### 线性模型\n:label:`subsec_linear_model`\n\n线性假设是指目标(房屋价格)可以表示为特征(面积和房龄)的加权和,如下面的式子:\n\n$$\\mathrm{price} = w_{\\mathrm{area}} \\cdot \\mathrm{area} + w_{\\mathrm{age}} \\cdot \\mathrm{age} + b.$$\n:eqlabel:`eq_price-area`\n\n :eqref:`eq_price-area`中的$w_{\\mathrm{area}}$和$w_{\\mathrm{age}}$\n称为*权重*(weight),权重决定了每个特征对我们预测值的影响。\n$b$称为*偏置*(bias)、*偏移量*(offset)或*截距*(intercept)。\n偏置是指当所有特征都取值为0时,预测值应该为多少。\n即使现实中不会有任何房子的面积是0或房龄正好是0年,我们仍然需要偏置项。\n如果没有偏置项,我们模型的表达能力将受到限制。\n严格来说, :eqref:`eq_price-area`是输入特征的一个\n*仿射变换*(affine transformation)。\n仿射变换的特点是通过加权和对特征进行*线性变换*(linear transformation),\n并通过偏置项来进行*平移*(translation)。\n\n给定一个数据集,我们的目标是寻找模型的权重$\\mathbf{w}$和偏置$b$,\n使得根据模型做出的预测大体符合数据里的真实价格。\n输出的预测值由输入特征通过*线性模型*的仿射变换决定,仿射变换由所选权重和偏置确定。\n\n而在机器学习领域,我们通常使用的是高维数据集,建模时采用线性代数表示法会比较方便。\n当我们的输入包含$d$个特征时,我们将预测结果$\\hat{y}$\n(通常使用“尖角”符号表示$y$的估计值)表示为:\n\n$$\\hat{y} = w_1 x_1 + ... 
+ w_d x_d + b.$$\n\n将所有特征放到向量$\\mathbf{x} \\in \\mathbb{R}^d$中,\n并将所有权重放到向量$\\mathbf{w} \\in \\mathbb{R}^d$中,\n我们可以用点积形式来简洁地表达模型:\n\n$$\\hat{y} = \\mathbf{w}^\\top \\mathbf{x} + b.$$\n:eqlabel:`eq_linreg-y`\n\n在 :eqref:`eq_linreg-y`中,\n向量$\\mathbf{x}$对应于单个数据样本的特征。\n用符号表示的矩阵$\\mathbf{X} \\in \\mathbb{R}^{n \\times d}$\n可以很方便地引用我们整个数据集的$n$个样本。\n其中,$\\mathbf{X}$的每一行是一个样本,每一列是一种特征。\n\n对于特征集合$\\mathbf{X}$,预测值$\\hat{\\mathbf{y}} \\in \\mathbb{R}^n$\n可以通过矩阵-向量乘法表示为:\n\n$${\\hat{\\mathbf{y}}} = \\mathbf{X} \\mathbf{w} + b$$\n\n这个过程中的求和将使用广播机制\n(广播机制在 :numref:`subsec_broadcasting`中有详细介绍)。\n给定训练数据特征$\\mathbf{X}$和对应的已知标签$\\mathbf{y}$,\n线性回归的目标是找到一组权重向量$\\mathbf{w}$和偏置$b$:\n当给定从$\\mathbf{X}$的同分布中取样的新样本特征时,\n这组权重向量和偏置能够使得新样本预测标签的误差尽可能小。\n\n虽然我们相信给定$\\mathbf{x}$预测$y$的最佳模型会是线性的,\n但我们很难找到一个有$n$个样本的真实数据集,其中对于所有的$1 \\leq i \\leq n$,$y^{(i)}$完全等于$\\mathbf{w}^\\top \\mathbf{x}^{(i)}+b$。\n无论我们使用什么手段来观察特征$\\mathbf{X}$和标签$\\mathbf{y}$,\n都可能会出现少量的观测误差。\n因此,即使确信特征与标签的潜在关系是线性的,\n我们也会加入一个噪声项来考虑观测误差带来的影响。\n\n在开始寻找最好的*模型参数*(model parameters)$\\mathbf{w}$和$b$之前,\n我们还需要两个东西:\n(1)一种模型质量的度量方式;\n(2)一种能够更新模型以提高模型预测质量的方法。\n\n### 损失函数\n\n在我们开始考虑如何用模型*拟合*(fit)数据之前,我们需要确定一个拟合程度的度量。\n*损失函数*(loss function)能够量化目标的*实际*值与*预测*值之间的差距。\n通常我们会选择非负数作为损失,且数值越小表示损失越小,完美预测时的损失为0。\n回归问题中最常用的损失函数是平方误差函数。\n当样本$i$的预测值为$\\hat{y}^{(i)}$,其相应的真实标签为$y^{(i)}$时,\n平方误差可以定义为以下公式:\n\n$$l^{(i)}(\\mathbf{w}, b) = \\frac{1}{2} \\left(\\hat{y}^{(i)} - y^{(i)}\\right)^2.$$\n:eqlabel:`eq_mse`\n\n常数$\\frac{1}{2}$不会带来本质的差别,但这样在形式上稍微简单一些\n(因为当我们对损失函数求导后常数系数为1)。\n由于训练数据集并不受我们控制,所以经验误差只是关于模型参数的函数。\n为了进一步说明,来看下面的例子。\n我们为一维情况下的回归问题绘制图像,如 :numref:`fig_fit_linreg`所示。\n\n![用线性模型拟合数据。](../img/fit-linreg.svg)\n:label:`fig_fit_linreg`\n\n由于平方误差函数中的二次方项,\n估计值$\\hat{y}^{(i)}$和观测值$y^{(i)}$之间较大的差异将导致更大的损失。\n为了度量模型在整个数据集上的质量,我们需计算在训练集$n$个样本上的损失均值(也等价于求和)。\n\n$$L(\\mathbf{w}, b) =\\frac{1}{n}\\sum_{i=1}^n l^{(i)}(\\mathbf{w}, b) =\\frac{1}{n} \\sum_{i=1}^n \\frac{1}{2}\\left(\\mathbf{w}^\\top \\mathbf{x}^{(i)} + b - y^{(i)}\\right)^2.$$\n\n在训练模型时,我们希望寻找一组参数($\\mathbf{w}^*, b^*$),\n这组参数能最小化在所有训练样本上的总损失。如下式:\n\n$$\\mathbf{w}^*, b^* = \\operatorname*{argmin}_{\\mathbf{w}, b}\\ L(\\mathbf{w}, b).$$\n\n### 解析解\n\n线性回归刚好是一个很简单的优化问题。\n与我们将在本书中所讲到的其他大部分模型不同,线性回归的解可以用一个公式简单地表达出来,\n这类解叫作解析解(analytical solution)。\n首先,我们将偏置$b$合并到参数$\\mathbf{w}$中,合并方法是在包含所有参数的矩阵中附加一列。\n我们的预测问题是最小化$\\|\\mathbf{y} - \\mathbf{X}\\mathbf{w}\\|^2$。\n这在损失平面上只有一个临界点,这个临界点对应于整个区域的损失极小点。\n将损失关于$\\mathbf{w}$的导数设为0,得到解析解:\n\n$$\\mathbf{w}^* = (\\mathbf X^\\top \\mathbf X)^{-1}\\mathbf X^\\top \\mathbf{y}.$$\n\n像线性回归这样的简单问题存在解析解,但并不是所有的问题都存在解析解。\n解析解可以进行很好的数学分析,但解析解对问题的限制很严格,导致它无法广泛应用在深度学习里。\n\n### 随机梯度下降\n\n即使在我们无法得到解析解的情况下,我们仍然可以有效地训练模型。\n在许多任务上,那些难以优化的模型效果要更好。\n因此,弄清楚如何训练这些难以优化的模型是非常重要的。\n\n本书中我们用到一种名为*梯度下降*(gradient descent)的方法,\n这种方法几乎可以优化所有深度学习模型。\n它通过不断地在损失函数递减的方向上更新参数来降低误差。\n\n梯度下降最简单的用法是计算损失函数(数据集中所有样本的损失均值)\n关于模型参数的导数(在这里也可以称为梯度)。\n但实际中的执行可能会非常慢:因为在每一次更新参数之前,我们必须遍历整个数据集。\n因此,我们通常会在每次需要计算更新的时候随机抽取一小批样本,\n这种变体叫做*小批量随机梯度下降*(minibatch stochastic gradient descent)。\n\n在每次迭代中,我们首先随机抽样一个小批量$\\mathcal{B}$,\n它是由固定数量的训练样本组成的。\n然后,我们计算小批量的平均损失关于模型参数的导数(也可以称为梯度)。\n最后,我们将梯度乘以一个预先确定的正数$\\eta$,并从当前参数的值中减掉。\n\n我们用下面的数学公式来表示这一更新过程($\\partial$表示偏导数):\n\n$$(\\mathbf{w},b) \\leftarrow (\\mathbf{w},b) - \\frac{\\eta}{|\\mathcal{B}|} \\sum_{i \\in \\mathcal{B}} \\partial_{(\\mathbf{w},b)} l^{(i)}(\\mathbf{w},b).$$\n\n总结一下,算法的步骤如下:\n(1)初始化模型参数的值,如随机初始化;\n(2)从数据集中随机抽取小批量样本且在负梯度的方向上更新参数,并不断迭代这一步骤。\n对于平方损失和仿射变换,我们可以明确地写成如下形式:\n\n$$\\begin{aligned} \\mathbf{w} &\\leftarrow \\mathbf{w} - \\frac{\\eta}{|\\mathcal{B}|} \\sum_{i \\in 
\\mathcal{B}} \\partial_{\\mathbf{w}} l^{(i)}(\\mathbf{w}, b) = \\mathbf{w} - \\frac{\\eta}{|\\mathcal{B}|} \\sum_{i \\in \\mathcal{B}} \\mathbf{x}^{(i)} \\left(\\mathbf{w}^\\top \\mathbf{x}^{(i)} + b - y^{(i)}\\right),\\\\ b &\\leftarrow b - \\frac{\\eta}{|\\mathcal{B}|} \\sum_{i \\in \\mathcal{B}} \\partial_b l^{(i)}(\\mathbf{w}, b) = b - \\frac{\\eta}{|\\mathcal{B}|} \\sum_{i \\in \\mathcal{B}} \\left(\\mathbf{w}^\\top \\mathbf{x}^{(i)} + b - y^{(i)}\\right). \\end{aligned}$$\n:eqlabel:`eq_linreg_batch_update`\n\n公式 :eqref:`eq_linreg_batch_update`中的$\\mathbf{w}$和$\\mathbf{x}$都是向量。\n在这里,更优雅的向量表示法比系数表示法(如$w_1, w_2, \\ldots, w_d$)更具可读性。\n$|\\mathcal{B}|$表示每个小批量中的样本数,这也称为*批量大小*(batch size)。\n$\\eta$表示*学习率*(learning rate)。\n批量大小和学习率的值通常是手动预先指定,而不是通过模型训练得到的。\n这些可以调整但不在训练过程中更新的参数称为*超参数*(hyperparameter)。\n*调参*(hyperparameter tuning)是选择超参数的过程。\n超参数通常是我们根据训练迭代结果来调整的,\n而训练迭代结果是在独立的*验证数据集*(validation dataset)上评估得到的。\n\n在训练了预先确定的若干迭代次数后(或者直到满足某些其他停止条件后),\n我们记录下模型参数的估计值,表示为$\\hat{\\mathbf{w}}, \\hat{b}$。\n但是,即使我们的函数确实是线性的且无噪声,这些估计值也不会使损失函数真正地达到最小值。\n因为算法会使得损失向最小值缓慢收敛,但却不能在有限的步数内非常精确地达到最小值。\n\n线性回归恰好是一个在整个域中只有一个最小值的学习问题。\n但是对于像深度神经网络这样复杂的模型来说,损失平面上通常包含多个最小值。\n深度学习实践者很少会去花费大力气寻找这样一组参数,使得在*训练集*上的损失达到最小。\n事实上,更难做到的是找到一组参数,这组参数能够在我们从未见过的数据上实现较低的损失,\n这一挑战被称为*泛化*(generalization)。\n\n### 用模型进行预测\n\n给定“已学习”的线性回归模型$\\hat{\\mathbf{w}}^\\top \\mathbf{x} + \\hat{b}$,\n现在我们可以通过房屋面积$x_1$和房龄$x_2$来估计一个(未包含在训练数据中的)新房屋价格。\n给定特征估计目标的过程通常称为*预测*(prediction)或*推断*(inference)。\n\n本书将尝试坚持使用*预测*这个词。\n虽然*推断*这个词已经成为深度学习的标准术语,但其实*推断*这个词有些用词不当。\n在统计学中,*推断*更多地表示基于数据集估计参数。\n当深度学习从业者与统计学家交谈时,术语的误用经常导致一些误解。\n\n## 矢量化加速\n\n在训练我们的模型时,我们经常希望能够同时处理整个小批量的样本。\n为了实现这一点,需要(**我们对计算进行矢量化,\n从而利用线性代数库,而不是在Python中编写开销高昂的for循环**)。\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l", "_____no_output_____" ] ], [ [ "为了说明矢量化为什么如此重要,我们考虑(**对向量相加的两种方法**)。\n我们实例化两个全为1的10000维向量。\n在一种方法中,我们将使用Python的for循环遍历向量;\n在另一种方法中,我们将依赖对`+`的调用。\n", "_____no_output_____" ] ], [ [ "n = 10000\na = tf.ones(n)\nb = tf.ones(n)", "_____no_output_____" ] ], [ [ "由于在本书中我们将频繁地进行运行时间的基准测试,所以[**我们定义一个计时器**]:\n", "_____no_output_____" ] ], [ [ "class Timer: #@save\n \"\"\"记录多次运行时间\"\"\"\n def __init__(self):\n self.times = []\n self.start()\n\n def start(self):\n \"\"\"启动计时器\"\"\"\n self.tik = time.time()\n\n def stop(self):\n \"\"\"停止计时器并将时间记录在列表中\"\"\"\n self.times.append(time.time() - self.tik)\n return self.times[-1]\n\n def avg(self):\n \"\"\"返回平均时间\"\"\"\n return sum(self.times) / len(self.times)\n\n def sum(self):\n \"\"\"返回时间总和\"\"\"\n return sum(self.times)\n\n def cumsum(self):\n \"\"\"返回累计时间\"\"\"\n return np.array(self.times).cumsum().tolist()", "_____no_output_____" ] ], [ [ "现在我们可以对工作负载进行基准测试。\n\n首先,[**我们使用for循环,每次执行一位的加法**]。\n", "_____no_output_____" ] ], [ [ "c = tf.Variable(tf.zeros(n))\ntimer = Timer()\nfor i in range(n):\n c[i].assign(a[i] + b[i])\nf'{timer.stop():.5f} sec'", "_____no_output_____" ] ], [ [ "(**或者,我们使用重载的`+`运算符来计算按元素的和**)。\n", "_____no_output_____" ] ], [ [ "timer.start()\nd = a + b\nf'{timer.stop():.5f} sec'", "_____no_output_____" ] ], [ [ "结果很明显,第二种方法比第一种方法快得多。\n矢量化代码通常会带来数量级的加速。\n另外,我们将更多的数学运算放到库中,而无须自己编写那么多的计算,从而减少了出错的可能性。\n\n## 正态分布与平方损失\n:label:`subsec_normal_distribution_and_squared_loss`\n\n接下来,我们通过对噪声分布的假设来解读平方损失目标函数。\n\n正态分布和线性回归之间的关系很密切。\n正态分布(normal distribution),也称为*高斯分布*(Gaussian distribution),\n最早由德国数学家高斯(Gauss)应用于天文学研究。\n简单的说,若随机变量$x$具有均值$\\mu$和方差$\\sigma^2$(标准差$\\sigma$),其正态分布概率密度函数如下:\n\n$$p(x) = \\frac{1}{\\sqrt{2 \\pi 
\\sigma^2}} \\exp\\left(-\\frac{1}{2 \\sigma^2} (x - \\mu)^2\\right).$$\n\n下面[**我们定义一个Python函数来计算正态分布**]。\n", "_____no_output_____" ] ], [ [ "def normal(x, mu, sigma):\n p = 1 / math.sqrt(2 * math.pi * sigma**2)\n return p * np.exp(-0.5 / sigma**2 * (x - mu)**2)", "_____no_output_____" ] ], [ [ "我们现在(**可视化正态分布**)。\n", "_____no_output_____" ] ], [ [ "# 再次使用numpy进行可视化\nx = np.arange(-7, 7, 0.01)\n\n# 均值和标准差对\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5),\n legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "_____no_output_____" ] ], [ [ "就像我们所看到的,改变均值会产生沿$x$轴的偏移,增加方差将会分散分布、降低其峰值。\n\n均方误差损失函数(简称均方损失)可以用于线性回归的一个原因是:\n我们假设了观测中包含噪声,其中噪声服从正态分布。\n噪声正态分布如下式:\n\n$$y = \\mathbf{w}^\\top \\mathbf{x} + b + \\epsilon,$$\n\n其中,$\\epsilon \\sim \\mathcal{N}(0, \\sigma^2)$。\n\n因此,我们现在可以写出通过给定的$\\mathbf{x}$观测到特定$y$的*似然*(likelihood):\n\n$$P(y \\mid \\mathbf{x}) = \\frac{1}{\\sqrt{2 \\pi \\sigma^2}} \\exp\\left(-\\frac{1}{2 \\sigma^2} (y - \\mathbf{w}^\\top \\mathbf{x} - b)^2\\right).$$\n\n现在,根据极大似然估计法,参数$\\mathbf{w}$和$b$的最优值是使整个数据集的*似然*最大的值:\n\n$$P(\\mathbf y \\mid \\mathbf X) = \\prod_{i=1}^{n} p(y^{(i)}|\\mathbf{x}^{(i)}).$$\n\n根据极大似然估计法选择的估计量称为*极大似然估计量*。\n虽然使许多指数函数的乘积最大化看起来很困难,\n但是我们可以在不改变目标的前提下,通过最大化似然对数来简化。\n由于历史原因,优化通常是说最小化而不是最大化。\n我们可以改为*最小化负对数似然*$-\\log P(\\mathbf y \\mid \\mathbf X)$。\n由此可以得到的数学公式是:\n\n$$-\\log P(\\mathbf y \\mid \\mathbf X) = \\sum_{i=1}^n \\frac{1}{2} \\log(2 \\pi \\sigma^2) + \\frac{1}{2 \\sigma^2} \\left(y^{(i)} - \\mathbf{w}^\\top \\mathbf{x}^{(i)} - b\\right)^2.$$\n\n现在我们只需要假设$\\sigma$是某个固定常数就可以忽略第一项,\n因为第一项不依赖于$\\mathbf{w}$和$b$。\n现在第二项除了常数$\\frac{1}{\\sigma^2}$外,其余部分和前面介绍的均方误差是一样的。\n幸运的是,上面式子的解并不依赖于$\\sigma$。\n因此,在高斯噪声的假设下,最小化均方误差等价于对线性模型的极大似然估计。\n\n## 从线性回归到深度网络\n\n到目前为止,我们只谈论了线性模型。\n尽管神经网络涵盖了更多更为丰富的模型,我们依然可以用描述神经网络的方式来描述线性模型,\n从而把线性模型看作一个神经网络。\n首先,我们用“层”符号来重写这个模型。\n\n### 神经网络图\n\n深度学习从业者喜欢绘制图表来可视化模型中正在发生的事情。\n在 :numref:`fig_single_neuron`中,我们将线性回归模型描述为一个神经网络。\n需要注意的是,该图只显示连接模式,即只显示每个输入如何连接到输出,隐去了权重和偏置的值。\n\n![线性回归是一个单层神经网络。](../img/singleneuron.svg)\n:label:`fig_single_neuron`\n\n在 :numref:`fig_single_neuron`所示的神经网络中,输入为$x_1, \\ldots, x_d$,\n因此输入层中的*输入数*(或称为*特征维度*,feature dimensionality)为$d$。\n网络的输出为$o_1$,因此输出层中的*输出数*是1。\n需要注意的是,输入值都是已经给定的,并且只有一个*计算*神经元。\n由于模型重点在发生计算的地方,所以通常我们在计算层数时不考虑输入层。\n也就是说, :numref:`fig_single_neuron`中神经网络的*层数*为1。\n我们可以将线性回归模型视为仅由单个人工神经元组成的神经网络,或称为单层神经网络。\n\n对于线性回归,每个输入都与每个输出(在本例中只有一个输出)相连,\n我们将这种变换( :numref:`fig_single_neuron`中的输出层)\n称为*全连接层*(fully-connected layer)或称为*稠密层*(dense layer)。\n下一章将详细讨论由这些层组成的网络。\n\n### 生物学\n\n线性回归发明的时间(1795年)早于计算神经科学,所以将线性回归描述为神经网络似乎不合适。\n当控制学家、神经生物学家沃伦·麦库洛奇和沃尔特·皮茨开始开发人工神经元模型时,\n他们为什么将线性模型作为一个起点呢?\n我们来看一张图片 :numref:`fig_Neuron`:\n这是一张由*树突*(dendrites,输入终端)、\n*细胞核*(nucleu,CPU)组成的生物神经元图片。\n*轴突*(axon,输出线)和*轴突端子*(axon terminal,输出端子)\n通过*突触*(synapse)与其他神经元连接。\n\n![真实的神经元。](../img/neuron.svg)\n:label:`fig_Neuron`\n\n树突中接收到来自其他神经元(或视网膜等环境传感器)的信息$x_i$。\n该信息通过*突触权重*$w_i$来加权,以确定输入的影响(即,通过$x_i w_i$相乘来激活或抑制)。\n来自多个源的加权输入以加权和$y = \\sum_i x_i w_i + b$的形式汇聚在细胞核中,\n然后将这些信息发送到轴突$y$中进一步处理,通常会通过$\\sigma(y)$进行一些非线性处理。\n之后,它要么到达目的地(例如肌肉),要么通过树突进入另一个神经元。\n\n当然,许多这样的单元可以通过正确连接和正确的学习算法拼凑在一起,\n从而产生的行为会比单独一个神经元所产生的行为更有趣、更复杂,\n这种想法归功于我们对真实生物神经系统的研究。\n\n当今大多数深度学习的研究几乎没有直接从神经科学中获得灵感。\n我们援引斯图尔特·罗素和彼得·诺维格谁,在他们的经典人工智能教科书\n*Artificial Intelligence:A Modern Approach* :cite:`Russell.Norvig.2016`\n中所说:虽然飞机可能受到鸟类的启发,但几个世纪以来,鸟类学并不是航空创新的主要驱动力。\n同样地,如今在深度学习中的灵感同样或更多地来自数学、统计学和计算机科学。\n\n## 小结\n\n* 机器学习模型中的关键要素是训练数据、损失函数、优化算法,还有模型本身。\n* 
矢量化使数学表达上更简洁,同时运行的更快。\n* 最小化目标函数和执行极大似然估计等价。\n* 线性回归模型也是一个简单的神经网络。\n\n## 练习\n\n1. 假设我们有一些数据$x_1, \\ldots, x_n \\in \\mathbb{R}$。我们的目标是找到一个常数$b$,使得最小化$\\sum_i (x_i - b)^2$。\n 1. 找到最优值$b$的解析解。\n 1. 这个问题及其解与正态分布有什么关系?\n1. 推导出使用平方误差的线性回归优化问题的解析解。为了简化问题,可以忽略偏置$b$(我们可以通过向$\\mathbf X$添加所有值为1的一列来做到这一点)。\n 1. 用矩阵和向量表示法写出优化问题(将所有数据视为单个矩阵,将所有目标值视为单个向量)。\n 1. 计算损失对$w$的梯度。\n 1. 通过将梯度设为0、求解矩阵方程来找到解析解。\n 1. 什么时候可能比使用随机梯度下降更好?这种方法何时会失效?\n1. 假定控制附加噪声$\\epsilon$的噪声模型是指数分布。也就是说,$p(\\epsilon) = \\frac{1}{2} \\exp(-|\\epsilon|)$\n 1. 写出模型$-\\log P(\\mathbf y \\mid \\mathbf X)$下数据的负对数似然。\n 1. 你能写出解析解吗?\n 1. 提出一种随机梯度下降算法来解决这个问题。哪里可能出错?(提示:当我们不断更新参数时,在驻点附近会发生什么情况)你能解决这个问题吗?\n", "_____no_output_____" ], [ "[Discussions](https://discuss.d2l.ai/t/1776)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0a821191be2ed7b7b4859ec880ed8b3a1b06eb2
42,593
ipynb
Jupyter Notebook
P2.ipynb
irvlust/DRLND-Continuous-Control
d48076f77a19c2194d680141bbdcc06d4e425814
[ "Apache-2.0" ]
null
null
null
P2.ipynb
irvlust/DRLND-Continuous-Control
d48076f77a19c2194d680141bbdcc06d4e425814
[ "Apache-2.0" ]
null
null
null
P2.ipynb
irvlust/DRLND-Continuous-Control
d48076f77a19c2194d680141bbdcc06d4e425814
[ "Apache-2.0" ]
null
null
null
94.232301
23,232
0.792055
[ [ [ "# Project Submission\n\nContinuous Control for the Udacity Ud893 Deep Reinforcement Learning Nanodegree (DRLND)\n\n## Imports and Dependencies", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append(\"../python\")\nimport random\nimport numpy as np\nimport torch\nfrom collections import deque\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nfrom unityagents import UnityEnvironment\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Unity Environment\n\nNote that if your operating system is Windows (64-bit), the Unity environment is included and you can run the below environment instantiation cell. \n\nHowever, if you're using a different operating system, download the file you require from one of the following links:\n\n- Linux: [click here](https://s3-us-west-1.amazonaws.com/udacity-drlnd/P2/Reacher/Reacher_Linux.zip)\n- Mac OSX: [click here](https://s3-us-west-1.amazonaws.com/udacity-drlnd/P2/Reacher/Reacher.app.zip)\n- Windows (32-bit): [click here](https://s3-us-west-1.amazonaws.com/udacity-drlnd/P2/Reacher/Reacher_Windows_x86.zip)\n\nThen, place the file in the main project directory folder and unzip (or decompress) the file. Modify the file_name in the below cell and then run the cell.", "_____no_output_____" ] ], [ [ "env = UnityEnvironment(file_name=\"Reacher_20_Windows_x86_64/Reacher.exe\")", "INFO:unityagents:\n'Academy' started successfully!\nUnity Academy name: Academy\n Number of Brains: 1\n Number of External Brains : 1\n Lesson number : 0\n Reset Parameters :\n\t\tgoal_size -> 5.0\n\t\tgoal_speed -> 1.0\nUnity brain name: ReacherBrain\n Number of Visual Observations (per agent): 0\n Vector Observation space type: continuous\n Vector Observation space size (per agent): 33\n Number of stacked Vector Observation: 1\n Vector Action space type: continuous\n Vector Action space size (per agent): 4\n Vector Action descriptions: , , , \n" ] ], [ [ "## Get Default Brain", "_____no_output_____" ] ], [ [ "# get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]", "_____no_output_____" ] ], [ [ "## Main Training Loop Function", "_____no_output_____" ] ], [ [ "def training_loop_20(agent, actor_model_filename='ckpnt_actor_20.pth', critic_model_filename='ckpnt_critic_20.pth', n_episodes=1000, max_t=3000): \n \"\"\"DDPG Training Loop\n Params\n ======\n agent (function): agent function\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timesteps per episode\n \"\"\"\n start_time = datetime.now()\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations # get the current state \n agent.reset()\n \n score = np.zeros(len(env_info.agents))\n for t in range(max_t):\n action = agent.act(state)\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations # get the next state\n reward = env_info.rewards # get the reward\n done = env_info.local_done # see if episode has finished\n agent.step(state, action, reward, next_state, done, t)\n state = next_state\n score += reward\n if np.any(done):\n break \n scores_window.append(np.mean(score)) # save most recent score\n scores.append(np.mean(score))\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tTime: {}'.format(i_episode, np.mean(scores_window), datetime.now()-start_time), 
end=\"\")\n if i_episode % 1 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tTime: {}'.format(i_episode, np.mean(scores_window), datetime.now()-start_time))\n if np.mean(scores_window)>=30.0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n torch.save(agent.actor_local.state_dict(), actor_model_filename)\n torch.save(agent.critic_local.state_dict(), critic_model_filename)\n break\n return scores", "_____no_output_____" ] ], [ [ "## DDPG Agent", "_____no_output_____" ] ], [ [ "from ddpg_agent_20 import Agent\n\nagent = Agent(state_size=33, action_size=4, random_seed=15, lr_a=1e-4, lr_c=1e-3,weight_decay=0, fc1_units=400, fc2_units=300)\n\nstart = datetime.now()\nscores = training_loop_20(agent)\nend = datetime.now()\ntime_taken = end - start\nprint('Time: ',time_taken) \n\n# plot the scores\nplt.plot(np.arange(len(scores)), scores)\nplt.ylabel('Score')\nplt.xlabel('Episodes')\nplt.title('DDPG Agent')\nplt.show()\n\n# purposely left commented. Leave as is.\n# env.close()", "Agent Parameters:\nlr_a: 0.0001\nlr_c: 0.001\nweight decay: 0\nfc1_units: 400\nfc2_units: 300\nEpisode 1\tAverage Score: 0.23\tTime: 0:01:15.565206\nEpisode 2\tAverage Score: 0.24\tTime: 0:02:38.972181\nEpisode 3\tAverage Score: 0.22\tTime: 0:04:01.302709\nEpisode 4\tAverage Score: 0.29\tTime: 0:05:24.917069\nEpisode 5\tAverage Score: 0.28\tTime: 0:06:46.803423\nEpisode 6\tAverage Score: 0.31\tTime: 0:08:10.449961\nEpisode 7\tAverage Score: 0.38\tTime: 0:09:36.084267\nEpisode 8\tAverage Score: 0.40\tTime: 0:11:01.657450\nEpisode 9\tAverage Score: 0.45\tTime: 0:12:28.094374\nEpisode 10\tAverage Score: 0.53\tTime: 0:13:53.602072\nEpisode 11\tAverage Score: 0.58\tTime: 0:15:22.684499\nEpisode 12\tAverage Score: 0.69\tTime: 0:16:53.781581\nEpisode 13\tAverage Score: 0.77\tTime: 0:18:21.720730\nEpisode 14\tAverage Score: 0.88\tTime: 0:19:50.203396\nEpisode 15\tAverage Score: 1.03\tTime: 0:21:18.765362\nEpisode 16\tAverage Score: 1.16\tTime: 0:22:45.868243\nEpisode 17\tAverage Score: 1.28\tTime: 0:24:14.504044\nEpisode 18\tAverage Score: 1.43\tTime: 0:25:43.091189\nEpisode 19\tAverage Score: 1.50\tTime: 0:27:09.548678\nEpisode 20\tAverage Score: 1.60\tTime: 0:28:35.493439\nEpisode 21\tAverage Score: 1.70\tTime: 0:30:03.994528\nEpisode 22\tAverage Score: 1.78\tTime: 0:31:31.538629\nEpisode 23\tAverage Score: 1.81\tTime: 0:32:58.675394\nEpisode 24\tAverage Score: 1.89\tTime: 0:34:25.525635\nEpisode 25\tAverage Score: 1.98\tTime: 0:35:53.312498\nEpisode 26\tAverage Score: 2.07\tTime: 0:37:18.858428\nEpisode 27\tAverage Score: 2.21\tTime: 0:38:44.766600\nEpisode 28\tAverage Score: 2.37\tTime: 0:40:12.488594\nEpisode 29\tAverage Score: 2.55\tTime: 0:41:38.339782\nEpisode 30\tAverage Score: 2.74\tTime: 0:43:05.775048\nEpisode 31\tAverage Score: 2.94\tTime: 0:44:33.889499\nEpisode 32\tAverage Score: 3.16\tTime: 0:46:01.123764\nEpisode 33\tAverage Score: 3.38\tTime: 0:47:28.543023\nEpisode 34\tAverage Score: 3.58\tTime: 0:48:52.380621\nEpisode 35\tAverage Score: 3.73\tTime: 0:50:16.397268\nEpisode 36\tAverage Score: 3.92\tTime: 0:51:40.872196\nEpisode 37\tAverage Score: 4.14\tTime: 0:53:05.986724\nEpisode 38\tAverage Score: 4.31\tTime: 0:54:30.439772\nEpisode 39\tAverage Score: 4.55\tTime: 0:55:55.067657\nEpisode 40\tAverage Score: 4.79\tTime: 0:57:19.997841\nEpisode 41\tAverage Score: 5.00\tTime: 0:58:44.653262\nEpisode 42\tAverage Score: 5.20\tTime: 1:00:10.362346\nEpisode 43\tAverage Score: 5.45\tTime: 1:01:35.398485\nEpisode 44\tAverage Score: 5.69\tTime: 
1:03:00.480537\nEpisode 45\tAverage Score: 5.97\tTime: 1:04:26.845692\nEpisode 46\tAverage Score: 6.33\tTime: 1:05:53.091537\nEpisode 47\tAverage Score: 6.68\tTime: 1:07:18.700349\nEpisode 48\tAverage Score: 7.02\tTime: 1:08:43.777529\nEpisode 49\tAverage Score: 7.37\tTime: 1:10:09.588189\nEpisode 50\tAverage Score: 7.75\tTime: 1:11:35.026054\nEpisode 51\tAverage Score: 8.21\tTime: 1:13:01.071106\nEpisode 52\tAverage Score: 8.63\tTime: 1:14:27.410951\nEpisode 53\tAverage Score: 9.04\tTime: 1:15:53.780146\nEpisode 54\tAverage Score: 9.39\tTime: 1:17:19.803704\nEpisode 55\tAverage Score: 9.66\tTime: 1:18:46.066992\nEpisode 56\tAverage Score: 9.89\tTime: 1:20:13.337830\nEpisode 57\tAverage Score: 10.23\tTime: 1:21:41.128120\nEpisode 58\tAverage Score: 10.56\tTime: 1:23:08.812193\nEpisode 59\tAverage Score: 10.94\tTime: 1:24:37.087325\nEpisode 60\tAverage Score: 11.33\tTime: 1:26:06.265174\nEpisode 61\tAverage Score: 11.72\tTime: 1:27:39.032758\nEpisode 62\tAverage Score: 12.12\tTime: 1:29:10.737600\nEpisode 63\tAverage Score: 12.46\tTime: 1:30:40.643137\nEpisode 64\tAverage Score: 12.72\tTime: 1:32:12.138587\nEpisode 65\tAverage Score: 12.98\tTime: 1:33:44.625845\nEpisode 66\tAverage Score: 13.24\tTime: 1:35:14.907329\nEpisode 67\tAverage Score: 13.42\tTime: 1:36:48.594867\nEpisode 68\tAverage Score: 13.61\tTime: 1:38:21.821406\nEpisode 69\tAverage Score: 13.84\tTime: 1:39:53.650426\nEpisode 70\tAverage Score: 14.00\tTime: 1:41:24.238425\nEpisode 71\tAverage Score: 14.23\tTime: 1:42:54.653498\nEpisode 72\tAverage Score: 14.46\tTime: 1:44:24.693474\nEpisode 73\tAverage Score: 14.73\tTime: 1:45:55.334417\nEpisode 74\tAverage Score: 14.87\tTime: 1:47:25.060531\nEpisode 75\tAverage Score: 14.98\tTime: 1:48:54.718562\nEpisode 76\tAverage Score: 15.10\tTime: 1:50:24.654183\nEpisode 77\tAverage Score: 15.28\tTime: 1:51:53.688360\nEpisode 78\tAverage Score: 15.42\tTime: 1:53:23.970448\nEpisode 79\tAverage Score: 15.58\tTime: 1:54:54.050412\nEpisode 80\tAverage Score: 15.75\tTime: 1:56:24.428103\nEpisode 81\tAverage Score: 15.88\tTime: 1:57:54.433786\nEpisode 82\tAverage Score: 16.00\tTime: 1:59:24.355532\nEpisode 83\tAverage Score: 16.17\tTime: 2:00:54.505229\nEpisode 84\tAverage Score: 16.33\tTime: 2:02:24.595491\nEpisode 85\tAverage Score: 16.43\tTime: 2:03:55.017668\nEpisode 86\tAverage Score: 16.58\tTime: 2:05:26.487894\nEpisode 87\tAverage Score: 16.75\tTime: 2:06:56.688723\nEpisode 88\tAverage Score: 16.94\tTime: 2:08:26.767861\nEpisode 89\tAverage Score: 17.14\tTime: 2:09:56.600450\nEpisode 90\tAverage Score: 17.36\tTime: 2:11:26.487600\nEpisode 91\tAverage Score: 17.50\tTime: 2:12:56.539335\nEpisode 92\tAverage Score: 17.67\tTime: 2:14:26.940779\nEpisode 93\tAverage Score: 17.86\tTime: 2:15:57.525259\nEpisode 94\tAverage Score: 18.05\tTime: 2:17:27.451086\nEpisode 95\tAverage Score: 18.22\tTime: 2:18:57.199927\nEpisode 96\tAverage Score: 18.39\tTime: 2:20:27.379902\nEpisode 97\tAverage Score: 18.56\tTime: 2:21:58.124709\nEpisode 98\tAverage Score: 18.71\tTime: 2:23:27.888001\nEpisode 99\tAverage Score: 18.88\tTime: 2:24:58.113891\nEpisode 100\tAverage Score: 19.05\tTime: 2:26:27.931001\nEpisode 101\tAverage Score: 19.39\tTime: 2:27:57.940470\nEpisode 102\tAverage Score: 19.73\tTime: 2:29:28.257970\nEpisode 103\tAverage Score: 20.08\tTime: 2:30:58.632967\nEpisode 104\tAverage Score: 20.40\tTime: 2:32:28.769979\nEpisode 105\tAverage Score: 20.74\tTime: 2:33:59.034065\nEpisode 106\tAverage Score: 21.04\tTime: 2:35:29.294097\nEpisode 107\tAverage Score: 21.35\tTime: 2:36:58.965859\nEpisode 
108\tAverage Score: 21.63\tTime: 2:38:29.220603\nEpisode 109\tAverage Score: 21.93\tTime: 2:39:59.076692\nEpisode 110\tAverage Score: 22.23\tTime: 2:41:28.950041\nEpisode 111\tAverage Score: 22.53\tTime: 2:42:59.544674\nEpisode 112\tAverage Score: 22.84\tTime: 2:44:29.681267\nEpisode 113\tAverage Score: 23.15\tTime: 2:45:59.927287\nEpisode 114\tAverage Score: 23.43\tTime: 2:47:30.108230\nEpisode 115\tAverage Score: 23.75\tTime: 2:48:59.572266\nEpisode 116\tAverage Score: 24.04\tTime: 2:50:30.356290\nEpisode 117\tAverage Score: 24.35\tTime: 2:52:00.311207\nEpisode 118\tAverage Score: 24.65\tTime: 2:53:30.932177\nEpisode 119\tAverage Score: 24.96\tTime: 2:55:02.176362\nEpisode 120\tAverage Score: 25.24\tTime: 2:56:31.869623\nEpisode 121\tAverage Score: 25.51\tTime: 2:58:02.026856\nEpisode 122\tAverage Score: 25.82\tTime: 2:59:31.905688\nEpisode 123\tAverage Score: 26.12\tTime: 3:01:02.420489\nEpisode 124\tAverage Score: 26.44\tTime: 3:02:32.506947\nEpisode 125\tAverage Score: 26.74\tTime: 3:04:03.340112\nEpisode 126\tAverage Score: 27.04\tTime: 3:05:34.823881\nEpisode 127\tAverage Score: 27.33\tTime: 3:07:05.181501\nEpisode 128\tAverage Score: 27.57\tTime: 3:08:35.287872\nEpisode 129\tAverage Score: 27.83\tTime: 3:10:06.333318\nEpisode 130\tAverage Score: 28.10\tTime: 3:11:36.838549\nEpisode 131\tAverage Score: 28.36\tTime: 3:13:07.223939\nEpisode 132\tAverage Score: 28.61\tTime: 3:14:37.421283\nEpisode 133\tAverage Score: 28.83\tTime: 3:16:07.172654\nEpisode 134\tAverage Score: 29.05\tTime: 3:17:37.105627\nEpisode 135\tAverage Score: 29.27\tTime: 3:19:07.105587\nEpisode 136\tAverage Score: 29.49\tTime: 3:20:37.121156\nEpisode 137\tAverage Score: 29.67\tTime: 3:22:07.100616\nEpisode 138\tAverage Score: 29.89\tTime: 3:23:37.322657\nEpisode 139\tAverage Score: 30.07\tTime: 3:25:07.409502\n\nEnvironment solved in 139 episodes!\tAverage Score: 30.07\nTime: 3:25:07.446499\n" ] ], [ [ "## Run Smart Agent", "_____no_output_____" ] ], [ [ "from ddpg_agent_20 import Agent\n\nenv_info = env.reset(train_mode=True)[brain_name]\nagent = Agent(state_size=33, action_size=4, random_seed=15, lr_a=1e-4, lr_c=1e-3,weight_decay=0, fc1_units=400, fc2_units=300)\n\nagent.actor_local.load_state_dict(torch.load('ckpnt_actor_20.pth')) # load weights from file\nagent.critic_local.load_state_dict(torch.load('ckpnt_critic_20.pth')) # load weights from file\n\nnum_agents = len(env_info.agents)\n\nepisodes = 1\n\nfor i in range(episodes):\n env_info = env.reset(train_mode=False)[brain_name] # reset the environment\n state = env_info.vector_observations # get the current state\n scores = np.zeros(num_agents) # initialize the score (for each agent)\n agent.reset()\n for j in range(1000):\n action = agent.act(state, add_noise=False)\n env_info = env.step(action)[brain_name] # send the action to the environment\n state = env_info.vector_observations # get the next state\n reward = env_info.rewards # get the reward\n done = env_info.local_done # see if episode has finished \n \n if np.any(done):\n break\n \n scores += reward\n score = np.mean(scores) \n \n if score > 30:\n break\n print('\\rEpisode: {}\\tStep: {}\\tScore: {}'.format(i+1, j+1, score), end=\"\")\n print('\\rEpisode: {}\\tStep: {}\\tScore: {}'.format(i+1, j+1, score))\n \nenv.close()", "Agent Parameters:\nlr_a: 0.0001\nlr_c: 0.001\nweight decay: 0\nfc1_units: 400\nfc2_units: 300\nEpisode: 1\tStep: 816\tScore: 30.03999932855367745\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0a83085e1097d5908b684410e695e5642e03ee1
422,379
ipynb
Jupyter Notebook
src/analysis/output_activation.ipynb
mbecker12/surface-rl-decoder
5399c4caabda8154feaa6027e14057cef82843b3
[ "MIT" ]
2
2021-07-15T16:32:42.000Z
2021-11-07T18:08:00.000Z
src/analysis/output_activation.ipynb
mbecker12/surface-rl-decoder
5399c4caabda8154feaa6027e14057cef82843b3
[ "MIT" ]
96
2021-02-22T15:08:29.000Z
2021-07-23T07:58:25.000Z
src/analysis/output_activation.ipynb
mbecker12/surface-rl-decoder
5399c4caabda8154feaa6027e14057cef82843b3
[ "MIT" ]
null
null
null
453.196352
197,136
0.934265
[ [ [ "## Load Model, plain 2D Conv", "_____no_output_____" ] ], [ [ "import os\nos.chdir(\"../..\")\nos.getcwd()", "_____no_output_____" ], [ "import numpy as np\nimport torch\nimport json\nfrom distributed.model_util import choose_model, choose_old_model, load_model, extend_model_config\nfrom distributed.util import q_value_index_to_action\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "model_name = \"conv2d\"\nmodel_config_path = \"src/config/model_spec/conv_agents_slim.json\"\ntrained_model_path = \"threshold_networks/5/72409/conv2d_5_72409.pt\"", "_____no_output_____" ], [ "with open(model_config_path, \"r\") as jsonfile:\n model_config = json.load(jsonfile)[model_name]\n\ncode_size, stack_depth = 5, 5\nsyndrome_size = code_size + 1\nmodel_config = extend_model_config(model_config, syndrome_size, stack_depth)\nmodel_config[\"network_size\"] = \"slim\"\nmodel_config[\"rl_type\"] = \"q\"", "_____no_output_____" ], [ "model = choose_model(model_name, model_config, transfer_learning=0)\nmodel, *_ = load_model(model, trained_model_path, model_device=\"cpu\")", "Not using any recurrent module\nPrepare Q Learning Conv2dAgent w/o transfer learning\n" ], [ "from evaluation.final_evaluation import main_evaluation\n\nall_ground_states = 0\nfor i in range(10):\n is_ground_state, n_syndromes, n_loops = main_evaluation(\n model,\n model.device,\n epsilon=0.0,\n code_size=code_size,\n stack_depth=stack_depth,\n block=False,\n verbosity=0,\n rl_type=model_config[\"rl_type\"]\n )\n \n all_ground_states += is_ground_state\n\nprint(all_ground_states)", "_____no_output_____" ], [ "print(all_ground_states)", "10\n" ] ], [ [ "## Prepare States", "_____no_output_____" ] ], [ [ "all_states = []\n\nstate = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\nstate[-2:, 1, 2] = 1\nall_states.append(state)\n\n\nstate = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\nstate[-1, 1, 2] = 1\nstate[-1, 2, 3] = 1\nall_states.append(state)\n\nstate = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\nstate[-1, 2, 3] = 1\nall_states.append(state)\n\n\nstate = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\nstate[-2:, 1, 2] = 1\nstate[-2:, 2, 3] = 1\nstate[-1:, 2, 3] = 0\nall_states.append(state)\n\nstate = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\nstate[:, 1, 2] = 1\nstate[:, 2, 3] = 1\nstate[-1, 2, 3] = 0\nall_states.append(state)\n\nfull_error_state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\nfull_error_state[:, 1, 2] = 1\nfull_error_state[:, 2, 3] = 1\nall_states.append(full_error_state)\n\ntorch_all_states = torch.stack(all_states)\n\n \n# for i in range(0, stack_depth, 2):\n# state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\n# state[:, 1, 2] = 1\n# state[:, 2, 3] = 1\n# state[i, 2, 3] = 0\n \n# all_states.append(state)\n\n# for i in range(0, stack_depth, 2):\n# state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\n# state[i, 2, 3] = 1\n \n# all_states.append(state)\n\n# state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\n# state[-1, 1, 2] = 1\n# state[-1, 2, 3] = 1\n# all_states.append(state)\n\n# state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\n# state[-2:, 1, 2] = 1\n# state[-2:, 2, 3] = 1\n# state[-1:, 2, 3] = 0\n# all_states.append(state)\n\n# state = 
torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)\n# state[-2:, 1, 2] = 1\n# all_states.append(state)\n\n# torch_all_states = torch.stack(all_states)", "_____no_output_____" ], [ "def calculate_state_image(state, stack_depth, syndrome_size):\n layer_discount_factor = 0.3\n layer_exponents = np.arange(stack_depth - 1, -1, -1)\n layer_rewards = np.power(layer_discount_factor, layer_exponents)\n layer_rewards = torch.tensor(layer_rewards, dtype=torch.float32)\n\n state_image = torch.zeros((syndrome_size, syndrome_size), dtype=torch.float32)\n for j, layer in enumerate(state):\n tmp_layer = layer * layer_rewards[j]\n state_image += tmp_layer\n\n return state_image", "_____no_output_____" ] ], [ [ "## Do the plotting", "_____no_output_____" ] ], [ [ "k = 1\n# stack_depth = 5\n# syndrome_size = 5\nfrom matplotlib import colors\nplt.rcParams.update({\"font.size\": 15})\n\n\nfig, ax = plt.subplots(1, 3, figsize=(18, 8), gridspec_kw={\"width_ratios\": [4, 4, 8], \"wspace\": 0.02, \"hspace\": 0.0},)\nplot_colors = [\"#ffffff\", \"#404E5C\", \"#F76C5E\", \"#E9B44C\", \"#7F95D1\", \"#CF1259\", \"#669900\"]\nmarkers = [\"o\", \"v\", \"^\", \"X\", \"d\", \"P\"]\ncmap = colors.ListedColormap(plot_colors)\nboundaries = range(len(torch_all_states))\nnorm = colors.BoundaryNorm(boundaries, cmap.N, clip=True)\n\nmarkersize = 70\nimg_separation = 1\ncolumn_width = 8\nsyndrome_locations = np.array([[1, 2],[2, 3]])\ncolumn_filler = np.zeros((stack_depth, 1))\nimage_filler = np.zeros((stack_depth, img_separation))\nimg_width = 2 * column_width + img_separation\n\nvline_locations = np.array([\n column_width + i * img_width for i in range(len(torch_all_states))\n])\nimage_separators_left = np.array([\n (i+1) * 2 * column_width + i * img_separation for i in range(len(torch_all_states))\n])\n# image_separators_left[1:] += img_separation\n\nimage_separators_right = [\n i * img_width for i in range(len(torch_all_states))\n]\nhline_locations = range(0, stack_depth + 1)\n\ncomplete_image_list = []\n\nfor i, state in enumerate(torch_all_states):\n ii = i + 1\n # TODO: concat the columns multi-pixel wide with an empty space in between\n # and empty spaces between each state's column\n column1 = np.vstack(state[:, syndrome_locations[0, 0], syndrome_locations[0, 1]])\n repeated_column1 = np.repeat(column1, column_width, axis=1)\n \n column2 = np.vstack(state[:, syndrome_locations[1, 0], syndrome_locations[1, 1]])\n repeated_column2 = np.repeat(column2, column_width, axis=1)\n \n state_img = np.concatenate((repeated_column1, repeated_column2), axis=1) * ii\n\n complete_image_list.append(state_img)\n if i < len(torch_all_states) - 1:\n complete_image_list.append(image_filler)\n \ncomplete_image_array = np.concatenate(complete_image_list, axis=1)\n\nax2 = ax[2].twinx()\nfor i, state in enumerate(torch_all_states):\n ii = i + 1\n q_values = model(state.unsqueeze(0))\n q_values = q_values.detach().squeeze().clone().numpy()\n \n ind = np.argpartition(q_values, -k)[-k:]\n max_ind = ind\n action = q_value_index_to_action(ind[0], code_size)\n ind = np.append(ind, [max(ind[0]-1, 0), min(ind[0]+1, len(q_values)-1)])\n ind = np.sort(ind)\n print(f\"{ind=}\")\n \n q_hist = np.histogram(q_values)\n \n if i < 3:\n ax[0].plot(range(len(q_values)), q_values, label=str(ii), color=plot_colors[ii])\n ax[0].scatter(\n max_ind, q_values[max_ind], marker=markers[i], c=plot_colors[ii], s=markersize\n )\n# marker=markers[i], c=plot_colors[ii]\n else:\n ax[1].plot(range(len(q_values)), q_values, label=str(ii), 
color=plot_colors[ii]),\n ax[1].scatter(\n max_ind, q_values[max_ind], marker=markers[i], c=plot_colors[ii], s=markersize\n )\n ax2.imshow(\n complete_image_array, vmin=0, vmax=6, cmap=cmap, aspect='auto', origin='lower'\n )\n ax2.axvline(x=vline_locations[i] - 0.5, linestyle=':', color='black')\n ax2.axhline(y=hline_locations[i] - 0.5, linestyle=':', color='black')\n \n ax2.axvline(x=image_separators_left[i] - 0.5, color='black')\n ax2.axvline(x=image_separators_right[i] - 0.5, color='black')\n \n ax2.text(x=image_separators_left[i] - 1.8 * column_width, y=1, s=f\"{action}\")\n \n\n\nax[0].set(\n ylim=(40, 120), \n xlabel=\"Q Value Index\",\n ylabel=\"Q Value\", \n title=f\"Q Activation\", \n)\nax[1].set(\n ylim=(40, 120), \n xlabel=\"Q Value Index\",\n title=f\"Q Activation\", \n)\n\nall_vline_locations = np.concatenate(\n [vline_locations - 0.5 * column_width, vline_locations + 0.5 * column_width]\n)\n\nx_tick_labels = [f\"{tuple(syndrome_locations[0])}\"] * len(torch_all_states)\nx_tick_labels2 = [f\"{tuple(syndrome_locations[1])}\"] * len(torch_all_states)\nx_tick_labels.extend(x_tick_labels2)\n# , f\"{tuple(syndrome_locations[1])}\"] * len(torch_all_states)\nprint(f\"{x_tick_labels}\")\n\nax2.set(xlabel=\"\", ylabel=\"h\", title=\"Isolated Syndrome States\")\nax[2].set_yticks(all_vline_locations)\nax[2].set_yticks([])\nax[2].set_yticklabels([])\nax2.set_xticklabels(x_tick_labels)\nax2.set_xticks(all_vline_locations)\n\nax[1].set_yticklabels([])\n \nax[0].legend()\nax[1].legend()\n\nplt.savefig(\"plots/q_value_activation.pdf\", bbox_inches=\"tight\")", "ind=array([17, 18, 19])\nind=array([32, 33, 34])\nind=array([74, 75, 75])\nind=array([32, 33, 34])\nind=array([32, 33, 34])\nind=array([32, 33, 34])\n['(1, 2)', '(1, 2)', '(1, 2)', '(1, 2)', '(1, 2)', '(1, 2)', '(2, 3)', '(2, 3)', '(2, 3)', '(2, 3)', '(2, 3)', '(2, 3)']\n" ] ], [ [ "## 3D Conv", "_____no_output_____" ] ], [ [ "model_name = \"conv3d\"\nmodel_config_path_3d = \"src/config/model_spec/conv_agents_slim.json\"\ntrained_model_path_3d = \"threshold_networks/5/69312/conv3d_5_69312.pt\"", "_____no_output_____" ], [ "with open(model_config_path_3d, \"r\") as jsonfile:\n model_config_3d = json.load(jsonfile)[model_name]\n\ncode_size, stack_depth = 5, 5\nsyndrome_size = code_size + 1\nmodel_config_3d = extend_model_config(model_config_3d, syndrome_size, stack_depth)\nmodel_config_3d[\"network_size\"] = \"slim\"\nmodel_config_3d[\"rl_type\"] = \"q\"", "_____no_output_____" ], [ "model3d = choose_old_model(model_name, model_config_3d)\nmodel3d, *_ = load_model(model3d, trained_model_path_3d, model_device=\"cpu\")", "_____no_output_____" ], [ "from evaluation.final_evaluation import main_evaluation\n\nall_ground_states = 0\nfor i in range(10):\n is_ground_state, n_syndromes, n_loops = main_evaluation(\n model3d,\n model3d.device,\n epsilon=0.0,\n code_size=code_size,\n stack_depth=stack_depth,\n block=False,\n verbosity=0,\n rl_type=model_config_3d[\"rl_type\"]\n )\n \n all_ground_states += is_ground_state\n\nprint(all_ground_states)", "_____no_output_____" ], [ "print(all_ground_states)", "9\n" ], [ "k = 1\n# stack_depth = 5\n# syndrome_size = 5\nfrom matplotlib import colors\nplt.rcParams.update({\"font.size\": 15})\n\n\nfig, ax = plt.subplots(1, 3, figsize=(18, 8), gridspec_kw={\"width_ratios\": [4, 4, 8], \"wspace\": 0.02, \"hspace\": 0.0},)\nplot_colors = [\"#ffffff\", \"#404E5C\", \"#F76C5E\", \"#E9B44C\", \"#7F95D1\", \"#CF1259\", \"#669900\"]\nmarkers = [\"o\", \"v\", \"^\", \"X\", \"d\", \"P\"]\ncmap = 
colors.ListedColormap(plot_colors)\nboundaries = range(len(torch_all_states))\nnorm = colors.BoundaryNorm(boundaries, cmap.N, clip=True)\n\nmarkersize = 70\nimg_separation = 1\ncolumn_width = 8\nsyndrome_locations = np.array([[1, 2],[2, 3]])\ncolumn_filler = np.zeros((stack_depth, 1))\nimage_filler = np.zeros((stack_depth, img_separation))\nimg_width = 2 * column_width + img_separation\n\nvline_locations = np.array([\n column_width + i * img_width for i in range(len(torch_all_states))\n])\nimage_separators_left = np.array([\n (i+1) * 2 * column_width + i * img_separation for i in range(len(torch_all_states))\n])\n# image_separators_left[1:] += img_separation\n\nimage_separators_right = [\n i * img_width for i in range(len(torch_all_states))\n]\nhline_locations = range(0, stack_depth + 1)\n\ncomplete_image_list = []\n\nfor i, state in enumerate(torch_all_states):\n ii = i + 1\n # TODO: concat the columns multi-pixel wide with an empty space in between\n # and empty spaces between each state's column\n column1 = np.vstack(state[:, syndrome_locations[0, 0], syndrome_locations[0, 1]])\n repeated_column1 = np.repeat(column1, column_width, axis=1)\n \n column2 = np.vstack(state[:, syndrome_locations[1, 0], syndrome_locations[1, 1]])\n repeated_column2 = np.repeat(column2, column_width, axis=1)\n \n state_img = np.concatenate((repeated_column1, repeated_column2), axis=1) * ii\n\n complete_image_list.append(state_img)\n if i < len(torch_all_states) - 1:\n complete_image_list.append(image_filler)\n \ncomplete_image_array = np.concatenate(complete_image_list, axis=1)\n\nax2 = ax[2].twinx()\nfor i, state in enumerate(torch_all_states):\n ii = i + 1\n q_values = model3d(state.unsqueeze(0))\n q_values = q_values.detach().squeeze().clone().numpy()\n \n ind = np.argpartition(q_values, -k)[-k:]\n max_ind = ind\n action = q_value_index_to_action(ind[0], code_size)\n ind = np.append(ind, [max(ind[0]-1, 0), min(ind[0]+1, len(q_values)-1)])\n ind = np.sort(ind)\n print(f\"{ind=}\")\n \n q_hist = np.histogram(q_values)\n \n if i < 3:\n ax[0].plot(range(len(q_values)), q_values, label=str(ii), color=plot_colors[ii])\n ax[0].scatter(\n max_ind, q_values[max_ind], marker=markers[i], c=plot_colors[ii], s=markersize\n )\n# marker=markers[i], c=plot_colors[ii]\n else:\n ax[1].plot(range(len(q_values)), q_values, label=str(ii), color=plot_colors[ii]),\n ax[1].scatter(\n max_ind, q_values[max_ind], marker=markers[i], c=plot_colors[ii], s=markersize\n )\n ax2.imshow(\n complete_image_array, vmin=0, vmax=6, cmap=cmap, aspect='auto', origin='lower'\n )\n ax2.axvline(x=vline_locations[i] - 0.5, linestyle=':', color='black')\n ax2.axhline(y=hline_locations[i] - 0.5, linestyle=':', color='black')\n \n ax2.axvline(x=image_separators_left[i] - 0.5, color='black')\n ax2.axvline(x=image_separators_right[i] - 0.5, color='black')\n \n ax2.text(x=image_separators_left[i] - 1.8 * column_width, y=1, s=f\"{action}\")\n \n\n\nax[0].set(\n ylim=(40, 100), \n xlabel=\"Q Value Index\",\n ylabel=\"Q Value\", \n title=f\"Q Activation\", \n)\nax[1].set(\n ylim=(40, 100), \n xlabel=\"Q Value Index\",\n title=f\"Q Activation\", \n)\n\nall_vline_locations = np.concatenate(\n [vline_locations - 0.5 * column_width, vline_locations + 0.5 * column_width]\n)\n\nx_tick_labels = [f\"{tuple(syndrome_locations[0])}\"] * len(torch_all_states)\nx_tick_labels2 = [f\"{tuple(syndrome_locations[1])}\"] * len(torch_all_states)\nx_tick_labels.extend(x_tick_labels2)\n# , f\"{tuple(syndrome_locations[1])}\"] * 
len(torch_all_states)\nprint(f\"{x_tick_labels}\")\n\nax2.set(xlabel=\"\", ylabel=\"h\", title=\"Isolated Syndrome States\")\nax[2].set_yticks(all_vline_locations)\nax[2].set_yticks([])\nax[2].set_yticklabels([])\nax2.set_xticklabels(x_tick_labels)\nax2.set_xticks(all_vline_locations)\n\nax[1].set_yticklabels([])\n \nax[0].legend()\nax[1].legend()\n\nplt.savefig(\"plots/q_value_activation_3d.pdf\", bbox_inches=\"tight\")", "ind=array([14, 15, 16])\nind=array([33, 34, 35])\nind=array([74, 75, 75])\nind=array([33, 34, 35])\nind=array([33, 34, 35])\nind=array([33, 34, 35])\n['(1, 2)', '(1, 2)', '(1, 2)', '(1, 2)', '(1, 2)', '(1, 2)', '(2, 3)', '(2, 3)', '(2, 3)', '(2, 3)', '(2, 3)', '(2, 3)']\n" ], [ "from distributed.util import select_actions", "_____no_output_____" ], [ "from surface_rl_decoder.surface_code import SurfaceCode\nfrom surface_rl_decoder.surface_code_util import create_syndrome_output_stack", "_____no_output_____" ], [ "# q_values = model3d(full_error_state)\naction, _ = select_actions(full_error_state.unsqueeze(0), model3d, code_size)", "_____no_output_____" ], [ "sc = SurfaceCode(code_size=code_size, stack_depth=stack_depth)\nsc.qubits[:, 1, 2] = 1\nsc.state = create_syndrome_output_stack(\n sc.qubits, sc.vertex_mask, sc.plaquette_mask\n)\nnp.argwhere(sc.state)", "_____no_output_____" ], [ "from copy import deepcopy\ntorch_state = torch.tensor(deepcopy(sc.state), dtype=torch.float32)\naction, _ = select_actions(torch_state.unsqueeze(0), model3d, code_size)\naction", "_____no_output_____" ], [ "new_state, *_ = sc.step(action[0])", "_____no_output_____" ], [ "torch_state = torch.tensor(deepcopy(sc.state), dtype=torch.float32)\naction, _ = select_actions(torch_state.unsqueeze(0), model3d, code_size)\naction", "_____no_output_____" ], [ "new_state, *_ = sc.step(action[0])", "_____no_output_____" ], [ "new_state", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a846e9b9d6ed0f9616ab5407a52ba291a7fd84
103,936
ipynb
Jupyter Notebook
tests/Basic HDF5 Operations.ipynb
dlab-projects/marketflow
b5b40af2dfa62f8a6113d11a5eeb69ebc0806fb6
[ "BSD-2-Clause" ]
6
2018-03-06T00:05:13.000Z
2022-01-29T08:50:40.000Z
tests/Basic HDF5 Operations.ipynb
glass-bead-labs/marketflow
b5b40af2dfa62f8a6113d11a5eeb69ebc0806fb6
[ "BSD-2-Clause" ]
17
2015-12-01T20:18:34.000Z
2016-03-19T04:17:50.000Z
tests/Basic HDF5 Operations.ipynb
dlab-berkeley/python-taq
b5b40af2dfa62f8a6113d11a5eeb69ebc0806fb6
[ "BSD-2-Clause" ]
7
2016-04-19T17:04:29.000Z
2021-02-10T16:25:34.000Z
40.71132
5,856
0.562471
[ [ [ "ls ../test-data/", "2014-head.txt \u001b[0m\u001b[01;36mEQY_US_ALL_BBO_20150731.zip\u001b[0m@\r\n2015-head.txt small_test_data_public.h5\r\n\u001b[01;36mEQY_US_ALL_BBO_201111\u001b[0m@ \u001b[01;31msmall_test_data_public.zip\u001b[0m\r\n\u001b[01;36mEQY_US_ALL_BBO_201402\u001b[0m@\r\n" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport tables as tb", "_____no_output_____" ], [ "import h5py\nimport dask.dataframe as dd\nimport dask.bag as db", "_____no_output_____" ], [ "import blaze", "_____no_output_____" ], [ "fname = '../test-data/EQY_US_ALL_BBO_201402/EQY_US_ALL_BBO_20140206.h5'\nmax_sym = '/SPY/no_suffix'", "_____no_output_____" ], [ "fname = '../test-data/small_test_data_public.h5'\nmax_sym = '/IXQAJE/no_suffix'", "_____no_output_____" ], [ "# by default, this will be read-only\ntaq_tb = tb.open_file(fname)", "_____no_output_____" ], [ "%%time\nrec_counts = {curr._v_pathname: len(curr) \n for curr in taq_tb.walk_nodes('/', 'Table')}", "CPU times: user 749 ms, sys: 13 ms, total: 762 ms\nWall time: 763 ms\n" ], [ "# What's our biggest table? (in bytes)\nmax(rec_counts.values()) * 91 / 2 ** 20 # I think it's 91 bytes...", "_____no_output_____" ] ], [ [ "Anyway, under a gigabyte. So, nothing to worry about even if we have 24 cores.", "_____no_output_____" ] ], [ [ "# But what symbol is that?\nmax_sym = None\nmax_rows = 0\nfor sym, rows in rec_counts.items():\n if rows > max_rows:\n max_rows = rows\n max_sym = sym", "_____no_output_____" ], [ "max_sym, max_rows", "_____no_output_____" ] ], [ [ "Interesting... the S&P 500 ETF", "_____no_output_____" ] ], [ [ "# Most symbols also have way less rows - note this is log xvals\nplt.hist(list(rec_counts.values()), bins=50, log=True)\nplt.show()", "_____no_output_____" ] ], [ [ "## Doing some compute\n\nWe'll use a \"big\" table to get some sense of timings", "_____no_output_____" ] ], [ [ "spy = taq_tb.get_node(max_sym)", "_____no_output_____" ], [ "# PyTables is record oriented...\n%timeit np.mean(list(x['Bid_Price'] for x in spy.iterrows()))", "1 loop, best of 3: 7.62 s per loop\n" ], [ "# But this is faster...\n%timeit np.mean(spy[:]['Bid_Price'])", "The slowest run took 383.78 times longer than the fastest. This could mean that an intermediate result is being cached.\n10000 loops, best of 3: 47.6 µs per loop\n" ], [ "np.mean(spy[:]['Bid_Price'])", "_____no_output_____" ] ], [ [ "# Using numexpr?\n\nnumexpr is currently not set up to do reductions via HDF5. I've opened an issue here:\nhttps://github.com/PyTables/PyTables/issues/548", "_____no_output_____" ] ], [ [ "spy_bp = spy.cols.Bid_Price", "_____no_output_____" ], [ "# this works...\nnp.mean(spy_bp)", "_____no_output_____" ], [ "# But it can't use numexpr\nexpr = tb.Expr('sum(spy_bp)')", "_____no_output_____" ], [ "# You can use numexpr to get the values of the column... but that's silly\n# (sum doesn't work right, and the axis argument is non-functional)\n%timeit result = expr.eval().mean()", "1 loop, best of 3: 3.62 s per loop\n" ], [ "tb.Expr('spy_bp').eval().mean()", "_____no_output_____" ] ], [ [ "# h5py", "_____no_output_____" ] ], [ [ "taq_tb.close()", "_____no_output_____" ], [ "%%time\nspy_h5py = h5py.File(fname)[max_sym]", "CPU times: user 1.64 ms, sys: 1.92 ms, total: 3.55 ms\nWall time: 3.36 ms\n" ], [ "np.mean(spy_h5py['Bid_Price'])", "_____no_output_____" ] ], [ [ "h5py may be a *touch* faster than pytables for this kind of usage. 
But why does pandas use pytables?", "_____no_output_____" ] ], [ [ "%%timeit\nnp.mean(spy_h5py['Bid_Price'])", "The slowest run took 5.02 times longer than the fastest. This could mean that an intermediate result is being cached.\n1000 loops, best of 3: 253 µs per loop\n" ] ], [ [ "# Dask \n\nIt seems that there should be no need to, e.g., use h5py - but dask's read_hdf doens't seem to be working nicely...", "_____no_output_____" ] ], [ [ "taq_tb.close()", "_____no_output_____" ] ], [ [ "spy_h5py = h5py.File(fname)[max_sym]", "_____no_output_____" ] ], [ [ "store = pd.HDFStore(fname)", "_____no_output_____" ], [ "store = pd.HDFStore('../test-data/')", "_____no_output_____" ], [ "# this is a fine way to iterate over our datasets (in addition to what's available in PyTables and h5py)\nit = store.items()", "_____no_output_____" ], [ "key, tab = next(it)", "_____no_output_____" ], [ "tab", "_____no_output_____" ], [ "# The columns argument doesn't seem to work...\nstore.select(max_sym, columns=['Bid_Price']).head()", "_____no_output_____" ], [ "# columns also doesn't work here...\npd.read_hdf(fname, max_sym, columns=['Bid_Price']).head()", "_____no_output_____" ], [ "# So we use h5py (actually, pytables appears faster...)\nspy_dask = dd.from_array(spy_h5py)", "_____no_output_____" ], [ "mean_job = spy_dask['Bid_Price'].mean()", "_____no_output_____" ], [ "mean_job.compute()", "_____no_output_____" ], [ "# This is appreciably slower than directly computing the mean w/ numpy\n%timeit mean_job.compute()", "1 loop, best of 3: 9.55 s per loop\n" ] ], [ [ "## Dask for an actual distributed task (but only on one file for now)", "_____no_output_____" ] ], [ [ "class DDFs:\n # A (key, table) list\n datasets = []\n dbag = None\n\n def __init__(self, h5fname):\n h5in = h5py.File(h5fname)\n h5in.visititems(self.collect_dataset)\n \n def collect_dataset(self, key, table):\n if isinstance(table, h5py.Dataset):\n self.datasets.append(dd.from_array(table)['Bid_Price'].mean())\n \n def compute_mean(self):\n # This is still very slow!\n self.results = {key: result for key, result in dd.compute(*self.datasets)}", "_____no_output_____" ], [ "%%time\nddfs = DDFs(fname)", "CPU times: user 273 ms, sys: 22.8 ms, total: 296 ms\nWall time: 293 ms\n" ], [ "ddfs.datasets[:5]", "_____no_output_____" ], [ "len(ddfs.datasets)", "_____no_output_____" ], [ "dd.compute?", "_____no_output_____" ], [ "%%time\nresults = dd.compute(*ddfs.datasets[:20])", "CPU times: user 3.57 s, sys: 1.16 s, total: 4.74 s\nWall time: 4.86 s\n" ], [ "import dask.multiprocessing", "_____no_output_____" ], [ "%%time\n# This crashes out throwing lots of KeyErrors\nresults = dd.compute(*ddfs.datasets[:20], get=dask.multiprocessing.get)", "Exception ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nTraceback (most recent call last):\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, 
in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nKeyError: 140545251730248\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nKeyError: 140545251730248\nKeyError: 140545251730248\nKeyError: 140545251731592\nKeyError: 140545251730248\nKeyError: 140545251730248\nTraceback (most recent call last):\nKeyError: 140545251730248\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nKeyError: 140545251730248\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nKeyError: 140545251731592\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nTraceback (most recent call last):\nKeyError: 140545251731592\nKeyError: 140545251730248\nTraceback (most recent call last):\nTraceback (most recent call last):\nKeyError: 140545251730248\nTraceback (most recent call last):\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nKeyError: 140545251731592\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nException ignored in: 
'h5py._objects.ObjectID.__dealloc__'\nKeyError: 140545251731592\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nKeyError: 140545251730248\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nKeyError: 140545251731592\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nKeyError: 140545251730248\nTraceback (most recent call last):\nTraceback (most recent call last):\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\nTraceback (most recent call last):\nTraceback (most recent call last):\nKeyError: 140545251731592\nTraceback (most recent call last):\nTraceback (most recent call last):\nException ignored in: 'h5py._objects.ObjectID.__dealloc__'\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nKeyError: 140545251731592\nTraceback (most recent call last):\nKeyError: 140545211536520\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nKeyError: 140545251730248\nKeyError: 140545251730248\nTraceback (most recent call last):\nKeyError: 140545251730248\nKeyError: 140545251731592\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nKeyError: 140545251730248\nTraceback (most recent call last):\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nKeyError: 140545251731592\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nKeyError: 140545251731592\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\n File \"h5py/_objects.pyx\", line 197, in h5py._objects.ObjectID.__dealloc__ (-------src-dir-------/h5py/_objects.c:4494)\nKeyError: 140545251730248\nKeyError: 140545251730248\nKeyError: 140545140359688\nKeyError: 140545140359688\nKeyError: 140545140359688\nProcess 
ForkPoolWorker-17:\nProcess ForkPoolWorker-19:\nProcess ForkPoolWorker-28:\nProcess ForkPoolWorker-20:\nProcess ForkPoolWorker-25:\nProcess ForkPoolWorker-29:\nProcess ForkPoolWorker-12:\nProcess ForkPoolWorker-21:\nProcess ForkPoolWorker-24:\nProcess ForkPoolWorker-5:\nProcess ForkPoolWorker-23:\nProcess ForkPoolWorker-22:\nProcess ForkPoolWorker-27:\nProcess ForkPoolWorker-11:\nProcess ForkPoolWorker-32:\nProcess ForkPoolWorker-6:\nProcess ForkPoolWorker-26:\nProcess ForkPoolWorker-31:\nProcess ForkPoolWorker-30:\nProcess ForkPoolWorker-8:\nProcess ForkPoolWorker-9:\nProcess ForkPoolWorker-14:\nProcess ForkPoolWorker-10:\nProcess ForkPoolWorker-15:\nProcess ForkPoolWorker-7:\nProcess ForkPoolWorker-3:\nProcess ForkPoolWorker-13:\nProcess ForkPoolWorker-1:\nProcess ForkPoolWorker-16:\nProcess ForkPoolWorker-18:\nProcess ForkPoolWorker-2:\nProcess ForkPoolWorker-4:\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 343, in get\n res = self._reader.recv_bytes()\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\nTraceback (most recent call last):\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/connection.py\", line 216, in recv_bytes\n buf = self._recv_bytes(maxlength)\nTraceback (most recent call last):\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\nTraceback (most recent call last):\nTraceback 
(most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/connection.py\", line 407, in _recv_bytes\n buf = self._recv(4)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File 
\"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\nTraceback (most recent call last):\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File 
\"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/connection.py\", line 379, in _recv\n chunk = read(handle, remaining)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 254, in _bootstrap\n self.run()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = 
get()\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/process.py\", line 93, in run\n self._target(*self._args, **self._kwargs)\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with 
self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\nKeyboardInterrupt\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/pool.py\", line 108, in worker\n task = get()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\nKeyboardInterrupt\nKeyboardInterrupt\n File 
\"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\nKeyboardInterrupt\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\nKeyboardInterrupt\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/queues.py\", line 342, in get\n with self._rlock:\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\n File \"/home/dav/miniconda3/envs/TAQ/lib/python3.5/multiprocessing/synchronize.py\", line 96, in __enter__\n return self._semlock.__enter__()\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\nKeyboardInterrupt\n" ], [ "results[0]", "_____no_output_____" ] ], [ [ "This ends up being a *little* faster than just using blaze (see below), but about half the time is spent setting thigs up in Dask.", "_____no_output_____" ] ], [ [ "from dask import delayed\n\n@delayed\ndef mean_column(key, data, column='Bid_Price'):\n return key, blaze.data(data)[column].mean()\n\nclass DDFs:\n # A (key, table) list\n datasets = []\n\n def __init__(self, h5fname):\n h5in = h5py.File(h5fname)\n h5in.visititems(self.collect_dataset)\n \n def collect_dataset(self, key, table):\n if isinstance(table, h5py.Dataset):\n self.datasets.append(mean_column(key, table))\n \n def compute_mean(self, limit=None):\n # Note that a limit of None includes all values\n self.results = {key: result for key, result in dd.compute(*self.datasets[:limit])}", "_____no_output_____" ], [ "%%time\nddfs = DDFs(fname)", "CPU times: user 4.09 s, sys: 2.07 s, total: 6.16 s\nWall time: 23.8 s\n" ], [ "%%time\nddfs.compute_mean()", "CPU times: user 14.6 s, sys: 1.57 s, total: 16.2 s\nWall time: 21.9 s\n" ], [ "next(iter(ddfs.results.items()))", "_____no_output_____" ], [ "# You can also compute individual results as needed\nddfs.datasets[0].compute()", "_____no_output_____" ] ], [ [ "# Blaze?\n\nHoly crap!", 
"_____no_output_____" ] ], [ [ "spy_blaze = blaze.data(spy_h5py)", "_____no_output_____" ], [ "%time \nspy_blaze['Ask_Price'].mean()", "CPU times: user 8 µs, sys: 1 µs, total: 9 µs\nWall time: 21 µs\n" ], [ "taq_tb = tb.open_file(fname)\nspy_tb = taq_tb.get_node(max_sym)", "_____no_output_____" ], [ "spy_blaze = blaze.data(spy_tb)", "_____no_output_____" ], [ "%time spy_blaze['Bid_Price'].mean()", "CPU times: user 0 ns, sys: 978 µs, total: 978 µs\nWall time: 647 µs\n" ], [ "taq_tb.close()", "_____no_output_____" ] ], [ [ "## Read directly with Blaze\n\nSomehow this is not as impressive", "_____no_output_____" ] ], [ [ "%%time\nblaze_h5_file = blaze.data(fname)\n\n# This is rather nice\nblaze_h5_file.SPY.no_suffix.Bid_Price.mean()", "CPU times: user 12.7 s, sys: 1.05 s, total: 13.7 s\nWall time: 59.9 s\n" ], [ "blaze_h5_file.ZFKOJB.no_suffix.Bid_Price.mean()", "_____no_output_____" ] ], [ [ "# Do some actual compute with Blaze", "_____no_output_____" ] ], [ [ "taq_h5py = h5py.File(fname)", "_____no_output_____" ], [ "class SymStats:\n means = {}\n\n def compute_stats(self, key, table):\n if isinstance(table, h5py.Dataset):\n self.means[key] = blaze.data(table)['Bid_Price'].mean() ", "_____no_output_____" ], [ "ss = SymStats()", "_____no_output_____" ], [ "%time taq_h5py.visititems(ss.compute_stats)", "CPU times: user 11.2 s, sys: 1.74 s, total: 12.9 s\nWall time: 51.8 s\n" ], [ "means = iter(ss.means.items())", "_____no_output_____" ], [ "next(means)", "_____no_output_____" ], [ "ss.means['SPY/no_suffix']", "_____no_output_____" ] ], [ [ "# Pandas? \n\n### To load with Pandas, you need to close the pytables session", "_____no_output_____" ] ], [ [ "taq_tb = tb.open_file(fname)", "_____no_output_____" ], [ "taq_tb.close()", "_____no_output_____" ], [ "pd.read_hdf?", "_____no_output_____" ], [ "pd.read_hdf(fname, max_sym, start=0, stop=1, chunksize=1)", "_____no_output_____" ], [ "max_sym", "_____no_output_____" ], [ "fname", "_____no_output_____" ], [ "%%timeit\nnode = taq_tb.get_node(max_sym)\npd.DataFrame.from_records(node[0:1])", "1000 loops, best of 3: 1.18 ms per loop\n" ], [ "%%timeit\n# I've also tried this with `.get_node()`, same speed\npd.DataFrame.from_records(taq_tb.root.IXQAJE.no_suffix)", "1 loop, best of 3: 234 ms per loop\n" ], [ "%%timeit\npd.read_hdf(fname, max_sym)", "10 loops, best of 3: 34 ms per loop\n" ], [ "# Pandas has optimizations it likes to do with \n%timeit spy_df = pd.read_hdf(fname, max_sym)", "10 loops, best of 3: 32.6 ms per loop\n" ], [ "# Actually do it\nspy_df = pd.read_hdf(fname, max_sym)", "_____no_output_____" ], [ "# This is fast, but loading is slow...\n%timeit spy_df.Bid_Price.mean()", "The slowest run took 6.25 times longer than the fastest. This could mean that an intermediate result is being cached \n10000 loops, best of 3: 57.8 µs per loop\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a8472b8a53921334eb7edf244e9728173cd858
3,413
ipynb
Jupyter Notebook
Module-02/ComparisonOperations.ipynb
rbsorilla/ReneTraining
132554eb4a970eb47a0d91167e214f4548db3073
[ "Unlicense" ]
1
2021-04-05T01:35:29.000Z
2021-04-05T01:35:29.000Z
Module-02/ComparisonOperations.ipynb
rbsorilla/ReneTraining
132554eb4a970eb47a0d91167e214f4548db3073
[ "Unlicense" ]
null
null
null
Module-02/ComparisonOperations.ipynb
rbsorilla/ReneTraining
132554eb4a970eb47a0d91167e214f4548db3073
[ "Unlicense" ]
2
2021-03-01T04:42:29.000Z
2021-03-01T04:42:59.000Z
18.251337
255
0.486376
[ [ [ "<a href=\"https://colab.research.google.com/github/FairozaAmira/AI_Programming_1_e/blob/master/Lesson05/ComparisonOperations.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Answers for Comparison Operations Exercises", "_____no_output_____" ], [ "Question 1\n\nWhich one is the odd number?\nA) 13\nB) 72\nC) 255", "_____no_output_____" ] ], [ [ "13 % 2 == 1", "_____no_output_____" ], [ "72 % 2 == 1", "_____no_output_____" ], [ "255 % 2 == 1", "_____no_output_____" ] ], [ [ "Question 3\n\nCheck if 63 is between 50 and 100.", "_____no_output_____" ] ], [ [ "a = 63\n50 < a < 100", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d0a84e9c1f4c67773ef79199dfb0227ea8e2186c
24,994
ipynb
Jupyter Notebook
site/en-snapshot/model_optimization/guide/combine/pcqat_example.ipynb
phoenix-fork-tensorflow/docs-l10n
2287738c22e3e67177555e8a41a0904edfcf1544
[ "Apache-2.0" ]
491
2020-01-27T19:05:32.000Z
2022-03-31T08:50:44.000Z
site/en-snapshot/model_optimization/guide/combine/pcqat_example.ipynb
phoenix-fork-tensorflow/docs-l10n
2287738c22e3e67177555e8a41a0904edfcf1544
[ "Apache-2.0" ]
511
2020-01-27T22:40:05.000Z
2022-03-21T08:40:55.000Z
site/en-snapshot/model_optimization/guide/combine/pcqat_example.ipynb
phoenix-fork-tensorflow/docs-l10n
2287738c22e3e67177555e8a41a0904edfcf1544
[ "Apache-2.0" ]
627
2020-01-27T21:49:52.000Z
2022-03-28T18:11:50.000Z
35.96259
357
0.552173
[ [ [ "**Copyright 2021 The TensorFlow Authors.**", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/model_optimization/guide/combine/pcqat_example\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/combine/pcqat_example.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/combine/pcqat_example.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/model-optimization/tensorflow_model_optimization/g3doc/guide/combine/pcqat_example.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "# Sparsity and cluster preserving quantization aware training (PCQAT) Keras example", "_____no_output_____" ], [ "## Overview\n\nThis is an end to end example showing the usage of the **sparsity and cluster preserving quantization aware training (PCQAT)** API, part of the TensorFlow Model Optimization Toolkit's collaborative optimization pipeline.\n\n### Other pages\n\nFor an introduction to the pipeline and other available techniques, see the [collaborative optimization overview page](https://www.tensorflow.org/model_optimization/guide/combine/collaborative_optimization).\n\n### Contents\n\nIn the tutorial, you will:\n\n1. Train a `tf.keras` model for the MNIST dataset from scratch.\n2. Fine-tune the model with pruning and see the accuracy and observe that the model was successfully pruned.\n3. Apply sparsity preserving clustering on the pruned model and observe that the sparsity applied earlier has been preserved.\n4. Apply QAT and observe the loss of sparsity and clusters.\n5. Apply PCQAT and observe that both sparsity and clustering applied earlier have been preserved.\n6. Generate a TFLite model and observe the effects of applying PCQAT on it.\n7. Compare the sizes of the different models to observe the compression benefits of applying sparsity followed by the collaborative optimization techniques of sparsity preserving clustering and PCQAT.\n8. Compare the accurracy of the fully optimized model with the un-optimized baseline model accuracy.", "_____no_output_____" ], [ "## Setup\n\nYou can run this Jupyter Notebook in your local [virtualenv](https://www.tensorflow.org/install/pip?lang=python3#2.-create-a-virtual-environment-recommended) or [colab](https://colab.sandbox.google.com/). 
For details of setting up dependencies, please refer to the [installation guide](https://www.tensorflow.org/model_optimization/guide/install).", "_____no_output_____" ] ], [ [ "! pip install -q tensorflow-model-optimization", "_____no_output_____" ], [ "import tensorflow as tf\n\nimport numpy as np\nimport tempfile\nimport zipfile\nimport os", "_____no_output_____" ] ], [ [ "## Train a tf.keras model for MNIST to be pruned and clustered", "_____no_output_____" ] ], [ [ "# Load MNIST dataset\nmnist = tf.keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 to 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.InputLayer(input_shape=(28, 28)),\n tf.keras.layers.Reshape(target_shape=(28, 28, 1)),\n tf.keras.layers.Conv2D(filters=12, kernel_size=(3, 3),\n activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(10)\n])\n\nopt = tf.keras.optimizers.Adam(learning_rate=1e-3)\n\n# Train the digit classification model\nmodel.compile(optimizer=opt,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.fit(\n train_images,\n train_labels,\n validation_split=0.1,\n epochs=10\n)", "_____no_output_____" ] ], [ [ "### Evaluate the baseline model and save it for later usage", "_____no_output_____" ] ], [ [ "_, baseline_model_accuracy = model.evaluate(\n test_images, test_labels, verbose=0)\n\nprint('Baseline test accuracy:', baseline_model_accuracy)\n\n_, keras_file = tempfile.mkstemp('.h5')\nprint('Saving model to: ', keras_file)\ntf.keras.models.save_model(model, keras_file, include_optimizer=False)", "_____no_output_____" ] ], [ [ "## Prune and fine-tune the model to 50% sparsity", "_____no_output_____" ], [ "Apply the `prune_low_magnitude()` API to achieve the pruned model that is to be clustered in the next step. 
Refer to the [pruning comprehensive guide](https://www.tensorflow.org/model_optimization/guide/pruning/comprehensive_guide) for more information on the pruning API.", "_____no_output_____" ], [ "### Define the model and apply the sparsity API\n\nNote that the pre-trained model is used.", "_____no_output_____" ] ], [ [ "import tensorflow_model_optimization as tfmot\n\nprune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude\n\npruning_params = {\n 'pruning_schedule': tfmot.sparsity.keras.ConstantSparsity(0.5, begin_step=0, frequency=100)\n }\n\ncallbacks = [\n tfmot.sparsity.keras.UpdatePruningStep()\n]\n\npruned_model = prune_low_magnitude(model, **pruning_params)\n\n# Use smaller learning rate for fine-tuning\nopt = tf.keras.optimizers.Adam(learning_rate=1e-5)\n\npruned_model.compile(\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=opt,\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### Fine-tune the model, check sparsity, and evaluate the accuracy against baseline\n\nFine-tune the model with pruning for 3 epochs.", "_____no_output_____" ] ], [ [ "# Fine-tune model\npruned_model.fit(\n train_images,\n train_labels,\n epochs=3,\n validation_split=0.1,\n callbacks=callbacks)", "_____no_output_____" ] ], [ [ "Define helper functions to calculate and print the sparsity and clusters of the model.", "_____no_output_____" ] ], [ [ "def print_model_weights_sparsity(model):\n for layer in model.layers:\n if isinstance(layer, tf.keras.layers.Wrapper):\n weights = layer.trainable_weights\n else:\n weights = layer.weights\n for weight in weights:\n if \"kernel\" not in weight.name or \"centroid\" in weight.name:\n continue\n weight_size = weight.numpy().size\n zero_num = np.count_nonzero(weight == 0)\n print(\n f\"{weight.name}: {zero_num/weight_size:.2%} sparsity \",\n f\"({zero_num}/{weight_size})\",\n )\n\ndef print_model_weight_clusters(model):\n for layer in model.layers:\n if isinstance(layer, tf.keras.layers.Wrapper):\n weights = layer.trainable_weights\n else:\n weights = layer.weights\n for weight in weights:\n # ignore auxiliary quantization weights\n if \"quantize_layer\" in weight.name:\n continue\n if \"kernel\" in weight.name:\n unique_count = len(np.unique(weight))\n print(\n f\"{layer.name}/{weight.name}: {unique_count} clusters \"\n )", "_____no_output_____" ] ], [ [ "Let's strip the pruning wrapper first, then check that the model kernels were correctly pruned.", "_____no_output_____" ] ], [ [ "stripped_pruned_model = tfmot.sparsity.keras.strip_pruning(pruned_model)\n\nprint_model_weights_sparsity(stripped_pruned_model)", "_____no_output_____" ] ], [ [ "## Apply sparsity preserving clustering and check its effect on model sparsity in both cases", "_____no_output_____" ], [ "Next, apply sparsity preserving clustering on the pruned model and observe the number of clusters and check that the sparsity is preserved.", "_____no_output_____" ] ], [ [ "import tensorflow_model_optimization as tfmot\nfrom tensorflow_model_optimization.python.core.clustering.keras.experimental import (\n cluster,\n)\n\ncluster_weights = tfmot.clustering.keras.cluster_weights\nCentroidInitialization = tfmot.clustering.keras.CentroidInitialization\n\ncluster_weights = cluster.cluster_weights\n\nclustering_params = {\n 'number_of_clusters': 8,\n 'cluster_centroids_init': CentroidInitialization.KMEANS_PLUS_PLUS,\n 'preserve_sparsity': True\n}\n\nsparsity_clustered_model = cluster_weights(stripped_pruned_model, 
**clustering_params)\n\nsparsity_clustered_model.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nprint('Train sparsity preserving clustering model:')\nsparsity_clustered_model.fit(train_images, train_labels,epochs=3, validation_split=0.1)", "_____no_output_____" ] ], [ [ "Strip the clustering wrapper first, then check that the model is correctly pruned and clustered.", "_____no_output_____" ] ], [ [ "stripped_clustered_model = tfmot.clustering.keras.strip_clustering(sparsity_clustered_model)\n\nprint(\"Model sparsity:\\n\")\nprint_model_weights_sparsity(stripped_clustered_model)\n\nprint(\"\\nModel clusters:\\n\")\nprint_model_weight_clusters(stripped_clustered_model)", "_____no_output_____" ] ], [ [ "## Apply QAT and PCQAT and check effect on model clusters and sparsity", "_____no_output_____" ], [ "Next, apply both QAT and PCQAT on the sparse clustered model and observe that PCQAT preserves weight sparsity and clusters in your model. Note that the stripped model is passed to the QAT and PCQAT API.", "_____no_output_____" ] ], [ [ "# QAT\nqat_model = tfmot.quantization.keras.quantize_model(stripped_clustered_model)\n\nqat_model.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\nprint('Train qat model:')\nqat_model.fit(train_images, train_labels, batch_size=128, epochs=1, validation_split=0.1)\n\n# PCQAT\nquant_aware_annotate_model = tfmot.quantization.keras.quantize_annotate_model(\n stripped_clustered_model)\npcqat_model = tfmot.quantization.keras.quantize_apply(\n quant_aware_annotate_model,\n tfmot.experimental.combine.Default8BitClusterPreserveQuantizeScheme(preserve_sparsity=True))\n\npcqat_model.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\nprint('Train pcqat model:')\npcqat_model.fit(train_images, train_labels, batch_size=128, epochs=1, validation_split=0.1)", "_____no_output_____" ], [ "print(\"QAT Model clusters:\")\nprint_model_weight_clusters(qat_model)\nprint(\"\\nQAT Model sparsity:\")\nprint_model_weights_sparsity(qat_model)\nprint(\"\\nPCQAT Model clusters:\")\nprint_model_weight_clusters(pcqat_model)\nprint(\"\\nPCQAT Model sparsity:\")\nprint_model_weights_sparsity(pcqat_model)", "_____no_output_____" ] ], [ [ "## See compression benefits of PCQAT model\n\nDefine helper function to get zipped model file.", "_____no_output_____" ] ], [ [ "def get_gzipped_model_size(file):\n # It returns the size of the gzipped model in kilobytes.\n\n _, zipped_file = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED) as f:\n f.write(file)\n\n return os.path.getsize(zipped_file)/1000", "_____no_output_____" ] ], [ [ "Observe that applying sparsity, clustering and PCQAT to a model yields significant compression benefits.", "_____no_output_____" ] ], [ [ "# QAT model\nconverter = tf.lite.TFLiteConverter.from_keras_model(qat_model)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nqat_tflite_model = converter.convert()\nqat_model_file = 'qat_model.tflite'\n# Save the model.\nwith open(qat_model_file, 'wb') as f:\n f.write(qat_tflite_model)\n\n# PCQAT model\nconverter = tf.lite.TFLiteConverter.from_keras_model(pcqat_model)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\npcqat_tflite_model = converter.convert()\npcqat_model_file = 'pcqat_model.tflite'\n# Save the model.\nwith open(pcqat_model_file, 'wb') as f:\n 
f.write(pcqat_tflite_model)\n\nprint(\"QAT model size: \", get_gzipped_model_size(qat_model_file), ' KB')\nprint(\"PCQAT model size: \", get_gzipped_model_size(pcqat_model_file), ' KB')", "_____no_output_____" ] ], [ [ "## See the persistence of accuracy from TF to TFLite\n\nDefine a helper function to evaluate the TFLite model on the test dataset.", "_____no_output_____" ] ], [ [ "def eval_model(interpreter):\n input_index = interpreter.get_input_details()[0][\"index\"]\n output_index = interpreter.get_output_details()[0][\"index\"]\n\n # Run predictions on every image in the \"test\" dataset.\n prediction_digits = []\n for i, test_image in enumerate(test_images):\n if i % 1000 == 0:\n print(f\"Evaluated on {i} results so far.\")\n # Pre-processing: add batch dimension and convert to float32 to match with\n # the model's input data format.\n test_image = np.expand_dims(test_image, axis=0).astype(np.float32)\n interpreter.set_tensor(input_index, test_image)\n\n # Run inference.\n interpreter.invoke()\n\n # Post-processing: remove batch dimension and find the digit with highest\n # probability.\n output = interpreter.tensor(output_index)\n digit = np.argmax(output()[0])\n prediction_digits.append(digit)\n\n print('\\n')\n # Compare prediction results with ground truth labels to calculate accuracy.\n prediction_digits = np.array(prediction_digits)\n accuracy = (prediction_digits == test_labels).mean()\n return accuracy", "_____no_output_____" ] ], [ [ "Evaluate the model, which has been pruned, clustered and quantized, and then see that the accuracy from TensorFlow persists in the TFLite backend.", "_____no_output_____" ] ], [ [ "interpreter = tf.lite.Interpreter(pcqat_model_file)\ninterpreter.allocate_tensors()\n\npcqat_test_accuracy = eval_model(interpreter)\n\nprint('Pruned, clustered and quantized TFLite test_accuracy:', pcqat_test_accuracy)\nprint('Baseline TF test accuracy:', baseline_model_accuracy)", "_____no_output_____" ] ], [ [ "## Conclusion", "_____no_output_____" ], [ "In this tutorial, you learned how to create a model, prune it using the `prune_low_magnitude()` API, and apply sparsity preserving clustering using the `cluster_weights()` API to preserve sparsity while clustering the weights.\n\nNext, sparsity and cluster preserving quantization aware training (PCQAT) was applied to preserve model sparsity and clusters while using QAT. The final PCQAT model was compared to the QAT one to show that sparsity and clusters are preserved in the former and lost in the latter.\n\nNext, the models were converted to TFLite to show the compression benefits of chaining sparsity, clustering, and PCQAT model optimization techniques and the TFLite model was evaluated to ensure that the accuracy persists in the TFLite backend.\n\nFinally, the PCQAT TFLite model accuracy was compared to the pre-optimization baseline model accuracy to show that collaborative optimization techniques managed to achieve the compression benefits while maintaining a similar accuracy compared to the original model.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0a85a0339327bb648bf937b590a8368190b586e
161,747
ipynb
Jupyter Notebook
mathematics/random_data_generation.ipynb
yefang008514/machinelearning
396c6301397aeb1ffb5aaa0387fdc13b5b76259a
[ "MIT" ]
6,693
2018-09-13T05:46:51.000Z
2022-03-31T06:31:43.000Z
mathematics/random_data_generation.ipynb
JinhuaSu/machinelearning
17302f708146ad46838b3782bf735364fa3cf16d
[ "MIT" ]
18
2018-11-29T09:36:25.000Z
2021-06-30T03:04:57.000Z
mathematics/random_data_generation.ipynb
JinhuaSu/machinelearning
17302f708146ad46838b3782bf735364fa3cf16d
[ "MIT" ]
3,444
2018-09-14T01:36:45.000Z
2022-03-31T06:30:54.000Z
508.638365
59,632
0.946657
[ [ [ "Copyright (C)\n2016 - 2019 Pinard Liu([email protected])\n\nhttps://www.cnblogs.com/pinard\n\nPermission given to modify the code as long as you keep this declaration at the top\n\n机器学习算法的随机数据生成 https://www.cnblogs.com/pinard/p/6047802.html", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "np.random.rand(3,2,2)", "_____no_output_____" ], [ "np.random.randn(3,2)", "_____no_output_____" ], [ "2*np.random.randn(3,2) + 1", "_____no_output_____" ], [ "np.random.randint(3, size=[2,3,4])", "_____no_output_____" ], [ "np.random.randint(3, 6, size=[2,3])", "_____no_output_____" ], [ "(5-2)*np.random.random_sample(3)+2", "_____no_output_____" ], [ "from sklearn.datasets.samples_generator import make_regression\n# X为样本特征,y为样本输出, coef为回归系数,共1000个样本,每个样本1个特征\nX, y, coef =make_regression(n_samples=1000, n_features=1,noise=10, coef=True)\n# 画图\nplt.scatter(X, y, color='black')\nplt.plot(X, X*coef, color='blue',\n linewidth=3)\n\nplt.xticks(())\nplt.yticks(())\n\nplt.show()", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom sklearn.datasets.samples_generator import make_classification\n# X1为样本特征,Y1为样本类别输出, 共400个样本,每个样本2个特征,输出有3个类别,没有冗余特征,每个类别一个簇\nX1, Y1 = make_classification(n_samples=400, n_features=2, n_redundant=0,\n n_clusters_per_class=1, n_classes=3)\nplt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)\nplt.show()", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom sklearn.datasets.samples_generator import make_blobs\n# X为样本特征,Y为样本簇类别, 共1000个样本,每个样本2个特征,共3个簇,簇中心在[-1,-1], [1,1], [2,2], 簇方差分别为[0.4, 0.5, 0.2]\nX, y = make_blobs(n_samples=1000, n_features=2, centers=[[-1,-1], [1,1], [2,2]], cluster_std=[0.4, 0.5, 0.2])\nplt.scatter(X[:, 0], X[:, 1], marker='o', c=y)\nplt.show()", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom sklearn.datasets import make_gaussian_quantiles\n#生成2维正态分布,生成的数据按分位数分成3组,1000个样本,2个样本特征均值为1和2,协方差系数为2\nX1, Y1 = make_gaussian_quantiles(n_samples=1000, n_features=2, n_classes=3, mean=[1,2],cov=2)\nplt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a85b63dd81d671a63e62179367fa3722692888
22,881
ipynb
Jupyter Notebook
code/2_class_cnn/1_cnn_classify.ipynb
vmos1/cosmogan_pytorch
75d3d4f652a92d45d823a051b750b35d802e2317
[ "BSD-3-Clause-LBNL" ]
1
2020-10-19T18:52:50.000Z
2020-10-19T18:52:50.000Z
code/2_class_cnn/1_cnn_classify.ipynb
vmos1/cosmogan_pytorch
75d3d4f652a92d45d823a051b750b35d802e2317
[ "BSD-3-Clause-LBNL" ]
1
2020-11-13T22:35:02.000Z
2020-11-14T02:00:44.000Z
code/2_class_cnn/1_cnn_classify.ipynb
vmos1/cosmogan_pytorch
75d3d4f652a92d45d823a051b750b35d802e2317
[ "BSD-3-Clause-LBNL" ]
null
null
null
36.434713
1,598
0.520257
[ [ [ "# Testing cnn for classifying universes\nNov 10, 2020\n", "_____no_output_____" ] ], [ [ "import argparse\nimport os\nimport random\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torchsummary import summary\nfrom torch.utils.data import DataLoader, TensorDataset\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom IPython.display import HTML\n\n\nimport time\nfrom datetime import datetime\nimport glob\nimport pickle\nimport yaml\nimport logging", "_____no_output_____" ], [ "%matplotlib widget", "_____no_output_____" ] ], [ [ "## Modules", "_____no_output_____" ] ], [ [ "def f_load_config(config_file):\n with open(config_file) as f:\n config = yaml.load(f, Loader=yaml.SafeLoader)\n return config\n\n### Transformation functions for image pixel values\ndef f_transform(x):\n return 2.*x/(x + 4.) - 1.\n\ndef f_invtransform(s):\n return 4.*(1. + s)/(1. - s)\n\n", "_____no_output_____" ], [ "# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n# Generator Code\nclass View(nn.Module):\n def __init__(self, shape):\n super(View, self).__init__()\n self.shape = shape\n\n def forward(self, x):\n return x.view(*self.shape)\n\nclass Discriminator(nn.Module):\n def __init__(self, ngpu, nz,nc,ndf,n_classes,kernel_size,stride,d_padding):\n super(Discriminator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n # nn.Conv2d(in_channels, out_channels, kernel_size,stride,padding,output_padding,groups,bias, Dilation,padding_mode)\n nn.Conv2d(nc, ndf,kernel_size, stride, d_padding, bias=True),\n nn.BatchNorm2d(ndf,eps=1e-05, momentum=0.9, affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, kernel_size, stride, d_padding, bias=True),\n nn.BatchNorm2d(ndf * 2,eps=1e-05, momentum=0.9, affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, kernel_size, stride, d_padding, bias=True),\n nn.BatchNorm2d(ndf * 4,eps=1e-05, momentum=0.9, affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, kernel_size, stride, d_padding, bias=True),\n nn.BatchNorm2d(ndf * 8,eps=1e-05, momentum=0.9, affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(ndf*8) x 4 x 4\n nn.Flatten(),\n nn.Linear(nc*ndf*8*8*8, n_classes)\n# nn.Sigmoid()\n )\n\n def forward(self, input):\n return self.main(input)\n\n", "_____no_output_____" ] ], [ [ "## Main code", "_____no_output_____" ] ], [ [ "torch.backends.cudnn.benchmark=True\nt0=time.time()\n#################################\n###### Initialize variables #######\nconfig_file='config_128.yaml'\nconfig_dict=f_load_config(config_file)\nprint(config_dict)\n\nworkers=config_dict['training']['workers']\nnc,nz,ngf,ndf=config_dict['training']['nc'],config_dict['training']['nz'],config_dict['training']['ngf'],config_dict['training']['ndf']\nlr,beta1=config_dict['training']['lr'],config_dict['training']['beta1']\nkernel_size,stride=config_dict['training']['kernel_size'],config_dict['training']['stride']\ng_padding,d_padding=config_dict['training']['g_padding'],config_dict['training']['d_padding']\nflip_prob=config_dict['training']['flip_prob']\n\nimage_size=config_dict['data']['image_size']\ncheckpoint_size=config_dict['data']['checkpoint_size']\nnum_imgs=config_dict['data']['num_imgs']\nip_fname=config_dict['data']['ip_fname']\nop_loc=config_dict['data']['op_loc']\n\n# Overriding configs in .yaml file (different for jupyter notebook)\nngpu=1\nbatch_size=128\nspec_loss_flag=True\ncheckpoint_size=50\nnum_imgs=2000 # Number of images to use \nnum_epochs=4\nlr=0.0002\nn_classes=6\n\n### Initialize random seed (different for Jpt notebook)\nmanualSeed=21245\nprint(\"Random Seed: \", manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\ndevice = torch.device(\"cuda\" if (torch.cuda.is_available() and ngpu > 0) else \"cpu\")\nprint('Device:',device)\n\n# #################################\n# ####### Read data and precompute ######\n# # ip_fname='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_2_smoothing_200k/norm_1_train_val.npy'\n# ip_fname='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_x.npy'\n# labels='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_y.npy'\n\n# img=np.load(ip_fname)[:num_imgs].transpose(0,1,2,3)\n# t_img=torch.from_numpy(img)\n# print(img.shape,t_img.shape)\n\n# dataset=TensorDataset(t_img)\n# dataloader=DataLoader(dataset,batch_size=batch_size,shuffle=True,num_workers=1,drop_last=True)\n\n#################################\n###### Build Networks ###\nprint(\"Building CNN\")\n# Create Discriminator\nnetD = Discriminator(ngpu, nz,nc,ndf,n_classes,kernel_size,stride,g_padding).to(device)\nnetD.apply(weights_init)\nprint(netD)\nsummary(netD,(1,128,128))\n# Handle multi-gpu if desired\nngpu=torch.cuda.device_count()\n\nprint(\"Number of GPUs used\",ngpu)\nif (device.type == 'cuda') and (ngpu > 1):\n netD = nn.DataParallel(netD, list(range(ngpu)))\n\n# Initialize BCELoss function\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(netD.parameters(), lr=0.001, momentum=0.9)\n\n# fixed_noise = torch.randn(batch_size, 1, 1, nz, device=device) #Latent vectors to view G progress\n\n# Setup Adam optimizers for both G and D\n# optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999),eps=1e-7)\n\n################################# \n###### Set up directories ####### (different for Jpt notebook)\n# run_suffix='_nb_test'\n# ### Create prefix for foldername \n# now=datetime.now()\n# fldr_name=now.strftime('%Y%m%d_%H%M%S') ## time format\n# # print(fldr_name)\n# save_dir=op_loc+fldr_name+run_suffix\n\n# if not os.path.exists(save_dir):\n# 
os.makedirs(save_dir+'/models')\n# os.makedirs(save_dir+'/images')\n\n# Fresh start \n# iters = 0; start_epoch=0\n# best_chi1,best_chi2=1e10,1e10", "{'description': 'GAN', 'data': {'ip_fname': '/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_2_smoothing_200k/norm_1_train_val.npy', 'op_loc': '/global/cfs/cdirs/m3363/vayyar/cosmogan_data/results_from_other_code/pytorch/results/128sq/', 'image_size': 128, 'checkpoint_size': 1, 'num_imgs': 200000}, 'training': {'workers': 2, 'nc': 1, 'nz': 64, 'ngf': 64, 'ndf': 64, 'lr': 0.0002, 'beta1': 0.5, 'kernel_size': 5, 'stride': 2, 'g_padding': 2, 'd_padding': 2, 'flip_prob': 0.01}}\nRandom Seed: 21245\nDevice: cuda\nBuilding CNN\nDiscriminator(\n (main): Sequential(\n (0): Conv2d(1, 64, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2))\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.2, inplace=True)\n (3): Conv2d(64, 128, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2))\n (4): BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True, track_running_stats=True)\n (5): LeakyReLU(negative_slope=0.2, inplace=True)\n (6): Conv2d(128, 256, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2))\n (7): BatchNorm2d(256, eps=1e-05, momentum=0.9, affine=True, track_running_stats=True)\n (8): LeakyReLU(negative_slope=0.2, inplace=True)\n (9): Conv2d(256, 512, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2))\n (10): BatchNorm2d(512, eps=1e-05, momentum=0.9, affine=True, track_running_stats=True)\n (11): LeakyReLU(negative_slope=0.2, inplace=True)\n (12): Flatten()\n (13): Linear(in_features=32768, out_features=6, bias=True)\n )\n)\n----------------------------------------------------------------\n Layer (type) Output Shape Param #\n================================================================\n Conv2d-1 [-1, 64, 64, 64] 1,664\n BatchNorm2d-2 [-1, 64, 64, 64] 128\n LeakyReLU-3 [-1, 64, 64, 64] 0\n Conv2d-4 [-1, 128, 32, 32] 204,928\n BatchNorm2d-5 [-1, 128, 32, 32] 256\n LeakyReLU-6 [-1, 128, 32, 32] 0\n Conv2d-7 [-1, 256, 16, 16] 819,456\n BatchNorm2d-8 [-1, 256, 16, 16] 512\n LeakyReLU-9 [-1, 256, 16, 16] 0\n Conv2d-10 [-1, 512, 8, 8] 3,277,312\n BatchNorm2d-11 [-1, 512, 8, 8] 1,024\n LeakyReLU-12 [-1, 512, 8, 8] 0\n Flatten-13 [-1, 32768] 0\n Linear-14 [-1, 6] 196,614\n================================================================\nTotal params: 4,501,894\nTrainable params: 4,501,894\nNon-trainable params: 0\n----------------------------------------------------------------\nInput size (MB): 0.06\nForward/backward pass size (MB): 11.50\nParams size (MB): 17.17\nEstimated Total Size (MB): 28.74\n----------------------------------------------------------------\nNumber of GPUs used 1\n" ], [ "# ip_fname='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_x.npy'\n# labels_file='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_y.npy'\n# ids_file='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_id.npy'\n\n# img=np.load(ip_fname)\n# labels=np.load(labels_file)\n# ids=np.load(ids_file)\n\n# t_img=torch.from_numpy(img)\n# print(img.shape,t_img.shape)", "_____no_output_____" ], [ "## Read data from 
dataframe\ndata_dir='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/'\ndf_data=pd.read_pickle(data_dir+'/df_data.pkle')\ndf_data=df_data.sample(frac=1,random_state=20).reset_index(drop=True)\ntrain_size,val_size,test_size=0.7,0.1,0.1\ndata_size=df_data.shape[0]", "_____no_output_____" ], [ "df_data[['ID','label']].head()", "_____no_output_____" ], [ "idx1,idx2,idx3=int(train_size*data_size),int((train_size+val_size)*data_size),int((train_size+val_size+test_size)*data_size)\nprint(idx1,idx2,idx3)\n\ndf_temp=df_data.loc[np.arange(0,idx1)]\ndataset=TensorDataset(torch.Tensor(np.stack(df_temp.img.values)),torch.Tensor(df_temp.label.values))\ntrain_loader=DataLoader(dataset,batch_size=batch_size,shuffle=True,num_workers=1,drop_last=True)\n\ndf_temp=df_data.loc[np.arange(idx1,idx2)]\ndataset=TensorDataset(torch.Tensor(np.stack(df_temp.img.values)),torch.Tensor(df_temp.label.values))\nval_loader=DataLoader(dataset,batch_size=16,shuffle=True,num_workers=1,drop_last=True)\n\ndf_temp=df_data.loc[np.arange(idx2,idx3)]\ndataset=TensorDataset(torch.Tensor(np.stack(df_temp.img.values)),torch.Tensor(df_temp.label.values))\ntest_loader=DataLoader(dataset,batch_size=8,shuffle=True,num_workers=1,drop_last=True)\n\n", "25804 29491 33177\n" ], [ "## Test model\ndef f_test(data_loader,netD):\n netD.eval()\n correct,total=0,0\n with torch.no_grad():\n for count,data in enumerate(data_loader):\n images,labels=data[0].to(device),data[1].to(device)\n outputs=netD(images)\n _,predictions=torch.max(outputs,1)\n total+=labels.size(0)\n correct+=(predictions==labels).sum().item()\n\n accuracy=(correct/total)*100\n# print(\"Accuracy %\",accuracy)\n# print(correct,total)\n return accuracy\n", "_____no_output_____" ], [ "accuracy=[]\nfor epoch in range(0,4):\n running_loss=0.0\n print(\"Epoch\",epoch)\n for i, data in enumerate(train_loader):\n # print(images.shape,labels.shape)\n images,labels=data[0].to(device),data[1].to(device)\n optimizer.zero_grad()\n\n # netD.train(); ### Need to add these after inference and before training\n netD.zero_grad()\n labels=labels.long()\n output = netD(images)\n\n loss= criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss+=loss.item()\n \n if i%10==0: accuracy.append(f_test(val_loader,netD))\n netD.train()\n", "Epoch 0\n" ], [ "plt.figure()\nplt.plot(accuracy)\n", "_____no_output_____" ], [ "## Test model\nf_test(test_loader,netD)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a85c82bfe0ab67337eacbdd67e6c5733bfb215
376,005
ipynb
Jupyter Notebook
demo.ipynb
neilshah13/capstone21
1be9175d70041cb3ee429f31dd51dd11c7ab39af
[ "MIT" ]
null
null
null
demo.ipynb
neilshah13/capstone21
1be9175d70041cb3ee429f31dd51dd11c7ab39af
[ "MIT" ]
1
2021-10-21T02:48:48.000Z
2021-10-21T02:48:48.000Z
demo.ipynb
neilshah13/capstone21
1be9175d70041cb3ee429f31dd51dd11c7ab39af
[ "MIT" ]
6
2021-08-30T02:51:32.000Z
2022-01-06T04:11:12.000Z
390.451713
174,642
0.925788
[ [ [ "# User Demo ", "_____no_output_____" ] ], [ [ "url = \"http://127.0.0.1:5000\"\r\nfilepath = 'C:\\\\Users\\\\reonh\\Documents\\\\NUS\\AY2022_S1\\Capstone\\capstone_21\\python_backend\\database\\lpdlprnet\\plate_2.jpg'\r\nfolderpath = 'C:\\\\Users\\\\reonh\\Documents\\\\NUS\\AY2022_S1\\Capstone\\capstone_21\\python_backend\\database\\lpdlprnet\\\\'\r\nfilename = 'plate.jpg'", "_____no_output_____" ] ], [ [ "## Check Server Status", "_____no_output_____" ] ], [ [ "import requests\r\n\r\nresponse = requests.get( url + \"/api/lpdlprnet/\" + 'internal')\r\n\r\nprint(response.json(), flush=True)", "{'HTTPStatus': 200, 'status': 'Active'}\n" ] ], [ [ "## Scenario: Developer needs to recognise license plates for the following images\n\n### Get Predictions", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\n\r\nfiles = [folderpath + 'plate.jpg']\r\n\r\ndef process(filename: str=None):\r\n \"\"\"\r\n View multiple images stored in files, stacking vertically\r\n\r\n Arguments:\r\n filename: str - path to filename containing image\r\n \"\"\"\r\n image = mpimg.imread(filename)\r\n plt.figure()\r\n plt.imshow(image)\r\n return image\r\n\r\nim = process(files[0])\r\n", "_____no_output_____" ], [ "M = im.shape[0]//10\r\nN = im.shape[1]//10\r\ntile_coord = [[x,x+M,y,y+N] for x in range(0,im.shape[0],M) for y in range(0,im.shape[1],N)]", "_____no_output_____" ], [ "ts = im.copy()\r\na = tile_coord[0][0]\r\nb = tile_coord[0][1]\r\nc = tile_coord[0][2]\r\nd = tile_coord[0][3]", "_____no_output_____" ], [ "for i,matrix in enumerate(tile_coord):\r\n a,b,c,d = matrix\r\n ts = im.copy()\r\n ts[a:b, c:d] = 255\r\n plt.imsave(str(i)+'range_rover.jpg', ts)\r\n ", "_____no_output_____" ], [ "# Used for benchmarking\r\n# [x1, x2, y1, y2] and [x3, x4, y3, y4] \r\ndef calculate_iou_from_coords(bx1, bx2):\r\n assert (is_bounding_box(bx1) and is_bounding_box(bx2))\r\n #map list of coordinates into bounding box\r\n bb1, bb2 = {},{}\r\n bb1['x1'], bb1['x2'], bb1['y1'], bb1['y2'] = bx1\r\n bb2['x1'], bb2['x2'], bb2['y1'], bb2['y2'] = bx2\r\n\r\n assert bb1['x1'] < bb1['x2']\r\n assert bb1['y1'] < bb1['y2']\r\n assert bb2['x1'] < bb2['x2']\r\n assert bb2['y1'] < bb2['y2']\r\n\r\n # determine the coordinates of the intersection rectangle\r\n x_left = max(bb1['x1'], bb2['x1'])\r\n y_top = max(bb1['y1'], bb2['y1'])\r\n x_right = min(bb1['x2'], bb2['x2'])\r\n y_bottom = min(bb1['y2'], bb2['y2'])\r\n\r\n# print(x_left, y_top, x_right, y_bottom)\r\n\r\n if x_right < x_left or y_bottom < y_top:\r\n return 0.0\r\n\r\n # The intersection of two axis-aligned bounding boxes is always an\r\n # axis-aligned bounding box\r\n intersection_area = (x_right - x_left) * (y_bottom - y_top)\r\n\r\n # compute the area of both AABBs\r\n bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])\r\n bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])\r\n\r\n # compute the intersection over union by taking the intersection\r\n # area and dividing it by the sum of prediction + ground-truth\r\n # areas - the interesection area\r\n iou = intersection_area / float(bb1_area + bb2_area - intersection_area)\r\n# print(iou, intersection_area, bb1_area, bb2_area)\r\n assert iou >= 0.0\r\n assert iou <= 1.0\r\n return iou\r\n \r\n \r\n\r\ndef is_bounding_box(box):\r\n return type(box) in [list, tuple, pd.Series, np.ndarray] and len(box) == 4\r\n\r\n\r\n# [x1, y1, w1, h1] and [x2, y2, w2, h2] \r\ndef calculate_iou_from_dim(bx1, bx2):\r\n return 
calculate_iou_from_coords(*list(map(convert_dim_to_coord, [bx1, bx2])))\r\n\r\n\r\ndef convert_dim_to_coord(args):\r\n x,y,w,h = args\r\n return [x, x+w, y, y+h]\r\n\r\ndef chop_image(im, n): \r\n \"\"\"\r\n Chop image into n segments\r\n \"\"\"\r\n import matplotlib.pyplot as plt\r\n org = plt.imread(im)\r\n im = org.copy()\r\n M = im.shape[0]//n\r\n N = im.shape[1]//n\r\n tile_coord = [[x,x+M,y,y+N] for x in range(0,im.shape[0],M) for y in range(0,im.shape[1],N)]\r\n response =[org]\r\n for i,matrix in enumerate(tile_coord):\r\n a,b,c,d = matrix\r\n ts = im.copy()\r\n ts[a:b, c:d] = 0\r\n response.append(ts)\r\n return response\r\n ", "_____no_output_____" ], [ "plate = plt.imread('C:\\\\Users\\\\reonh\\\\Documents\\\\NUS\\\\AY2022_S1\\\\Capstone\\\\capstone_21\\\\python_backend\\\\database\\\\lpdlprnet\\\\plate.jpg')\r\noplate = plt.imread('C:\\\\Users\\\\reonh\\\\Documents\\\\NUS\\\\AY2022_S1\\\\Capstone\\\\capstone_21\\\\python_backend\\\\triton_client\\\\lpdnet\\\\input\\\\internal\\\\081021_042837\\\\0plate.jpg')", "_____no_output_____" ], [ "plt.imshow(plate-oplate)", "_____no_output_____" ], [ "import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nresponses = pd.read_pickle('python_backend/response')\r\nresponses", "_____no_output_____" ], [ "conf = {}\r\nfor r in responses:\r\n key = r['file_name'].replace('plate.jpg','')\r\n try:\r\n sc = r['all_bboxes'][0]['confidence_score']\r\n except:\r\n sc = 0.01\r\n conf[key] = sc\r\n ", "_____no_output_____" ], [ "conf", "_____no_output_____" ], [ "chunks = chop_image(folderpath + 'plate_2.jpg', 10)", "_____no_output_____" ], [ "def conf_color(x):\r\n return 255/x\r\n\r\n", "_____no_output_____" ], [ "org = plt.imread(folderpath + 'plate.jpg')\r\nn = 10\r\nim = org.copy()\r\nM = im.shape[0]//n\r\nN = im.shape[1]//n\r\ntile_coord = [[x,x+M,y,y+N] for x in range(0,im.shape[0],M) for y in range(0,im.shape[1],N)]\r\nresponse =[]\r\nfor i,matrix in enumerate(tile_coord):\r\n a,b,c,d = matrix\r\n im[a:b,c:d] = conf_color(conf[str(i)])\r\nplt.imshow(org)\r\nplt.imshow(im, alpha=0.8)\r\n", "_____no_output_____" ], [ "import requests\r\n\r\nbaseURL = url\r\n\r\n\r\nrequest_files=[ ('image',(files[0],open(files[0],'rb'),'image/jpeg')) , ('image',(files[1],open(files[1],'rb'),'image/jpeg'))]\r\nheaders = {}\r\npayload = {'filename':['plate.jpg', 'plate_2.jpg']}\r\n\r\nresponse = requests.post( baseURL + \"/api/lpdlprnet/internal\", headers=headers, data=payload, files=request_files)\r\nprint(response.json()['0']['0_lpr']['license_plate'])\r\nprint(response.json()['1']['0_lpr']['license_plate'])\r\n\r\n", "3SAM123\nFE002CA\n" ] ], [ [ "### Can we explain this output?", "_____no_output_____" ] ], [ [ "import requests\r\n\r\nbaseURL = url\r\nfilename = filename\r\n\r\nfilepath = filepath\r\n\r\n\r\nfiles=[ ('image',(filename,open(filepath,'rb'),'image/jpeg')) ]\r\nheaders = {}\r\n\r\nresponse = requests.post( baseURL + \"/api/lpdlprnet/explain/internal\", headers=headers, data=payload, files=files)", "_____no_output_____" ], [ "from IPython.display import Markdown, display\r\n\r\ndisplay(Markdown(response.json()['explain_markdown']))", "_____no_output_____" ] ], [ [ "### How to write this code?", "_____no_output_____" ] ], [ [ "import requests\r\n\r\nbaseURL = url\r\n\r\n\r\nfiles=[ ('image',(filename,open(filepath,'rb'),'image/jpeg')) ]\r\nheaders = {}\r\n\r\nresponse = requests.post( baseURL + \"/api/lpdlprnet/internal\", headers=headers, data=payload, files=files)\r\nresponse.json()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d0a8626719a36e342885463ce304c3d7b7e74b80
9,976
ipynb
Jupyter Notebook
05 OOPS-3/5.1 Abstract Classes.ipynb
suhassuhas/Coding-Ninjas---Data-Structures-and-Algorithms-in-Python
e660d5a83b80df9cb67b2d06f2b5ba182586f3da
[ "Unlicense" ]
4
2021-09-09T06:52:31.000Z
2022-01-09T00:05:11.000Z
05 OOPS-3/5.1 Abstract Classes.ipynb
rishitbhojak/Coding-Ninjas---Data-Structures-and-Algorithms-in-Python
3b5625df60f7ac554fae58dc8ea9fd42012cbfae
[ "Unlicense" ]
null
null
null
05 OOPS-3/5.1 Abstract Classes.ipynb
rishitbhojak/Coding-Ninjas---Data-Structures-and-Algorithms-in-Python
3b5625df60f7ac554fae58dc8ea9fd42012cbfae
[ "Unlicense" ]
5
2021-09-15T13:49:32.000Z
2022-01-20T20:37:46.000Z
30.695385
757
0.505914
[ [ [ "#### Abstract Classes: contains abstract methods", "_____no_output_____" ], [ "Abstract methods are those which are only declared but they've no implementation\n\n**All methods need to be implemented (mandatory)\n\nModule -- abc\n \n |\n |\n |---> ABC (Class)\n |\n |---> Abstract method (as a functionality and used as a decorator)\n \n** You cannot create objects of an abstract class (evne there is only one abstract method)", "_____no_output_____" ] ], [ [ "from abc import ABC, abstractmethod\n\nclass Automobile(ABC):\n \n def __init__(self):\n print(\"Automobile Created\")\n \n def start(self):\n pass\n \n def start(self):\n pass\n \n def start(self):\n pass\n \nc = Automobile()", "Automobile Created\n" ], [ "from abc import ABC, abstractmethod\n\nclass Automobile(ABC):\n \n def __init__(self):\n print(\"Automobile Created\")\n \n @abstractmethod \n def start(self):\n pass\n \n @abstractmethod \n def start(self):\n pass\n \n @abstractmethod \n def start(self):\n pass\n \nc = Automobile()", "_____no_output_____" ], [ "from abc import ABC, abstractmethod\n\nclass Automobile(ABC):\n \n def __init__(self):\n print(\"Automobile Created\")\n \n @abstractmethod \n def start(self):\n pass\n \n @abstractmethod \n def start(self):\n pass\n \n @abstractmethod \n def start(self):\n pass\n \nclass Car(Automobile):\n \n def __init__(self, name):\n print(\"Car created\")\n self.name = name\n \n def start(self):\n pass\n def stop(self):\n pass\n def drive(self):\n pass\n \nclass Bus(Automobile):\n def __init__(self, name):\n print(\"Bus Created\")\n self.name = name\n def start(self):\n pass\n def stop(self):\n pass\n def drive(self):\n pass\n \n \nc = Car(\"Honda\")\nd = Bus(\"Delhi Metro BUs\")", "Car created\nBus Created\n" ] ], [ [ "#### 1) Object of abstract class cannot be created\n#### 2) Implement all the abstract methods in the child class", "_____no_output_____" ] ], [ [ "# Predict the output:\nfrom abc import ABC,abstractmethod\n\nclass A(ABC):\n\n @abstractmethod\n def fun1(self):\n pass\n\n @abstractmethod\n def fun2(self):\n pass\n \n\no = A()\no.fun1()", "_____no_output_____" ], [ "# Predict the output:\nfrom abc import ABC,abstractmethod\n\nclass A(ABC):\n\n @abstractmethod\n def fun1(self):\n pass\n\n @abstractmethod\n def fun2(self):\n pass\n\nclass B(A):\n\n def fun1(self):\n print(\"function 1 called\")\n\no = B()\no.fun1()", "_____no_output_____" ], [ "#Predict the Output:\nfrom abc import ABC,abstractmethod\n\nclass A(ABC):\n\n @abstractmethod\n def fun1(self):\n pass\n\n @abstractmethod\n def fun2(self):\n pass\n\nclass B(A):\n\n def fun1(self):\n print(\"function 1 called\")\n\n def fun2(self):\n print(\"function 2 called\")\n \no = B()\no.fun1()", "function 1 called\n" ] ], [ [ "** In this 3rd example, it's clearly visible that you've implemented all the abstract funtions of class A, class B inherits class A and created the object for class B. Then finally called the fun1() from the object. So, the output got printed else you can visit the first two examples. It throws an error either if you implement only one class or if you try to create an object for abstract class (Example 1)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
d0a868baef64484ad3279532274596c2693e7a58
164,405
ipynb
Jupyter Notebook
2023683_code_classification.ipynb
andreypeshev/classification-and-regression
5c4ebf41637b99e1a5fde1228dc206e6c4615665
[ "MIT" ]
null
null
null
2023683_code_classification.ipynb
andreypeshev/classification-and-regression
5c4ebf41637b99e1a5fde1228dc206e6c4615665
[ "MIT" ]
null
null
null
2023683_code_classification.ipynb
andreypeshev/classification-and-regression
5c4ebf41637b99e1a5fde1228dc206e6c4615665
[ "MIT" ]
null
null
null
134.64783
71,600
0.857614
[ [ [ "#imports \nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier, VotingClassifier\nfrom sklearn.neural_network import MLPClassifier\n\nfrom sklearn.decomposition import PCA\n\nfrom imblearn.over_sampling import RandomOverSampler, SMOTE\nfrom imblearn.pipeline import make_pipeline\n\n\nfrom imblearn.pipeline import Pipeline \nimport time", "_____no_output_____" ] ], [ [ "# 1. Classification of Facial Expressions\n", "_____no_output_____" ], [ "### 1.1 Importing the data", "_____no_output_____" ] ], [ [ "#load the data\ndata = np.load(\"2023683_face.npz\")\ndata.files\ndata[\"X_train\"]", "_____no_output_____" ], [ "#put the data into corresponding arrays\nX_train = data[\"X_train\"]\ny_train = data[\"y_train\"]\nX_test = data[\"X_test\"]\ny_test = data[\"y_test\"]\nX_valid = data[\"X_valid\"]\ny_valid = data[\"y_valid\"]", "_____no_output_____" ] ], [ [ "### 1.2 EDA", "_____no_output_____" ] ], [ [ "print(X_train.shape)\nprint(y_train.shape)", "(1930, 2304)\n(1930,)\n" ], [ "#unique categories \nnp.unique(y_train)", "_____no_output_____" ], [ "X_train", "_____no_output_____" ], [ "X_train[0].shape", "_____no_output_____" ] ], [ [ "#### 1.2.1 Plotting an image from each category in the data ", "_____no_output_____" ] ], [ [ "#finding the indexes of the first occurances of the unique categories \nunique_categories = np.unique(y_train, return_index=True)[1]", "_____no_output_____" ], [ "#idea adopted from practical 10 \nfig, axs = plt.subplots(1, 3, figsize=(10, 10), subplot_kw={'xticks': (), 'yticks': ()})\n\nfor index, ax in zip(unique_categories, [0,1,2]):\n image = X_train[index]\n # an instance has 2304 features meaning that the square root of this number \n # gives the the dimensions of the image\n image.resize(48, 48)\n axs[ax].imshow(image)\n axs[ax].set_title(y_train[index])", "_____no_output_____" ] ], [ [ "#### 1.2.2 Displaying the number of observations for each class", "_____no_output_____" ] ], [ [ "#combining all the target labels \ncombined_labels = np.concatenate((y_train, y_test, y_valid))", "_____no_output_____" ], [ "combined_labels.shape", "_____no_output_____" ], [ "labels, counts = np.unique(combined_labels, return_counts=True)\nplt.bar(labels, counts)\nplt.xticks(labels)\nplt.title(\"Number of observations for each class\")\nplt.xlabel(\"Class\")\nplt.ylabel(\"Count\")", "_____no_output_____" ], [ "#balancing the data using RandomOverSampler\nros = RandomOverSampler(random_state=202, sampling_strategy='not majority')\nX_train_res, y_train_res = ros.fit_resample(X_train, y_train)\nX_test_res, y_test_res = ros.fit_resample(X_test, y_test)\nX_valid_res, y_valid_res = ros.fit_resample(X_valid, y_valid)", "_____no_output_____" ] ], [ [ "The data seems to be unbalanced because category 0 is observed 1200 times, 1 - a little less than 800 and 2 - around 400.", "_____no_output_____" ], [ "Fitting a PCA:", "_____no_output_____" ] ], [ [ "#starting with a PCA with 250 components\npca = PCA(n_components=250, random_state=202)\npca.fit(X_train_res)\nX_train_pca = pca.transform(X_train_res)\nX_test_pca = pca.transform(X_test_res)\nX_train_pca.shape", "_____no_output_____" ], [ "#plotting the explained variance ratio: looks like the best number of components is around 
75\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.axhline(y=0.95, color='r', linestyle='-')", "_____no_output_____" ], [ "#initializing a PCA with 75 components\npca = PCA(n_components=75, random_state=202)\npca.fit(X_train_res)\nX_train_pca = pca.transform(X_train_res)\nX_test_pca = pca.transform(X_test_res)\nX_valid_pca = pca.transform(X_valid_res)", "_____no_output_____" ] ], [ [ "### 1.3 Classfication task ", "_____no_output_____" ], [ "#### 1.3.1 Training KNN Classifier \n\nInitializing the baseline model. ", "_____no_output_____" ] ], [ [ "start = time.time()\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(X_train_pca, y_train_res)\npred = knn.predict(X_test_pca)\n\nprint(f\"Test set score: {knn.score(X_test_pca, y_test_res)}\")\nend = time.time()\n\nprint(f\"Time elapsed: {end-start}\")\nprint(f\"Confusion matrix:\\n {metrics.confusion_matrix(y_test_res, pred)}\")\ntarget_names = ['class 0', 'class 1', 'class 2']\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}\")", "Test set score: 0.6502732240437158\nTime elapsed: 0.07080960273742676\nConfusion matrix:\n [[108 3 11]\n [ 25 95 2]\n [ 72 15 35]]\nClassification report:\n precision recall f1-score support\n\n class 0 0.53 0.89 0.66 122\n class 1 0.84 0.78 0.81 122\n class 2 0.73 0.29 0.41 122\n\n accuracy 0.65 366\n macro avg 0.70 0.65 0.63 366\nweighted avg 0.70 0.65 0.63 366\n\n" ] ], [ [ "Plotting missclassified images.", "_____no_output_____" ] ], [ [ "#displaying two missclassified images for each class (code adapted from workbook 11)\nindex = 0\nmisclassifiedIndexes = []\n\nlab_0 = 0\nlab_1 = 0\nlab_2 = 0\n\nfor label, predict in zip(y_test_res, pred):\n if label != predict:\n #we need this part in order to have only 2 images from each class\n if label == 0 and lab_0 < 2:\n misclassifiedIndexes.append(index)\n lab_0 += 1\n elif label == 1 and lab_1 < 2:\n misclassifiedIndexes.append(index)\n lab_1 += 1\n elif label == 2 and lab_2 < 2:\n misclassifiedIndexes.append(index)\n lab_2 += 1\n index +=1\n\n# plot the corresponding image of the 6th to 9th element in the array \nplt.figure(figsize=(20, 20))\nfor plotIndex, badIndex in enumerate(misclassifiedIndexes):\n plt.subplot(1, 6, plotIndex + 1)\n plt.imshow(np.reshape(X_test[badIndex], (48,48))) \n plt.title(f'Predicted: {pred[badIndex]}, Actual: {y_test_res[badIndex]}', fontsize = 15)", "_____no_output_____" ] ], [ [ "#### 1.3.2 Finding the optimal number of neighbors for KNN", "_____no_output_____" ] ], [ [ "#finding the best number of neighbors using a loop \nn_neighbors = [1, 2, 3, 4, 5, 6]\nmax_accuracy = 0\nbest_neighbors = 0\n\nstart = time.time()\nfor n in n_neighbors:\n knn = KNeighborsClassifier(n_neighbors=n)\n \n knn.fit(X_train_pca, y_train_res)\n \n if knn.score(X_valid_pca, y_valid_res) > max_accuracy:\n max_accuracy = knn.score(X_valid_pca, y_valid_res)\n best_neighbors = n\n\nprint(f\"Best accuracy: {max_accuracy:.2f}\")\nprint(f\"Optimal number of neighbors: {best_neighbors}\")\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\n\n#evaluate model on test set\n\nknn = KNeighborsClassifier(n_neighbors=best_neighbors)\nknn.fit(X_train_pca, y_train_res)\ntest_score = knn.score(X_test_pca, y_test_res)\n\nprint(f\"Test set score with best parameters: {test_score}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, knn.predict(X_test_pca), target_names=target_names)}\")", "Best accuracy: 0.66\nOptimal number of neighbors: 5\nTime elapsed: 
0.2662501335144043\nTest set score with best parameters: 0.6912568306010929\nClassification report:\n precision recall f1-score support\n\n class 0 0.59 0.82 0.69 122\n class 1 0.88 0.70 0.78 122\n class 2 0.68 0.55 0.61 122\n\n accuracy 0.69 366\n macro avg 0.72 0.69 0.69 366\nweighted avg 0.72 0.69 0.69 366\n\n" ] ], [ [ "#### 1.3.3 Fitting different classifiers \n\nFor the hyperparameter tunings I used this resource. It had some useful tips for the different models. \nhttps://machinelearningmastery.com/hyperparameters-for-classification-machine-learning-algorithms/", "_____no_output_____" ] ], [ [ "#SVM Classifier without hyperparameter tuning\nstart = time.time()\nsvc = SVC()\nsvc.fit(X_train_pca, y_train_res)\npred = svc.predict(X_test_pca)\nend = time.time()\n\nprint(f\"Time elapsed: {end-start}\")\nprint(f\"Test set score: {svc.score(X_test_pca, y_test_res)}\")\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(pred, y_test_res)}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}\")", "Time elapsed: 0.4777240753173828\nTest set score: 0.8360655737704918\nConfusion matrix:\n[[ 95 5 22]\n [ 3 113 2]\n [ 24 4 98]]\nClassification report:\n precision recall f1-score support\n\n class 0 0.78 0.78 0.78 122\n class 1 0.96 0.93 0.94 122\n class 2 0.78 0.80 0.79 122\n\n accuracy 0.84 366\n macro avg 0.84 0.84 0.84 366\nweighted avg 0.84 0.84 0.84 366\n\n" ], [ "#SVM Classifier with hyperparameter tuning\nbest_score = 0\nbest_parameters = {}\n\nstart = time.time()\nfor C in [0.01, 0.1, 1, 10, 100]:\n for kernel in [\"rbf\", 'poly']:\n #fitting a model for each combination of hyperparemeters\n svc = SVC(C=C, kernel=kernel)\n svc.fit(X_train_pca, y_train_res)\n \n #evaluating the model on the validation set\n score = svc.score(X_valid_pca, y_valid_res)\n \n if score > best_score:\n best_score = score \n best_parameters = {'C': C, 'kernel':kernel}\n\n\nprint(f\"Best score: {best_score:.2f}\")\nprint(f\"Best hyperparameters: {best_parameters}\")\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\n# fitting the model with best parameters on the test set\n\nsvc = SVC(**best_parameters)\nsvc.fit(X_train_pca, y_train_res)\ntest_score = svc.score(X_test_pca, y_test_res)\n\nprint(f\"Test set score with best parameters: {test_score}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, svc.predict(X_test_pca), target_names=target_names)}\")", "Best score: 0.90\nBest hyperparameters: {'C': 10, 'kernel': 'rbf'}\nTime elapsed: 5.4803009033203125\nTest set score with best parameters: 0.8333333333333334\nClassification report:\n precision recall f1-score support\n\n class 0 0.72 0.88 0.79 122\n class 1 0.97 0.91 0.94 122\n class 2 0.85 0.71 0.78 122\n\n accuracy 0.83 366\n macro avg 0.85 0.83 0.83 366\nweighted avg 0.85 0.83 0.83 366\n\n" ], [ "#Decision Tree Classifier without hyperparameter tuning\nstart = time.time()\ndtc = DecisionTreeClassifier(random_state = 202)\n\ndtc.fit(X_train_pca, y_train_res)\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\n\npred = dtc.predict(X_test_pca)\nprint(f\"Test set score: {dtc.score(X_test_pca, y_test_res)}\")\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(pred, y_test_res)}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}\")", "Time elapsed: 0.17658352851867676\nTest set score: 0.6229508196721312\nConfusion matrix:\n[[91 20 64]\n [ 8 93 14]\n [23 9 44]]\nClassification report:\n 
precision recall f1-score support\n\n class 0 0.52 0.75 0.61 122\n class 1 0.81 0.76 0.78 122\n class 2 0.58 0.36 0.44 122\n\n accuracy 0.62 366\n macro avg 0.64 0.62 0.61 366\nweighted avg 0.64 0.62 0.61 366\n\n" ], [ "#Decision Tree Classifier with hyperparameter tuning\n\nbest_score = 0\nbest_parameters = {}\n\nstart = time.time()\nfor criterion in ['gini', 'entropy']:\n for max_depth in [3,6,9,12,15,17,20]:\n for splitter in ['best', 'random']:\n #fitting a model for each combination of hyperparemeters\n dtc = DecisionTreeClassifier(random_state=202, criterion=criterion, max_depth=max_depth, splitter=splitter)\n dtc.fit(X_train_pca, y_train_res)\n \n #evaluating the model on the validation set\n score = dtc.score(X_valid_pca, y_valid_res)\n \n if score > best_score:\n best_score = score \n best_parameters = {'criterion': criterion, 'max_depth':max_depth, 'splitter':splitter}\n\n\nprint(f\"Best score: {best_score:.2f}\")\nprint(f\"Best hyperparameters: {best_parameters}\")\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\n\n# fitting the model with best parameters on the test set\n\ndtc = DecisionTreeClassifier(**best_parameters, random_state=202)\ndtc.fit(X_train_pca, y_train_res)\ntest_score = dtc.score(X_test_pca, y_test_res)\n\nprint(f\"Test set score with best parameters: {test_score}\")\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(dtc.predict(X_test_pca), y_test_res)}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, dtc.predict(X_test_pca), target_names=target_names)}\")", "Best score: 0.74\nBest hyperparameters: {'criterion': 'entropy', 'max_depth': 9, 'splitter': 'best'}\nTime elapsed: 2.6220104694366455\nTest set score with best parameters: 0.7049180327868853\nConfusion matrix:\n[[78 14 32]\n [11 99 9]\n [33 9 81]]\nClassification report:\n precision recall f1-score support\n\n class 0 0.63 0.64 0.63 122\n class 1 0.83 0.81 0.82 122\n class 2 0.66 0.66 0.66 122\n\n accuracy 0.70 366\n macro avg 0.71 0.70 0.71 366\nweighted avg 0.71 0.70 0.71 366\n\n" ], [ "#Logistic Regression without hyperparameter tuning\nstart = time.time()\nlog_reg = LogisticRegression(max_iter = 10000)\nlog_reg.fit(X_train_pca, y_train_res)\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\npred = log_reg.predict(X_test_pca)\nprint(f\"Test set score: {log_reg.score(X_test_pca, y_test_res)}\")\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(pred, y_test_res)}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}\")", "Time elapsed: 3.8905959129333496\nTest set score: 0.8278688524590164\nConfusion matrix:\n[[ 99 4 27]\n [ 4 109 0]\n [ 19 9 95]]\nClassification report:\n precision recall f1-score support\n\n class 0 0.76 0.81 0.79 122\n class 1 0.96 0.89 0.93 122\n class 2 0.77 0.78 0.78 122\n\n accuracy 0.83 366\n macro avg 0.83 0.83 0.83 366\nweighted avg 0.83 0.83 0.83 366\n\n" ], [ "#Logistic Regression with hyperparameter tuning\n\nbest_score = 0\nbest_parameters = {}\n\nstart = time.time()\nfor C in [0.0001, 0.01, 0.1, 1, 10]:\n for solver in ['newton-cg', 'lbfgs', 'sag']:\n #fitting a model for each combination of hyperparemeters\n log_reg = LogisticRegression(solver=solver, C=C,max_iter = 10000)\n log_reg.fit(X_train_pca, y_train_res)\n \n #evaluating the model on the validation set\n score = log_reg.score(X_valid_pca, y_valid_res)\n \n if score > best_score:\n best_score = score \n best_parameters = {'solver':solver, 'C':C}\n\n\nprint(f\"Best score: 
{best_score:.2f}\")\nprint(f\"Best hyperparameters: {best_parameters}\")\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\n\n# fitting the model with best parameters on the test set\n\nlog_reg = LogisticRegression(**best_parameters, max_iter = 10000)\nlog_reg.fit(X_train_pca, y_train_res)\ntest_score = log_reg.score(X_test_pca, y_test_res)\n\nprint(f\"Test set score with best parameters: {test_score}\")\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(log_reg.predict(X_test_pca), y_test_res)}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, log_reg.predict(X_test_pca), target_names=target_names)}\")", "Best score: 0.82\nBest hyperparameters: {'solver': 'newton-cg', 'C': 0.0001}\nTime elapsed: 31.4239661693573\nTest set score with best parameters: 0.8306010928961749\nConfusion matrix:\n[[ 98 5 27]\n [ 4 111 0]\n [ 20 6 95]]\nClassification report:\n precision recall f1-score support\n\n class 0 0.75 0.80 0.78 122\n class 1 0.97 0.91 0.94 122\n class 2 0.79 0.78 0.78 122\n\n accuracy 0.83 366\n macro avg 0.83 0.83 0.83 366\nweighted avg 0.83 0.83 0.83 366\n\n" ], [ "#MLP classifier without hyperparameter tuning\nstart = time.time()\nmnb = MLPClassifier(random_state=202, max_iter=1000)\nmnb.fit(X_train_pca, y_train_res)\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\npred = mnb.predict(X_test_pca)\nprint(f\"Test set score: {mnb.score(X_test_pca, y_test_res)}\")\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(pred, y_test_res)}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}\")", "Time elapsed: 0.8547160625457764\nTest set score: 0.8087431693989071\nConfusion matrix:\n[[106 16 36]\n [ 8 106 2]\n [ 8 0 84]]\nClassification report:\n precision recall f1-score support\n\n class 0 0.67 0.87 0.76 122\n class 1 0.91 0.87 0.89 122\n class 2 0.91 0.69 0.79 122\n\n accuracy 0.81 366\n macro avg 0.83 0.81 0.81 366\nweighted avg 0.83 0.81 0.81 366\n\n" ], [ "#MLP Classifier with hyperparameter tuning\n\nbest_score = 0\nbest_parameters = {}\n\nstart = time.time()\nfor alpha in [0.001, 0.01, 0.1, 0.5, 0.7 ,0.8]:\n #fitting a model for each combination of hyperparemeters\n mnb = MLPClassifier(alpha=alpha, max_iter = 1000, random_state=202)\n mnb.fit(X_train_pca, y_train_res)\n \n #evaluating the model on the validation set\n score = mnb.score(X_valid_pca, y_valid_res)\n \n if score > best_score:\n best_score = score \n best_parameters = {'alpha': alpha}\n\n\nprint(f\"Best score: {best_score:.2f}\")\nprint(f\"Best hyperparameters: {best_parameters}\")\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\n\n# fitting the model with best parameters on the test set\n\nmnb = MLPClassifier(**best_parameters, max_iter = 1000, random_state=202)\nmnb.fit(X_train_pca, y_train_res)\ntest_score = mnb.score(X_test_pca, y_test_res)\n\nprint(f\"Test set score with best parameters: {test_score}\")\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(mnb.predict(X_test_pca), y_test_res)}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, mnb.predict(X_test_pca), target_names=target_names)}\")", "Best score: 0.86\nBest hyperparameters: {'alpha': 0.7}\nTime elapsed: 16.085976123809814\nTest set score with best parameters: 0.8797814207650273\nConfusion matrix:\n[[112 7 27]\n [ 3 115 0]\n [ 7 0 95]]\nClassification report:\n precision recall f1-score support\n\n class 0 0.77 0.92 0.84 122\n class 1 0.97 0.94 0.96 122\n class 2 0.93 0.78 0.85 122\n\n 
accuracy 0.88 366\n macro avg 0.89 0.88 0.88 366\nweighted avg 0.89 0.88 0.88 366\n\n" ], [ "# Random Forest Classifier without hyperparameter tuning\nstart = time.time()\nrfc = RandomForestClassifier(random_state=202)\nrfc.fit(X_train_pca, y_train_res)\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\npred = rfc.predict(X_test_pca)\nprint(f\"Test set score: {rfc.score(X_test_pca, y_test_res)}\")\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(pred, y_test_res)}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, rfc.predict(X_test_pca), target_names=target_names)}\")", "Time elapsed: 1.3194336891174316\nTest set score: 0.7923497267759563\nConfusion matrix:\n[[119 16 57]\n [ 2 106 0]\n [ 1 0 65]]\nClassification report:\n precision recall f1-score support\n\n class 0 0.62 0.98 0.76 122\n class 1 0.98 0.87 0.92 122\n class 2 0.98 0.53 0.69 122\n\n accuracy 0.79 366\n macro avg 0.86 0.79 0.79 366\nweighted avg 0.86 0.79 0.79 366\n\n" ], [ "print(rfc.get_params())", "{'bootstrap': True, 'ccp_alpha': 0.0, 'class_weight': None, 'criterion': 'gini', 'max_depth': None, 'max_features': 'auto', 'max_leaf_nodes': None, 'max_samples': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 100, 'n_jobs': None, 'oob_score': False, 'random_state': 202, 'verbose': 0, 'warm_start': False}\n" ], [ "#Random Forest Classifier with hyperparameter tuning\n\nbest_score = 0\nbest_parameters = {}\n\nstart = time.time()\nfor max_features in [8, 9, 10, 11]:\n for n_estimators in [10, 100, 1000]:\n for max_depth in [10, 20, 30]:\n #fitting a model for each combination of hyperparemeters\n rfc = RandomForestClassifier(max_features=max_features, n_estimators=n_estimators, max_depth=max_depth, random_state=202)\n rfc.fit(X_train_pca, y_train_res)\n \n #evaluating the model on the validation set\n score = rfc.score(X_valid_pca, y_valid_res)\n \n if score > best_score:\n best_score = score \n best_parameters = {'max_features': max_features, 'n_estimators': n_estimators, 'max_depth':max_depth}\n\n\nprint(f\"Best score: {best_score:.2f}\")\nprint(f\"Best hyperparameters: {best_parameters}\")\nend = time.time()\nprint(f\"Time elapsed: {end-start}\")\n\n# fitting the model with best parameters on the test set\n\nrfc = RandomForestClassifier(**best_parameters, random_state=202)\nrfc.fit(X_train_pca, y_train_res)\ntest_score = rfc.score(X_test_pca, y_test_res)\n\nprint(f\"Test set score with best parameters: {test_score}\")\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(rfc.predict(X_test_pca), y_test_res)}\")\nprint(f\"Classification report:\\n {metrics.classification_report(y_test_res, rfc.predict(X_test_pca), target_names=target_names)}\")", "Best score: 0.75\nBest hyperparameters: {'max_features': 10, 'n_estimators': 100, 'max_depth': 20}\nTime elapsed: 195.14487385749817\nTest set score with best parameters: 0.73224043715847\nConfusion matrix:\n[[113 24 63]\n [ 3 98 2]\n [ 6 0 57]]\nClassification report:\n precision recall f1-score support\n\n class 0 0.56 0.93 0.70 122\n class 1 0.95 0.80 0.87 122\n class 2 0.90 0.47 0.62 122\n\n accuracy 0.73 366\n macro avg 0.81 0.73 0.73 366\nweighted avg 0.81 0.73 0.73 366\n\n" ], [ "#Voting\nmodel1 = LogisticRegression(max_iter=10000, solver='newton-cg', C=0.0001)\nmodel2 = DecisionTreeClassifier(criterion = 'entropy', max_depth = 9, splitter = 'best')\nmodel3 = SVC(C = 10, kernel = 'rbf')\n\nvoting_model = 
VotingClassifier(estimators = [('log_reg', model1), ('dt', model2), ('svc', model3)], voting='hard')\nvoting_model.fit(X_train_pca, y_train_res)\npred = voting_model.predict(X_test_pca)\nprint(\"Test set score:\", voting_model.score(X_test_pca, y_test_res))\nprint(f\"Confusion matrix:\\n{metrics.confusion_matrix(pred, y_test_res)}\")", "Test set score: 0.825136612021858\nConfusion matrix:\n[[104 8 33]\n [ 3 109 0]\n [ 15 5 89]]\n" ] ] ]
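The ensemble above uses hard (majority) voting. Below is a minimal sketch of the soft-voting variant, assuming the same X_train_pca / y_train_res / X_test_pca / y_test_res arrays created earlier in this notebook; soft voting averages predicted class probabilities, so the SVC needs probability=True.

```python
# Soft-voting variant of the ensemble above (illustrative sketch, not part of
# the original notebook). Assumes X_train_pca / y_train_res / X_test_pca /
# y_test_res from earlier in this notebook.
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC

soft_model = VotingClassifier(
    estimators=[
        ("log_reg", LogisticRegression(max_iter=10000, solver="newton-cg", C=0.0001)),
        ("dt", DecisionTreeClassifier(criterion="entropy", max_depth=9, random_state=202)),
        # soft voting averages predict_proba outputs, so SVC needs probability=True
        ("svc", SVC(C=10, kernel="rbf", probability=True, random_state=202)),
    ],
    voting="soft",
)
soft_model.fit(X_train_pca, y_train_res)
print("Test set score:", soft_model.score(X_test_pca, y_test_res))
```

Soft voting tends to help when the base models produce reasonably calibrated probabilities; enabling probability=True does make SVC training slower.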
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a86b9f465a2957ad716c724edd02caaad0f8ce
838
ipynb
Jupyter Notebook
10 Days of Statistics/Day-3/Day-3-Drawing Marbles.ipynb
krithikgokuls/HackerRank
eb6c95e16688c02921c1df6b6ea613667a251457
[ "MIT" ]
61
2017-04-27T13:45:12.000Z
2022-01-27T11:40:15.000Z
10 Days of Statistics/Day-3/Day-3-Drawing Marbles.ipynb
krithikgokuls/HackerRank
eb6c95e16688c02921c1df6b6ea613667a251457
[ "MIT" ]
1
2017-06-24T14:16:06.000Z
2017-06-24T14:16:28.000Z
10 Days of Statistics/Day-3/Day-3-Drawing Marbles.ipynb
krithikgokuls/HackerRank
eb6c95e16688c02921c1df6b6ea613667a251457
[ "MIT" ]
78
2017-07-05T11:48:20.000Z
2022-02-08T08:04:22.000Z
20.95
221
0.562053
[ [ [ "#### A bag contains 3 red marbles and 4 blue marbles. Then, 2 marbles are drawn from the bag, at random, without replacement. If the first marble drawn is red, what is the probability that the second marble is blue?", "_____no_output_____" ], [ "##### Ans: 2 / 3", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown" ] ]
d0a86e772e78998fbdf6056be06e1dc624da9af6
35,089
ipynb
Jupyter Notebook
__writing/2021-02-03-template.ipynb
joaodmrodrigues/elements-financial-machine-learning
9e3dea6ac558576db94d926c94e46bfe8ff42e6f
[ "Apache-2.0" ]
null
null
null
__writing/2021-02-03-template.ipynb
joaodmrodrigues/elements-financial-machine-learning
9e3dea6ac558576db94d926c94e46bfe8ff42e6f
[ "Apache-2.0" ]
null
null
null
__writing/2021-02-03-template.ipynb
joaodmrodrigues/elements-financial-machine-learning
9e3dea6ac558576db94d926c94e46bfe8ff42e6f
[ "Apache-2.0" ]
null
null
null
188.650538
31,860
0.922825
[ [ [ "# Title\n> Small summary\n\n- toc: true\n- branch: master\n- badges: true\n- comments: true\n- categories: [cathegory 1, other cathegory]\n- image: images/post_image_generic.jpg\n- hide: false\n- search_exclude: false\n- author: Joao Rodrigues", "_____no_output_____" ], [ "\n\n", "_____no_output_____" ], [ "## Subtitle", "_____no_output_____" ], [ "### Subsubtitle", "_____no_output_____" ], [ "And some text here", "_____no_output_____" ] ], [ [ "#collapse-hide\nvar1 = 5", "_____no_output_____" ], [ "#hide_input\nprint(\"This code cell was not shown, only the output\")", "This code cell was not shown, only the output\n" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\nx = np.linspace(1, 100, 100)\ny = np.random.randn(100)\n\nplt.plot(x,y)\nplt.show()", "_____no_output_____" ] ], [ [ "## Callout boxes", "_____no_output_____" ], [ "> Warning: This is a warning", "_____no_output_____" ], [ "> Important: This is important", "_____no_output_____" ], [ "> Tip: This is a tip", "_____no_output_____" ], [ "> Note: This is a note", "_____no_output_____" ], [ "> Note: This is a note with a [link](https://en.wikipedia.org/wiki/Main_Page)", "_____no_output_____" ], [ "This is a footnote {% fn 1 %}\n{{ \"This is the footnote.\" | fndetail: 1}}", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0a87170a822a1bf3f86d88162724c8d855438e4
823,133
ipynb
Jupyter Notebook
notebooks/julia-01-least-square.ipynb
xijiang/aabg
e911db484ce52ef34c94ea5e3a4727b2fcc01d97
[ "MIT" ]
null
null
null
notebooks/julia-01-least-square.ipynb
xijiang/aabg
e911db484ce52ef34c94ea5e3a4727b2fcc01d97
[ "MIT" ]
null
null
null
notebooks/julia-01-least-square.ipynb
xijiang/aabg
e911db484ce52ef34c94ea5e3a4727b2fcc01d97
[ "MIT" ]
null
null
null
119.208255
360
0.589048
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0a87f32e1aa4b3fca4ef30785442571093ce210
6,012
ipynb
Jupyter Notebook
01_Getting_&_Knowing_Your_Data/Chipotle/.ipynb_checkpoints/Exercises-checkpoint.ipynb
Gioparra91/Pandas-exercise
85cc6f34055fbd36959f1799c748dcddf722c5da
[ "BSD-3-Clause" ]
null
null
null
01_Getting_&_Knowing_Your_Data/Chipotle/.ipynb_checkpoints/Exercises-checkpoint.ipynb
Gioparra91/Pandas-exercise
85cc6f34055fbd36959f1799c748dcddf722c5da
[ "BSD-3-Clause" ]
null
null
null
01_Getting_&_Knowing_Your_Data/Chipotle/.ipynb_checkpoints/Exercises-checkpoint.ipynb
Gioparra91/Pandas-exercise
85cc6f34055fbd36959f1799c748dcddf722c5da
[ "BSD-3-Clause" ]
null
null
null
18.054054
135
0.498503
[ [ [ "# Ex2 - Getting and Knowing your Data", "_____no_output_____" ], [ "This time we are going to pull data directly from the internet.\nSpecial thanks to: https://github.com/justmarkham for sharing the dataset and materials.\n\n### Step 1. Import the necessary libraries", "_____no_output_____" ], [ "### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv). ", "_____no_output_____" ], [ "### Step 3. Assign it to a variable called chipo.", "_____no_output_____" ], [ "### Step 4. See the first 10 entries", "_____no_output_____" ], [ "### Step 5. What is the number of observations in the dataset?", "_____no_output_____" ] ], [ [ "# Solution 1\n\n", "_____no_output_____" ], [ "# Solution 2\n\n", "_____no_output_____" ] ], [ [ "### Step 6. What is the number of columns in the dataset?", "_____no_output_____" ], [ "### Step 7. Print the name of all the columns.", "_____no_output_____" ], [ "### Step 8. How is the dataset indexed?", "_____no_output_____" ], [ "### Step 9. Which was the most-ordered item? ", "_____no_output_____" ], [ "### Step 10. For the most-ordered item, how many items were ordered?", "_____no_output_____" ], [ "### Step 11. What was the most ordered item in the choice_description column?", "_____no_output_____" ], [ "### Step 12. How many items were orderd in total?", "_____no_output_____" ], [ "### Step 13. Turn the item price into a float", "_____no_output_____" ], [ "#### Step 13.a. Check the item price type", "_____no_output_____" ], [ "#### Step 13.b. Create a lambda function and change the type of item price", "_____no_output_____" ], [ "#### Step 13.c. Check the item price type", "_____no_output_____" ], [ "### Step 14. How much was the revenue for the period in the dataset?", "_____no_output_____" ], [ "### Step 15. How many orders were made in the period?", "_____no_output_____" ], [ "### Step 16. What is the average revenue amount per order?", "_____no_output_____" ] ], [ [ "# Solution 1\n\n", "_____no_output_____" ], [ "# Solution 2\n\n", "_____no_output_____" ] ], [ [ "### Step 17. How many different items are sold?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0a881b5a806a4e03340fe178503ffef4df855b6
79,165
ipynb
Jupyter Notebook
assignments/assignment2/PyTorch.ipynb
mirzaim/cs231n
d982c7f023a1cedd961b4104b3e652ce3c43e738
[ "MIT" ]
14
2021-11-01T12:45:47.000Z
2022-03-09T09:25:18.000Z
assignments/assignment2/PyTorch.ipynb
surajiitd/cs231n
d982c7f023a1cedd961b4104b3e652ce3c43e738
[ "MIT" ]
null
null
null
assignments/assignment2/PyTorch.ipynb
surajiitd/cs231n
d982c7f023a1cedd961b4104b3e652ce3c43e738
[ "MIT" ]
8
2021-11-08T10:59:46.000Z
2022-02-28T18:47:17.000Z
41.124675
890
0.561877
[ [ [ "# This mounts your Google Drive to the Colab VM.\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n# TODO: Enter the foldername in your Drive where you have saved the unzipped\n# assignment folder, e.g. 'cs231n/assignments/assignment1/'\nFOLDERNAME = None\nassert FOLDERNAME is not None, \"[!] Enter the foldername.\"\n\n# Now that we've mounted your Drive, this ensures that\n# the Python interpreter of the Colab VM can load\n# python files from within it.\nimport sys\nsys.path.append('/content/drive/My Drive/{}'.format(FOLDERNAME))\n\n# This downloads the CIFAR-10 dataset to your Drive\n# if it doesn't already exist.\n%cd /content/drive/My\\ Drive/$FOLDERNAME/cs231n/datasets/\n!bash get_datasets.sh\n%cd /content/drive/My\\ Drive/$FOLDERNAME", "_____no_output_____" ] ], [ [ "# Introduction to PyTorch\n\nYou've written a lot of code in this assignment to provide a whole host of neural network functionality. Dropout, Batch Norm, and 2D convolutions are some of the workhorses of deep learning in computer vision. You've also worked hard to make your code efficient and vectorized.\n\nFor the last part of this assignment, though, we're going to leave behind your beautiful codebase and instead migrate to one of two popular deep learning frameworks: in this instance, PyTorch (or TensorFlow, if you choose to work with that notebook).", "_____no_output_____" ], [ "## Why do we use deep learning frameworks?\n\n* Our code will now run on GPUs! This will allow our models to train much faster. When using a framework like PyTorch or TensorFlow you can harness the power of the GPU for your own custom neural network architectures without having to write CUDA code directly (which is beyond the scope of this class).\n* In this class, we want you to be ready to use one of these frameworks for your project so you can experiment more efficiently than if you were writing every feature you want to use by hand. \n* We want you to stand on the shoulders of giants! TensorFlow and PyTorch are both excellent frameworks that will make your lives a lot easier, and now that you understand their guts, you are free to use them :) \n* Finally, we want you to be exposed to the sort of deep learning code you might run into in academia or industry.\n\n## What is PyTorch?\n\nPyTorch is a system for executing dynamic computational graphs over Tensor objects that behave similarly as numpy ndarray. It comes with a powerful automatic differentiation engine that removes the need for manual back-propagation. \n\n## How do I learn PyTorch?\n\nOne of our former instructors, Justin Johnson, made an excellent [tutorial](https://github.com/jcjohnson/pytorch-examples) for PyTorch. \n\nYou can also find the detailed [API doc](http://pytorch.org/docs/stable/index.html) here. If you have other questions that are not addressed by the API docs, the [PyTorch forum](https://discuss.pytorch.org/) is a much better place to ask than StackOverflow.", "_____no_output_____" ], [ "# Table of Contents\n\nThis assignment has 5 parts. You will learn PyTorch on **three different levels of abstraction**, which will help you understand it better and prepare you for the final project. \n\n1. Part I, Preparation: we will use CIFAR-10 dataset.\n2. Part II, Barebones PyTorch: **Abstraction level 1**, we will work directly with the lowest-level PyTorch Tensors. \n3. Part III, PyTorch Module API: **Abstraction level 2**, we will use `nn.Module` to define arbitrary neural network architecture. \n4. 
Part IV, PyTorch Sequential API: **Abstraction level 3**, we will use `nn.Sequential` to define a linear feed-forward network very conveniently. \n5. Part V, CIFAR-10 open-ended challenge: please implement your own network to get as high accuracy as possible on CIFAR-10. You can experiment with any layer, optimizer, hyperparameters or other advanced features. \n\nHere is a table of comparison:\n\n| API | Flexibility | Convenience |\n|---------------|-------------|-------------|\n| Barebone | High | Low |\n| `nn.Module` | High | Medium |\n| `nn.Sequential` | Low | High |", "_____no_output_____" ], [ "# GPU\n\nYou can manually switch to a GPU device on Colab by clicking `Runtime -> Change runtime type` and selecting `GPU` under `Hardware Accelerator`. You should do this before running the following cells to import packages, since the kernel gets restarted upon switching runtimes.", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import sampler\n\nimport torchvision.datasets as dset\nimport torchvision.transforms as T\n\nimport numpy as np\n\nUSE_GPU = True\ndtype = torch.float32 # We will be using float throughout this tutorial.\n\nif USE_GPU and torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n device = torch.device('cpu')\n\n# Constant to control how frequently we print train loss.\nprint_every = 100\nprint('using device:', device)", "using device: cuda\n" ] ], [ [ "# Part I. Preparation\n\nNow, let's load the CIFAR-10 dataset. This might take a couple minutes the first time you do it, but the files should stay cached after that.\n\nIn previous parts of the assignment we had to write our own code to download the CIFAR-10 dataset, preprocess it, and iterate through it in minibatches; PyTorch provides convenient tools to automate this process for us.", "_____no_output_____" ] ], [ [ "NUM_TRAIN = 49000\n\n# The torchvision.transforms package provides tools for preprocessing data\n# and for performing data augmentation; here we set up a transform to\n# preprocess the data by subtracting the mean RGB value and dividing by the\n# standard deviation of each RGB value; we've hardcoded the mean and std.\ntransform = T.Compose([\n T.ToTensor(),\n T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])\n\n# We set up a Dataset object for each split (train / val / test); Datasets load\n# training examples one at a time, so we wrap each Dataset in a DataLoader which\n# iterates through the Dataset and forms minibatches. We divide the CIFAR-10\n# training set into train and val sets by passing a Sampler object to the\n# DataLoader telling how it should sample from the underlying Dataset.\ncifar10_train = dset.CIFAR10('./cs231n/datasets', train=True, download=True,\n transform=transform)\nloader_train = DataLoader(cifar10_train, batch_size=64, \n sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN)))\n\ncifar10_val = dset.CIFAR10('./cs231n/datasets', train=True, download=True,\n transform=transform)\nloader_val = DataLoader(cifar10_val, batch_size=64, \n sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN, 50000)))\n\ncifar10_test = dset.CIFAR10('./cs231n/datasets', train=False, download=True, \n transform=transform)\nloader_test = DataLoader(cifar10_test, batch_size=64)", "Files already downloaded and verified\nFiles already downloaded and verified\nFiles already downloaded and verified\n" ] ], [ [ "# Part II. 
Barebones PyTorch\n\nPyTorch ships with high-level APIs to help us define model architectures conveniently, which we will cover in Part II of this tutorial. In this section, we will start with the barebone PyTorch elements to understand the autograd engine better. After this exercise, you will come to appreciate the high-level model API more.\n\nWe will start with a simple fully-connected ReLU network with two hidden layers and no biases for CIFAR classification. \nThis implementation computes the forward pass using operations on PyTorch Tensors, and uses PyTorch autograd to compute gradients. It is important that you understand every line, because you will write a harder version after the example.\n\nWhen we create a PyTorch Tensor with `requires_grad=True`, then operations involving that Tensor will not just compute values; they will also build up a computational graph in the background, allowing us to easily backpropagate through the graph to compute gradients of some Tensors with respect to a downstream loss. Concretely if x is a Tensor with `x.requires_grad == True` then after backpropagation `x.grad` will be another Tensor holding the gradient of x with respect to the scalar loss at the end.", "_____no_output_____" ], [ "### PyTorch Tensors: Flatten Function\nA PyTorch Tensor is conceptionally similar to a numpy array: it is an n-dimensional grid of numbers, and like numpy PyTorch provides many functions to efficiently operate on Tensors. As a simple example, we provide a `flatten` function below which reshapes image data for use in a fully-connected neural network.\n\nRecall that image data is typically stored in a Tensor of shape N x C x H x W, where:\n\n* N is the number of datapoints\n* C is the number of channels\n* H is the height of the intermediate feature map in pixels\n* W is the height of the intermediate feature map in pixels\n\nThis is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we use fully connected affine layers to process the image, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a \"flatten\" operation to collapse the `C x H x W` values per representation into a single long vector. The flatten function below first reads in the N, C, H, and W values from a given batch of data, and then returns a \"view\" of that data. \"View\" is analogous to numpy's \"reshape\" method: it reshapes x's dimensions to be N x ??, where ?? is allowed to be anything (in this case, it will be C x H x W, but we don't need to specify that explicitly). ", "_____no_output_____" ] ], [ [ "def flatten(x):\n N = x.shape[0] # read in N, C, H, W\n return x.view(N, -1) # \"flatten\" the C * H * W values into a single vector per image\n\ndef test_flatten():\n x = torch.arange(12).view(2, 1, 3, 2)\n print('Before flattening: ', x)\n print('After flattening: ', flatten(x))\n\ntest_flatten()", "Before flattening: tensor([[[[ 0, 1],\n [ 2, 3],\n [ 4, 5]]],\n\n\n [[[ 6, 7],\n [ 8, 9],\n [10, 11]]]])\nAfter flattening: tensor([[ 0, 1, 2, 3, 4, 5],\n [ 6, 7, 8, 9, 10, 11]])\n" ] ], [ [ "### Barebones PyTorch: Two-Layer Network\n\nHere we define a function `two_layer_fc` which performs the forward pass of a two-layer fully-connected ReLU network on a batch of image data. 
After defining the forward pass we check that it doesn't crash and that it produces outputs of the right shape by running zeros through the network.\n\nYou don't have to write any code here, but it's important that you read and understand the implementation.", "_____no_output_____" ] ], [ [ "import torch.nn.functional as F # useful stateless functions\n\ndef two_layer_fc(x, params):\n \"\"\"\n A fully-connected neural networks; the architecture is:\n NN is fully connected -> ReLU -> fully connected layer.\n Note that this function only defines the forward pass; \n PyTorch will take care of the backward pass for us.\n \n The input to the network will be a minibatch of data, of shape\n (N, d1, ..., dM) where d1 * ... * dM = D. The hidden layer will have H units,\n and the output layer will produce scores for C classes.\n \n Inputs:\n - x: A PyTorch Tensor of shape (N, d1, ..., dM) giving a minibatch of\n input data.\n - params: A list [w1, w2] of PyTorch Tensors giving weights for the network;\n w1 has shape (D, H) and w2 has shape (H, C).\n \n Returns:\n - scores: A PyTorch Tensor of shape (N, C) giving classification scores for\n the input data x.\n \"\"\"\n # first we flatten the image\n x = flatten(x) # shape: [batch_size, C x H x W]\n \n w1, w2 = params\n \n # Forward pass: compute predicted y using operations on Tensors. Since w1 and\n # w2 have requires_grad=True, operations involving these Tensors will cause\n # PyTorch to build a computational graph, allowing automatic computation of\n # gradients. Since we are no longer implementing the backward pass by hand we\n # don't need to keep references to intermediate values.\n # you can also use `.clamp(min=0)`, equivalent to F.relu()\n x = F.relu(x.mm(w1))\n x = x.mm(w2)\n return x\n \n\ndef two_layer_fc_test():\n hidden_layer_size = 42\n x = torch.zeros((64, 50), dtype=dtype) # minibatch size 64, feature dimension 50\n w1 = torch.zeros((50, hidden_layer_size), dtype=dtype)\n w2 = torch.zeros((hidden_layer_size, 10), dtype=dtype)\n scores = two_layer_fc(x, [w1, w2])\n print(scores.size()) # you should see [64, 10]\n\ntwo_layer_fc_test()", "torch.Size([64, 10])\n" ] ], [ [ "### Barebones PyTorch: Three-Layer ConvNet\n\nHere you will complete the implementation of the function `three_layer_convnet`, which will perform the forward pass of a three-layer convolutional network. Like above, we can immediately test our implementation by passing zeros through the network. The network should have the following architecture:\n\n1. A convolutional layer (with bias) with `channel_1` filters, each with shape `KW1 x KH1`, and zero-padding of two\n2. ReLU nonlinearity\n3. A convolutional layer (with bias) with `channel_2` filters, each with shape `KW2 x KH2`, and zero-padding of one\n4. ReLU nonlinearity\n5. 
Fully-connected layer with bias, producing scores for C classes.\n\nNote that we have **no softmax activation** here after our fully-connected layer: this is because PyTorch's cross entropy loss performs a softmax activation for you, and by bundling that step in makes computation more efficient.\n\n**HINT**: For convolutions: http://pytorch.org/docs/stable/nn.html#torch.nn.functional.conv2d; pay attention to the shapes of convolutional filters!", "_____no_output_____" ] ], [ [ "def three_layer_convnet(x, params):\n \"\"\"\n Performs the forward pass of a three-layer convolutional network with the\n architecture defined above.\n\n Inputs:\n - x: A PyTorch Tensor of shape (N, 3, H, W) giving a minibatch of images\n - params: A list of PyTorch Tensors giving the weights and biases for the\n network; should contain the following:\n - conv_w1: PyTorch Tensor of shape (channel_1, 3, KH1, KW1) giving weights\n for the first convolutional layer\n - conv_b1: PyTorch Tensor of shape (channel_1,) giving biases for the first\n convolutional layer\n - conv_w2: PyTorch Tensor of shape (channel_2, channel_1, KH2, KW2) giving\n weights for the second convolutional layer\n - conv_b2: PyTorch Tensor of shape (channel_2,) giving biases for the second\n convolutional layer\n - fc_w: PyTorch Tensor giving weights for the fully-connected layer. Can you\n figure out what the shape should be?\n - fc_b: PyTorch Tensor giving biases for the fully-connected layer. Can you\n figure out what the shape should be?\n \n Returns:\n - scores: PyTorch Tensor of shape (N, C) giving classification scores for x\n \"\"\"\n conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b = params\n scores = None\n ################################################################################\n # TODO: Implement the forward pass for the three-layer ConvNet. 
#\n ################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n x = F.conv2d(x, conv_w1, conv_b1, padding=2)\n x = F.relu(x)\n x = F.conv2d(x, conv_w2, conv_b2, padding=1)\n x = F.relu(x)\n scores = flatten(x).mm(fc_w) + fc_b\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ################################################################################\n # END OF YOUR CODE #\n ################################################################################\n return scores", "_____no_output_____" ] ], [ [ "After defining the forward pass of the ConvNet above, run the following cell to test your implementation.\n\nWhen you run this function, scores should have shape (64, 10).", "_____no_output_____" ] ], [ [ "def three_layer_convnet_test():\n x = torch.zeros((64, 3, 32, 32), dtype=dtype) # minibatch size 64, image size [3, 32, 32]\n\n conv_w1 = torch.zeros((6, 3, 5, 5), dtype=dtype) # [out_channel, in_channel, kernel_H, kernel_W]\n conv_b1 = torch.zeros((6,)) # out_channel\n conv_w2 = torch.zeros((9, 6, 3, 3), dtype=dtype) # [out_channel, in_channel, kernel_H, kernel_W]\n conv_b2 = torch.zeros((9,)) # out_channel\n\n # you must calculate the shape of the tensor after two conv layers, before the fully-connected layer\n fc_w = torch.zeros((9 * 32 * 32, 10))\n fc_b = torch.zeros(10)\n\n scores = three_layer_convnet(x, [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b])\n print(scores.size()) # you should see [64, 10]\nthree_layer_convnet_test()", "torch.Size([64, 10])\n" ] ], [ [ "### Barebones PyTorch: Initialization\nLet's write a couple utility methods to initialize the weight matrices for our models.\n\n- `random_weight(shape)` initializes a weight tensor with the Kaiming normalization method.\n- `zero_weight(shape)` initializes a weight tensor with all zeros. Useful for instantiating bias parameters.\n\nThe `random_weight` function uses the Kaiming normal initialization method, described in:\n\nHe et al, *Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification*, ICCV 2015, https://arxiv.org/abs/1502.01852", "_____no_output_____" ] ], [ [ "def random_weight(shape):\n \"\"\"\n Create random Tensors for weights; setting requires_grad=True means that we\n want to compute gradients for these Tensors during the backward pass.\n We use Kaiming normalization: sqrt(2 / fan_in)\n \"\"\"\n if len(shape) == 2: # FC weight\n fan_in = shape[0]\n else:\n fan_in = np.prod(shape[1:]) # conv weight [out_channel, in_channel, kH, kW]\n # randn is standard normal distribution generator. \n w = torch.randn(shape, device=device, dtype=dtype) * np.sqrt(2. / fan_in)\n w.requires_grad = True\n return w\n\ndef zero_weight(shape):\n return torch.zeros(shape, device=device, dtype=dtype, requires_grad=True)\n\n# create a weight of shape [3 x 5]\n# you should see the type `torch.cuda.FloatTensor` if you use GPU. \n# Otherwise it should be `torch.FloatTensor`\nrandom_weight((3, 5))", "_____no_output_____" ] ], [ [ "### Barebones PyTorch: Check Accuracy\nWhen training the model we will use the following function to check the accuracy of our model on the training or validation sets.\n\nWhen checking accuracy we don't need to compute any gradients; as a result we don't need PyTorch to build a computational graph for us when we compute scores. 
To prevent a graph from being built we scope our computation under a `torch.no_grad()` context manager.", "_____no_output_____" ] ], [ [ "def check_accuracy_part2(loader, model_fn, params):\n \"\"\"\n Check the accuracy of a classification model.\n \n Inputs:\n - loader: A DataLoader for the data split we want to check\n - model_fn: A function that performs the forward pass of the model,\n with the signature scores = model_fn(x, params)\n - params: List of PyTorch Tensors giving parameters of the model\n \n Returns: Nothing, but prints the accuracy of the model\n \"\"\"\n split = 'val' if loader.dataset.train else 'test'\n print('Checking accuracy on the %s set' % split)\n num_correct, num_samples = 0, 0\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device, dtype=dtype) # move to device, e.g. GPU\n y = y.to(device=device, dtype=torch.int64)\n scores = model_fn(x, params)\n _, preds = scores.max(1)\n num_correct += (preds == y).sum()\n num_samples += preds.size(0)\n acc = float(num_correct) / num_samples\n print('Got %d / %d correct (%.2f%%)' % (num_correct, num_samples, 100 * acc))", "_____no_output_____" ] ], [ [ "### BareBones PyTorch: Training Loop\nWe can now set up a basic training loop to train our network. We will train the model using stochastic gradient descent without momentum. We will use `torch.functional.cross_entropy` to compute the loss; you can [read about it here](http://pytorch.org/docs/stable/nn.html#cross-entropy).\n\nThe training loop takes as input the neural network function, a list of initialized parameters (`[w1, w2]` in our example), and learning rate.", "_____no_output_____" ] ], [ [ "def train_part2(model_fn, params, learning_rate):\n \"\"\"\n Train a model on CIFAR-10.\n \n Inputs:\n - model_fn: A Python function that performs the forward pass of the model.\n It should have the signature scores = model_fn(x, params) where x is a\n PyTorch Tensor of image data, params is a list of PyTorch Tensors giving\n model weights, and scores is a PyTorch Tensor of shape (N, C) giving\n scores for the elements in x.\n - params: List of PyTorch Tensors giving weights for the model\n - learning_rate: Python scalar giving the learning rate to use for SGD\n \n Returns: Nothing\n \"\"\"\n for t, (x, y) in enumerate(loader_train):\n # Move the data to the proper device (GPU or CPU)\n x = x.to(device=device, dtype=dtype)\n y = y.to(device=device, dtype=torch.long)\n\n # Forward pass: compute scores and loss\n scores = model_fn(x, params)\n loss = F.cross_entropy(scores, y)\n\n # Backward pass: PyTorch figures out which Tensors in the computational\n # graph has requires_grad=True and uses backpropagation to compute the\n # gradient of the loss with respect to these Tensors, and stores the\n # gradients in the .grad attribute of each Tensor.\n loss.backward()\n\n # Update parameters. We don't want to backpropagate through the\n # parameter updates, so we scope the updates under a torch.no_grad()\n # context manager to prevent a computational graph from being built.\n with torch.no_grad():\n for w in params:\n w -= learning_rate * w.grad\n\n # Manually zero the gradients after running the backward pass\n w.grad.zero_()\n\n if t % print_every == 0:\n print('Iteration %d, loss = %.4f' % (t, loss.item()))\n check_accuracy_part2(loader_val, model_fn, params)\n print()", "_____no_output_____" ] ], [ [ "### BareBones PyTorch: Train a Two-Layer Network\nNow we are ready to run the training loop. 
We need to explicitly allocate tensors for the fully connected weights, `w1` and `w2`. \n\nEach minibatch of CIFAR has 64 examples, so the tensor shape is `[64, 3, 32, 32]`. \n\nAfter flattening, `x` shape should be `[64, 3 * 32 * 32]`. This will be the size of the first dimension of `w1`. \nThe second dimension of `w1` is the hidden layer size, which will also be the first dimension of `w2`. \n\nFinally, the output of the network is a 10-dimensional vector that represents the probability distribution over 10 classes. \n\nYou don't need to tune any hyperparameters but you should see accuracies above 40% after training for one epoch.", "_____no_output_____" ] ], [ [ "hidden_layer_size = 4000\nlearning_rate = 1e-2\n\nw1 = random_weight((3 * 32 * 32, hidden_layer_size))\nw2 = random_weight((hidden_layer_size, 10))\n\ntrain_part2(two_layer_fc, [w1, w2], learning_rate)", "Iteration 0, loss = 4.4703\nChecking accuracy on the val set\nGot 158 / 1000 correct (15.80%)\n\nIteration 100, loss = 2.5019\nChecking accuracy on the val set\nGot 358 / 1000 correct (35.80%)\n\nIteration 200, loss = 1.8195\nChecking accuracy on the val set\nGot 403 / 1000 correct (40.30%)\n\nIteration 300, loss = 1.8267\nChecking accuracy on the val set\nGot 405 / 1000 correct (40.50%)\n\nIteration 400, loss = 2.2147\nChecking accuracy on the val set\nGot 399 / 1000 correct (39.90%)\n\nIteration 500, loss = 1.3718\nChecking accuracy on the val set\nGot 435 / 1000 correct (43.50%)\n\nIteration 600, loss = 2.1412\nChecking accuracy on the val set\nGot 368 / 1000 correct (36.80%)\n\nIteration 700, loss = 1.6326\nChecking accuracy on the val set\nGot 458 / 1000 correct (45.80%)\n\n" ] ], [ [ "### BareBones PyTorch: Training a ConvNet\n\nIn the below you should use the functions defined above to train a three-layer convolutional network on CIFAR. The network should have the following architecture:\n\n1. Convolutional layer (with bias) with 32 5x5 filters, with zero-padding of 2\n2. ReLU\n3. Convolutional layer (with bias) with 16 3x3 filters, with zero-padding of 1\n4. ReLU\n5. Fully-connected layer (with bias) to compute scores for 10 classes\n\nYou should initialize your weight matrices using the `random_weight` function defined above, and you should initialize your bias vectors using the `zero_weight` function above.\n\nYou don't need to tune any hyperparameters, but if everything works correctly you should achieve an accuracy above 42% after one epoch.", "_____no_output_____" ] ], [ [ "learning_rate = 3e-3\n\nchannel_1 = 32\nchannel_2 = 16\n\nconv_w1 = None\nconv_b1 = None\nconv_w2 = None\nconv_b2 = None\nfc_w = None\nfc_b = None\n\n################################################################################\n# TODO: Initialize the parameters of a three-layer ConvNet. 
#\n################################################################################\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\nconv_w1 = random_weight((channel_1, 3, 5, 5))\nconv_b1 = zero_weight((channel_1, ))\nconv_w2 = random_weight((channel_2, channel_1, 3, 3))\nconv_b2 = zero_weight((channel_2, ))\nfc_w = random_weight((channel_2 * 32 * 32, 10))\nfc_b = zero_weight((10, ))\n\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\nparams = [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b]\ntrain_part2(three_layer_convnet, params, learning_rate)", "Iteration 0, loss = 3.2898\nChecking accuracy on the val set\nGot 123 / 1000 correct (12.30%)\n\nIteration 100, loss = 1.8016\nChecking accuracy on the val set\nGot 369 / 1000 correct (36.90%)\n\nIteration 200, loss = 1.5645\nChecking accuracy on the val set\nGot 400 / 1000 correct (40.00%)\n\nIteration 300, loss = 1.7513\nChecking accuracy on the val set\nGot 396 / 1000 correct (39.60%)\n\nIteration 400, loss = 1.5842\nChecking accuracy on the val set\nGot 444 / 1000 correct (44.40%)\n\nIteration 500, loss = 1.3763\nChecking accuracy on the val set\nGot 464 / 1000 correct (46.40%)\n\nIteration 600, loss = 1.4629\nChecking accuracy on the val set\nGot 468 / 1000 correct (46.80%)\n\nIteration 700, loss = 1.4830\nChecking accuracy on the val set\nGot 476 / 1000 correct (47.60%)\n\n" ] ], [ [ "# Part III. PyTorch Module API\n\nBarebone PyTorch requires that we track all the parameter tensors by hand. This is fine for small networks with a few tensors, but it would be extremely inconvenient and error-prone to track tens or hundreds of tensors in larger networks.\n\nPyTorch provides the `nn.Module` API for you to define arbitrary network architectures, while tracking every learnable parameters for you. In Part II, we implemented SGD ourselves. PyTorch also provides the `torch.optim` package that implements all the common optimizers, such as RMSProp, Adagrad, and Adam. It even supports approximate second-order methods like L-BFGS! You can refer to the [doc](http://pytorch.org/docs/master/optim.html) for the exact specifications of each optimizer.\n\nTo use the Module API, follow the steps below:\n\n1. Subclass `nn.Module`. Give your network class an intuitive name like `TwoLayerFC`. \n\n2. In the constructor `__init__()`, define all the layers you need as class attributes. Layer objects like `nn.Linear` and `nn.Conv2d` are themselves `nn.Module` subclasses and contain learnable parameters, so that you don't have to instantiate the raw tensors yourself. `nn.Module` will track these internal parameters for you. Refer to the [doc](http://pytorch.org/docs/master/nn.html) to learn more about the dozens of builtin layers. **Warning**: don't forget to call the `super().__init__()` first!\n\n3. In the `forward()` method, define the *connectivity* of your network. You should use the attributes defined in `__init__` as function calls that take tensor as input and output the \"transformed\" tensor. Do *not* create any new layers with learnable parameters in `forward()`! All of them must be declared upfront in `__init__`. 
\n\nAfter you define your Module subclass, you can instantiate it as an object and call it just like the NN forward function in part II.\n\n### Module API: Two-Layer Network\nHere is a concrete example of a 2-layer fully connected network:", "_____no_output_____" ] ], [ [ "class TwoLayerFC(nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super().__init__()\n # assign layer objects to class attributes\n self.fc1 = nn.Linear(input_size, hidden_size)\n # nn.init package contains convenient initialization methods\n # http://pytorch.org/docs/master/nn.html#torch-nn-init \n nn.init.kaiming_normal_(self.fc1.weight)\n self.fc2 = nn.Linear(hidden_size, num_classes)\n nn.init.kaiming_normal_(self.fc2.weight)\n \n def forward(self, x):\n # forward always defines connectivity\n x = flatten(x)\n scores = self.fc2(F.relu(self.fc1(x)))\n return scores\n\ndef test_TwoLayerFC():\n input_size = 50\n x = torch.zeros((64, input_size), dtype=dtype) # minibatch size 64, feature dimension 50\n model = TwoLayerFC(input_size, 42, 10)\n scores = model(x)\n print(scores.size()) # you should see [64, 10]\ntest_TwoLayerFC()", "torch.Size([64, 10])\n" ] ], [ [ "### Module API: Three-Layer ConvNet\nIt's your turn to implement a 3-layer ConvNet followed by a fully connected layer. The network architecture should be the same as in Part II:\n\n1. Convolutional layer with `channel_1` 5x5 filters with zero-padding of 2\n2. ReLU\n3. Convolutional layer with `channel_2` 3x3 filters with zero-padding of 1\n4. ReLU\n5. Fully-connected layer to `num_classes` classes\n\nYou should initialize the weight matrices of the model using the Kaiming normal initialization method.\n\n**HINT**: http://pytorch.org/docs/stable/nn.html#conv2d\n\nAfter you implement the three-layer ConvNet, the `test_ThreeLayerConvNet` function will run your implementation; it should print `(64, 10)` for the shape of the output scores.", "_____no_output_____" ] ], [ [ "class ThreeLayerConvNet(nn.Module):\n def __init__(self, in_channel, channel_1, channel_2, num_classes):\n super().__init__()\n ########################################################################\n # TODO: Set up the layers you need for a three-layer ConvNet with the #\n # architecture defined above. #\n ########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n self.conv1 = nn.Conv2d(in_channel, channel_1, (5, 5), padding=2)\n nn.init.kaiming_normal_(self.conv1.weight)\n self.conv2 = nn.Conv2d(channel_1, channel_2, (3, 3), padding=1)\n nn.init.kaiming_normal_(self.conv2.weight)\n self.fc = nn.Linear(channel_2 * 32 * 32, num_classes)\n nn.init.kaiming_normal_(self.fc.weight)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ########################################################################\n # END OF YOUR CODE # \n ########################################################################\n\n def forward(self, x):\n scores = None\n ########################################################################\n # TODO: Implement the forward function for a 3-layer ConvNet. 
you #\n # should use the layers you defined in __init__ and specify the #\n # connectivity of those layers in forward() #\n ########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n scores = self.fc(flatten(x))\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ########################################################################\n # END OF YOUR CODE #\n ########################################################################\n return scores\n\n\ndef test_ThreeLayerConvNet():\n x = torch.zeros((64, 3, 32, 32), dtype=dtype) # minibatch size 64, image size [3, 32, 32]\n model = ThreeLayerConvNet(in_channel=3, channel_1=12, channel_2=8, num_classes=10)\n scores = model(x)\n print(scores.size()) # you should see [64, 10]\ntest_ThreeLayerConvNet()", "torch.Size([64, 10])\n" ] ], [ [ "### Module API: Check Accuracy\nGiven the validation or test set, we can check the classification accuracy of a neural network. \n\nThis version is slightly different from the one in part II. You don't manually pass in the parameters anymore.", "_____no_output_____" ] ], [ [ "def check_accuracy_part34(loader, model):\n if loader.dataset.train:\n print('Checking accuracy on validation set')\n else:\n print('Checking accuracy on test set') \n num_correct = 0\n num_samples = 0\n model.eval() # set model to evaluation mode\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device, dtype=dtype) # move to device, e.g. GPU\n y = y.to(device=device, dtype=torch.long)\n scores = model(x)\n _, preds = scores.max(1)\n num_correct += (preds == y).sum()\n num_samples += preds.size(0)\n acc = float(num_correct) / num_samples\n print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))", "_____no_output_____" ] ], [ [ "### Module API: Training Loop\nWe also use a slightly different training loop. Rather than updating the values of the weights ourselves, we use an Optimizer object from the `torch.optim` package, which abstract the notion of an optimization algorithm and provides implementations of most of the algorithms commonly used to optimize neural networks.", "_____no_output_____" ] ], [ [ "def train_part34(model, optimizer, epochs=1):\n \"\"\"\n Train a model on CIFAR-10 using the PyTorch Module API.\n \n Inputs:\n - model: A PyTorch Module giving the model to train.\n - optimizer: An Optimizer object we will use to train the model\n - epochs: (Optional) A Python integer giving the number of epochs to train for\n \n Returns: Nothing, but prints model accuracies during training.\n \"\"\"\n model = model.to(device=device) # move the model parameters to CPU/GPU\n for e in range(epochs):\n for t, (x, y) in enumerate(loader_train):\n model.train() # put model to training mode\n x = x.to(device=device, dtype=dtype) # move to device, e.g. 
GPU\n y = y.to(device=device, dtype=torch.long)\n\n scores = model(x)\n loss = F.cross_entropy(scores, y)\n\n # Zero out all of the gradients for the variables which the optimizer\n # will update.\n optimizer.zero_grad()\n\n # This is the backwards pass: compute the gradient of the loss with\n # respect to each parameter of the model.\n loss.backward()\n\n # Actually update the parameters of the model using the gradients\n # computed by the backwards pass.\n optimizer.step()\n\n if t % print_every == 0:\n print('Iteration %d, loss = %.4f' % (t, loss.item()))\n check_accuracy_part34(loader_val, model)\n print()", "_____no_output_____" ] ], [ [ "### Module API: Train a Two-Layer Network\nNow we are ready to run the training loop. In contrast to part II, we don't explicitly allocate parameter tensors anymore.\n\nSimply pass the input size, hidden layer size, and number of classes (i.e. output size) to the constructor of `TwoLayerFC`. \n\nYou also need to define an optimizer that tracks all the learnable parameters inside `TwoLayerFC`.\n\nYou don't need to tune any hyperparameters, but you should see model accuracies above 40% after training for one epoch.", "_____no_output_____" ] ], [ [ "hidden_layer_size = 4000\nlearning_rate = 1e-2\nmodel = TwoLayerFC(3 * 32 * 32, hidden_layer_size, 10)\noptimizer = optim.SGD(model.parameters(), lr=learning_rate)\n\ntrain_part34(model, optimizer)", "Iteration 0, loss = 4.2223\nChecking accuracy on validation set\nGot 156 / 1000 correct (15.60)\n\nIteration 100, loss = 2.1946\nChecking accuracy on validation set\nGot 333 / 1000 correct (33.30)\n\nIteration 200, loss = 1.7546\nChecking accuracy on validation set\nGot 368 / 1000 correct (36.80)\n\nIteration 300, loss = 1.9863\nChecking accuracy on validation set\nGot 406 / 1000 correct (40.60)\n\nIteration 400, loss = 1.5470\nChecking accuracy on validation set\nGot 405 / 1000 correct (40.50)\n\nIteration 500, loss = 2.0345\nChecking accuracy on validation set\nGot 419 / 1000 correct (41.90)\n\nIteration 600, loss = 1.6698\nChecking accuracy on validation set\nGot 427 / 1000 correct (42.70)\n\nIteration 700, loss = 1.6145\nChecking accuracy on validation set\nGot 420 / 1000 correct (42.00)\n\n" ] ], [ [ "### Module API: Train a Three-Layer ConvNet\nYou should now use the Module API to train a three-layer ConvNet on CIFAR. This should look very similar to training the two-layer network! 
You don't need to tune any hyperparameters, but you should achieve above above 45% after training for one epoch.\n\nYou should train the model using stochastic gradient descent without momentum.", "_____no_output_____" ] ], [ [ "learning_rate = 3e-3\nchannel_1 = 32\nchannel_2 = 16\n\nmodel = None\noptimizer = None\n################################################################################\n# TODO: Instantiate your ThreeLayerConvNet model and a corresponding optimizer #\n################################################################################\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\nmodel = ThreeLayerConvNet(3, channel_1, channel_2, 10)\noptimizer = optim.SGD(model.parameters(), lr=learning_rate)\n\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\ntrain_part34(model, optimizer)", "Iteration 0, loss = 2.5947\nChecking accuracy on validation set\nGot 158 / 1000 correct (15.80)\n\nIteration 100, loss = 1.9449\nChecking accuracy on validation set\nGot 324 / 1000 correct (32.40)\n\nIteration 200, loss = 1.7619\nChecking accuracy on validation set\nGot 366 / 1000 correct (36.60)\n\nIteration 300, loss = 1.7287\nChecking accuracy on validation set\nGot 426 / 1000 correct (42.60)\n\nIteration 400, loss = 1.4725\nChecking accuracy on validation set\nGot 416 / 1000 correct (41.60)\n\nIteration 500, loss = 1.6120\nChecking accuracy on validation set\nGot 452 / 1000 correct (45.20)\n\nIteration 600, loss = 1.7758\nChecking accuracy on validation set\nGot 473 / 1000 correct (47.30)\n\nIteration 700, loss = 1.2409\nChecking accuracy on validation set\nGot 484 / 1000 correct (48.40)\n\n" ] ], [ [ "# Part IV. PyTorch Sequential API\n\nPart III introduced the PyTorch Module API, which allows you to define arbitrary learnable layers and their connectivity. \n\nFor simple models like a stack of feed forward layers, you still need to go through 3 steps: subclass `nn.Module`, assign layers to class attributes in `__init__`, and call each layer one by one in `forward()`. Is there a more convenient way? \n\nFortunately, PyTorch provides a container Module called `nn.Sequential`, which merges the above steps into one. 
It is not as flexible as `nn.Module`, because you cannot specify more complex topology than a feed-forward stack, but it's good enough for many use cases.\n\n### Sequential API: Two-Layer Network\nLet's see how to rewrite our two-layer fully connected network example with `nn.Sequential`, and train it using the training loop defined above.\n\nAgain, you don't need to tune any hyperparameters here, but you shoud achieve above 40% accuracy after one epoch of training.", "_____no_output_____" ] ], [ [ "# We need to wrap `flatten` function in a module in order to stack it\n# in nn.Sequential\nclass Flatten(nn.Module):\n def forward(self, x):\n return flatten(x)\n\nhidden_layer_size = 4000\nlearning_rate = 1e-2\n\nmodel = nn.Sequential(\n Flatten(),\n nn.Linear(3 * 32 * 32, hidden_layer_size),\n nn.ReLU(),\n nn.Linear(hidden_layer_size, 10),\n)\n\n# you can use Nesterov momentum in optim.SGD\noptimizer = optim.SGD(model.parameters(), lr=learning_rate,\n momentum=0.9, nesterov=True)\n\ntrain_part34(model, optimizer)", "Iteration 0, loss = 2.3411\nChecking accuracy on validation set\nGot 182 / 1000 correct (18.20)\n\nIteration 100, loss = 1.8701\nChecking accuracy on validation set\nGot 389 / 1000 correct (38.90)\n\nIteration 200, loss = 1.6901\nChecking accuracy on validation set\nGot 417 / 1000 correct (41.70)\n\nIteration 300, loss = 1.6675\nChecking accuracy on validation set\nGot 413 / 1000 correct (41.30)\n\nIteration 400, loss = 1.8973\nChecking accuracy on validation set\nGot 432 / 1000 correct (43.20)\n\nIteration 500, loss = 2.0647\nChecking accuracy on validation set\nGot 415 / 1000 correct (41.50)\n\nIteration 600, loss = 1.9500\nChecking accuracy on validation set\nGot 451 / 1000 correct (45.10)\n\nIteration 700, loss = 2.0941\nChecking accuracy on validation set\nGot 435 / 1000 correct (43.50)\n\n" ] ], [ [ "### Sequential API: Three-Layer ConvNet\nHere you should use `nn.Sequential` to define and train a three-layer ConvNet with the same architecture we used in Part III:\n\n1. Convolutional layer (with bias) with 32 5x5 filters, with zero-padding of 2\n2. ReLU\n3. Convolutional layer (with bias) with 16 3x3 filters, with zero-padding of 1\n4. ReLU\n5. Fully-connected layer (with bias) to compute scores for 10 classes\n\nYou can use the default PyTorch weight initialization.\n\nYou should optimize your model using stochastic gradient descent with Nesterov momentum 0.9.\n\nAgain, you don't need to tune any hyperparameters but you should see accuracy above 55% after one epoch of training.", "_____no_output_____" ] ], [ [ "channel_1 = 32\nchannel_2 = 16\nlearning_rate = 1e-2\n\nmodel = None\noptimizer = None\n\n################################################################################\n# TODO: Rewrite the 2-layer ConvNet with bias from Part III with the #\n# Sequential API. 
#\n################################################################################\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\nmodel = nn.Sequential(\n nn.Conv2d(3, channel_1, (5, 5), padding=2),\n nn.ReLU(),\n nn.Conv2d(channel_1, channel_2, (3, 3), padding=1),\n nn.ReLU(),\n Flatten(),\n nn.Linear(channel_2 * 32 * 32, 10)\n)\noptimizer = optim.SGD(model.parameters(), lr=learning_rate,\n momentum=0.9, nesterov=True)\n\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\ntrain_part34(model, optimizer)", "Iteration 0, loss = 2.3069\nChecking accuracy on validation set\nGot 87 / 1000 correct (8.70)\n\nIteration 100, loss = 1.8110\nChecking accuracy on validation set\nGot 431 / 1000 correct (43.10)\n\nIteration 200, loss = 1.5242\nChecking accuracy on validation set\nGot 480 / 1000 correct (48.00)\n\nIteration 300, loss = 1.3371\nChecking accuracy on validation set\nGot 523 / 1000 correct (52.30)\n\nIteration 400, loss = 1.5544\nChecking accuracy on validation set\nGot 572 / 1000 correct (57.20)\n\nIteration 500, loss = 1.3840\nChecking accuracy on validation set\nGot 571 / 1000 correct (57.10)\n\nIteration 600, loss = 1.1285\nChecking accuracy on validation set\nGot 595 / 1000 correct (59.50)\n\nIteration 700, loss = 1.2000\nChecking accuracy on validation set\nGot 598 / 1000 correct (59.80)\n\n" ] ], [ [ "# Part V. CIFAR-10 open-ended challenge\n\nIn this section, you can experiment with whatever ConvNet architecture you'd like on CIFAR-10. \n\nNow it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves **at least 70%** accuracy on the CIFAR-10 **validation** set within 10 epochs. You can use the check_accuracy and train functions from above. You can use either `nn.Module` or `nn.Sequential` API. \n\nDescribe what you did at the end of this notebook.\n\nHere are the official API documentation for each component. One note: what we call in the class \"spatial batch norm\" is called \"BatchNorm2D\" in PyTorch.\n\n* Layers in torch.nn package: http://pytorch.org/docs/stable/nn.html\n* Activations: http://pytorch.org/docs/stable/nn.html#non-linear-activations\n* Loss functions: http://pytorch.org/docs/stable/nn.html#loss-functions\n* Optimizers: http://pytorch.org/docs/stable/optim.html\n\n\n### Things you might try:\n- **Filter size**: Above we used 5x5; would smaller filters be more efficient?\n- **Number of filters**: Above we used 32 filters. Do more or fewer do better?\n- **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions?\n- **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster?\n- **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? 
Good architectures to try include:\n - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]\n - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]\n - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM]\n- **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture).\n- **Regularization**: Add l2 weight regularization, or perhaps use Dropout.\n\n### Tips for training\nFor each network architecture that you try, you should tune the learning rate and other hyperparameters. When doing this there are a couple important things to keep in mind:\n\n- If the parameters are working well, you should see improvement within a few hundred iterations\n- Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all.\n- Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs.\n- You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set.\n\n### Going above and beyond\nIf you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these, but don't miss the fun if you have time!\n\n- Alternative optimizers: you can try Adam, Adagrad, RMSprop, etc.\n- Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut.\n- Model ensembles\n- Data augmentation\n- New Architectures\n - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output.\n - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together.\n - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32)\n\n### Have fun and happy training! ", "_____no_output_____" ] ], [ [ "################################################################################\n# TODO: # \n# Experiment with any architectures, optimizers, and hyperparameters. #\n# Achieve AT LEAST 70% accuracy on the *validation set* within 10 epochs. #\n# #\n# Note that you can use the check_accuracy function to evaluate on either #\n# the test set or the validation set, by passing either loader_test or #\n# loader_val as the second argument to check_accuracy. You should not touch #\n# the test set until you have finished your architecture and hyperparameter #\n# tuning, and only run the test set once at the end to report a final value. 
#\n################################################################################\nmodel = None\noptimizer = None\n\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\nlearning_rate = 1e-2\nmodel = nn.Sequential(\n nn.Conv2d(3, 32, (3, 3), padding=1),\n nn.ReLU(),\n nn.Conv2d(32, 32, (3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d((2, 2)),\n nn.Conv2d(32, 64, (3, 3), padding=1),\n nn.ReLU(),\n nn.Conv2d(64, 64, (3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d((2, 2)),\n nn.Conv2d(64, 128, (3, 3), padding=1),\n nn.ReLU(),\n nn.Conv2d(128, 128, (3, 3), padding=1),\n nn.ReLU(),\n nn.Conv2d(128, 128, (3, 3), padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.MaxPool2d((2, 2)),\n Flatten(),\n nn.Linear(128 * 4 * 4, 512),\n nn.ReLU(),\n nn.Linear(512, 128),\n nn.ReLU(),\n nn.Linear(128, 10),\n)\noptimizer = optim.SGD(model.parameters(), lr=learning_rate,\n momentum=0.9, nesterov=True)\n\n# train_part34(model, optimizer, epochs=1)\n\n\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\n# You should get at least 70% accuracy\ntrain_part34(model, optimizer, epochs=10)", "Iteration 0, loss = 2.3076\nChecking accuracy on validation set\nGot 113 / 1000 correct (11.30)\n\nIteration 100, loss = 1.5164\nChecking accuracy on validation set\nGot 372 / 1000 correct (37.20)\n\nIteration 200, loss = 1.2780\nChecking accuracy on validation set\nGot 480 / 1000 correct (48.00)\n\nIteration 300, loss = 1.0746\nChecking accuracy on validation set\nGot 517 / 1000 correct (51.70)\n\nIteration 400, loss = 1.0998\nChecking accuracy on validation set\nGot 621 / 1000 correct (62.10)\n\nIteration 500, loss = 0.9986\nChecking accuracy on validation set\nGot 607 / 1000 correct (60.70)\n\nIteration 600, loss = 0.9144\nChecking accuracy on validation set\nGot 647 / 1000 correct (64.70)\n\nIteration 700, loss = 0.8689\nChecking accuracy on validation set\nGot 672 / 1000 correct (67.20)\n\nIteration 0, loss = 0.7947\nChecking accuracy on validation set\nGot 587 / 1000 correct (58.70)\n\nIteration 100, loss = 0.4547\nChecking accuracy on validation set\nGot 649 / 1000 correct (64.90)\n\nIteration 200, loss = 0.9766\nChecking accuracy on validation set\nGot 702 / 1000 correct (70.20)\n\nIteration 300, loss = 0.9580\nChecking accuracy on validation set\nGot 737 / 1000 correct (73.70)\n\nIteration 400, loss = 0.6596\nChecking accuracy on validation set\nGot 728 / 1000 correct (72.80)\n\nIteration 500, loss = 0.7980\nChecking accuracy on validation set\nGot 699 / 1000 correct (69.90)\n\nIteration 600, loss = 0.7282\nChecking accuracy on validation set\nGot 675 / 1000 correct (67.50)\n\nIteration 700, loss = 0.6137\nChecking accuracy on validation set\nGot 736 / 1000 correct (73.60)\n\nIteration 0, loss = 0.7045\nChecking accuracy on validation set\nGot 752 / 1000 correct (75.20)\n\nIteration 100, loss = 0.5893\nChecking accuracy on validation set\nGot 709 / 1000 correct (70.90)\n\nIteration 200, loss = 0.5676\nChecking accuracy on validation set\nGot 764 / 1000 correct (76.40)\n\nIteration 300, loss = 0.5725\nChecking accuracy on validation set\nGot 753 / 1000 correct (75.30)\n\nIteration 400, loss = 0.6374\nChecking accuracy on validation set\nGot 766 / 1000 correct (76.60)\n\nIteration 500, loss = 0.5355\nChecking accuracy on validation set\nGot 770 / 1000 correct 
(77.00)\n\nIteration 600, loss = 0.5342\nChecking accuracy on validation set\nGot 761 / 1000 correct (76.10)\n\nIteration 700, loss = 0.5690\nChecking accuracy on validation set\nGot 790 / 1000 correct (79.00)\n\nIteration 0, loss = 0.8060\nChecking accuracy on validation set\nGot 783 / 1000 correct (78.30)\n\nIteration 100, loss = 0.6684\nChecking accuracy on validation set\nGot 776 / 1000 correct (77.60)\n\nIteration 200, loss = 0.6752\nChecking accuracy on validation set\nGot 749 / 1000 correct (74.90)\n\nIteration 300, loss = 0.3067\nChecking accuracy on validation set\nGot 753 / 1000 correct (75.30)\n\nIteration 400, loss = 0.6599\nChecking accuracy on validation set\nGot 769 / 1000 correct (76.90)\n\nIteration 500, loss = 0.5646\nChecking accuracy on validation set\nGot 770 / 1000 correct (77.00)\n\nIteration 600, loss = 0.5196\nChecking accuracy on validation set\nGot 782 / 1000 correct (78.20)\n\nIteration 700, loss = 0.5311\nChecking accuracy on validation set\nGot 764 / 1000 correct (76.40)\n\nIteration 0, loss = 0.5311\nChecking accuracy on validation set\nGot 790 / 1000 correct (79.00)\n\nIteration 100, loss = 0.5227\nChecking accuracy on validation set\nGot 800 / 1000 correct (80.00)\n\nIteration 200, loss = 0.4871\nChecking accuracy on validation set\nGot 776 / 1000 correct (77.60)\n\nIteration 300, loss = 0.4313\nChecking accuracy on validation set\nGot 811 / 1000 correct (81.10)\n\nIteration 400, loss = 0.3238\nChecking accuracy on validation set\nGot 777 / 1000 correct (77.70)\n\nIteration 500, loss = 0.2970\nChecking accuracy on validation set\nGot 798 / 1000 correct (79.80)\n\nIteration 600, loss = 0.2405\nChecking accuracy on validation set\nGot 800 / 1000 correct (80.00)\n\nIteration 700, loss = 0.4273\nChecking accuracy on validation set\nGot 824 / 1000 correct (82.40)\n\nIteration 0, loss = 0.3206\nChecking accuracy on validation set\nGot 829 / 1000 correct (82.90)\n\nIteration 100, loss = 0.1677\nChecking accuracy on validation set\nGot 806 / 1000 correct (80.60)\n\nIteration 200, loss = 0.4448\nChecking accuracy on validation set\nGot 808 / 1000 correct (80.80)\n\nIteration 300, loss = 0.2736\nChecking accuracy on validation set\nGot 799 / 1000 correct (79.90)\n\nIteration 400, loss = 0.4932\nChecking accuracy on validation set\nGot 801 / 1000 correct (80.10)\n\nIteration 500, loss = 0.2225\nChecking accuracy on validation set\nGot 824 / 1000 correct (82.40)\n\nIteration 600, loss = 0.6659\nChecking accuracy on validation set\nGot 832 / 1000 correct (83.20)\n\nIteration 700, loss = 0.3845\nChecking accuracy on validation set\nGot 816 / 1000 correct (81.60)\n\nIteration 0, loss = 0.3336\nChecking accuracy on validation set\nGot 817 / 1000 correct (81.70)\n\nIteration 100, loss = 0.3040\nChecking accuracy on validation set\nGot 821 / 1000 correct (82.10)\n\nIteration 200, loss = 0.2552\nChecking accuracy on validation set\nGot 842 / 1000 correct (84.20)\n\nIteration 300, loss = 0.4524\nChecking accuracy on validation set\nGot 843 / 1000 correct (84.30)\n\nIteration 400, loss = 0.1319\nChecking accuracy on validation set\nGot 815 / 1000 correct (81.50)\n\nIteration 500, loss = 0.2500\nChecking accuracy on validation set\nGot 805 / 1000 correct (80.50)\n\nIteration 600, loss = 0.2838\nChecking accuracy on validation set\nGot 814 / 1000 correct (81.40)\n\nIteration 700, loss = 0.4331\nChecking accuracy on validation set\nGot 805 / 1000 correct (80.50)\n\nIteration 0, loss = 0.2813\nChecking accuracy on validation set\nGot 818 / 1000 correct (81.80)\n\nIteration 100, 
loss = 0.2629\nChecking accuracy on validation set\nGot 827 / 1000 correct (82.70)\n\nIteration 200, loss = 0.2951\nChecking accuracy on validation set\nGot 839 / 1000 correct (83.90)\n\nIteration 300, loss = 0.3101\nChecking accuracy on validation set\nGot 797 / 1000 correct (79.70)\n\nIteration 400, loss = 0.2068\nChecking accuracy on validation set\nGot 836 / 1000 correct (83.60)\n\nIteration 500, loss = 0.3420\nChecking accuracy on validation set\nGot 824 / 1000 correct (82.40)\n\nIteration 600, loss = 0.2365\nChecking accuracy on validation set\nGot 837 / 1000 correct (83.70)\n\nIteration 700, loss = 0.2689\nChecking accuracy on validation set\nGot 823 / 1000 correct (82.30)\n\nIteration 0, loss = 0.2967\nChecking accuracy on validation set\nGot 820 / 1000 correct (82.00)\n\nIteration 100, loss = 0.1652\nChecking accuracy on validation set\nGot 849 / 1000 correct (84.90)\n\nIteration 200, loss = 0.0765\nChecking accuracy on validation set\nGot 807 / 1000 correct (80.70)\n\nIteration 300, loss = 0.1592\nChecking accuracy on validation set\nGot 842 / 1000 correct (84.20)\n\nIteration 400, loss = 0.1104\nChecking accuracy on validation set\nGot 831 / 1000 correct (83.10)\n\nIteration 500, loss = 0.2477\nChecking accuracy on validation set\nGot 846 / 1000 correct (84.60)\n\nIteration 600, loss = 0.2215\nChecking accuracy on validation set\nGot 835 / 1000 correct (83.50)\n\nIteration 700, loss = 0.3012\nChecking accuracy on validation set\nGot 838 / 1000 correct (83.80)\n\nIteration 0, loss = 0.1167\nChecking accuracy on validation set\nGot 837 / 1000 correct (83.70)\n\nIteration 100, loss = 0.0675\nChecking accuracy on validation set\nGot 830 / 1000 correct (83.00)\n\nIteration 200, loss = 0.1840\nChecking accuracy on validation set\nGot 823 / 1000 correct (82.30)\n\nIteration 300, loss = 0.1254\nChecking accuracy on validation set\nGot 832 / 1000 correct (83.20)\n\nIteration 400, loss = 0.1394\nChecking accuracy on validation set\nGot 833 / 1000 correct (83.30)\n\nIteration 500, loss = 0.1529\nChecking accuracy on validation set\nGot 839 / 1000 correct (83.90)\n\nIteration 600, loss = 0.1160\nChecking accuracy on validation set\nGot 830 / 1000 correct (83.00)\n\nIteration 700, loss = 0.1300\nChecking accuracy on validation set\nGot 832 / 1000 correct (83.20)\n\n" ] ], [ [ "## Describe what you did \n\nIn the cell below you should write an explanation of what you did, any additional features that you implemented, and/or any graphs that you made in the process of training and evaluating your network.", "_____no_output_____" ], [ "**Answer:**\n\n", "_____no_output_____" ], [ "## Test set -- run this only once\n\nNow that we've gotten a result we're happy with, we test our final model on the test set (which you should store in best_model). Think about how this compares to your validation set accuracy.", "_____no_output_____" ] ], [ [ "best_model = model\ncheck_accuracy_part34(loader_test, best_model)", "Checking accuracy on test set\nGot 8208 / 10000 correct (82.08)\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
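The open-ended CIFAR-10 challenge in the notebook above describes global average pooling and batch normalization only in prose. The sketch below is one minimal way to combine them, assuming the notebook's imports (`torch.nn as nn`, `torch.optim as optim`) and its `train_part34` helper; `nn.Flatten` needs a reasonably recent PyTorch, otherwise the notebook's own `Flatten` module can be substituted. It is an illustrative starting point, not the assignment's reference solution.

```python
import torch.nn as nn
import torch.optim as optim

# Conv blocks shrink the 32x32 input to 8x8, a 1x1 conv maps channels to the
# 10 class scores, and global average pooling replaces the affine layers.
gap_model = nn.Sequential(
    nn.Conv2d(3, 64, 3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
    nn.MaxPool2d(2),                       # 32x32 -> 16x16
    nn.Conv2d(64, 128, 3, padding=1), nn.BatchNorm2d(128), nn.ReLU(),
    nn.MaxPool2d(2),                       # 16x16 -> 8x8
    nn.Conv2d(128, 10, 1),                 # per-location class scores
    nn.AdaptiveAvgPool2d(1),               # global average pool to 1x1
    nn.Flatten(),                          # (N, 10, 1, 1) -> (N, 10)
)
gap_optimizer = optim.SGD(gap_model.parameters(), lr=1e-2,
                          momentum=0.9, nesterov=True)
# train_part34(gap_model, gap_optimizer, epochs=10)  # uses the notebook's helper
```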
d0a8904837ee67a5d2bbd90c41cfe18c488e08de
9,894
ipynb
Jupyter Notebook
notebooks/beginner/notebooks/dictionaries.ipynb
4TINI/learn-python3
72e6625782d9e526c590389a6c92f04223b93c3c
[ "MIT" ]
null
null
null
notebooks/beginner/notebooks/dictionaries.ipynb
4TINI/learn-python3
72e6625782d9e526c590389a6c92f04223b93c3c
[ "MIT" ]
null
null
null
notebooks/beginner/notebooks/dictionaries.ipynb
4TINI/learn-python3
72e6625782d9e526c590389a6c92f04223b93c3c
[ "MIT" ]
null
null
null
23.17096
163
0.493329
[ [ [ "# [Dictionaries](https://docs.python.org/3/library/stdtypes.html#dict) \nCollections of `key`-`value` pairs. ", "_____no_output_____" ] ], [ [ "my_empty_dict = {} # alternative: my_empty_dict = dict()\nprint('dict: {}, type: {}'.format(my_empty_dict, type(my_empty_dict)))", "dict: {}, type: <class 'dict'>\n" ] ], [ [ "## Initialization", "_____no_output_____" ] ], [ [ "dict1 = {'value1': 1.6, 'value2': 10, 'name': 'John Doe'}\ndict2 = dict(value1=1.6, value2=10, name='John Doe')\n\nprint(dict1)\nprint(dict2)\n\nprint('equal: {}'.format(dict1 == dict2))\nprint('length: {}'.format(len(dict1)))", "{'value1': 1.6, 'value2': 10, 'name': 'John Doe'}\n{'value1': 1.6, 'value2': 10, 'name': 'John Doe'}\nequal: True\nlength: 3\n" ] ], [ [ "## `dict.keys(), dict.values(), dict.items()`", "_____no_output_____" ] ], [ [ "print('keys: {}'.format(dict1.keys()))\nprint('values: {}'.format(dict1.values()))\nprint('items: {}'.format(dict1.items()))", "keys: dict_keys(['value1', 'value2', 'name'])\nvalues: dict_values([1.6, 10, 'John Doe'])\nitems: dict_items([('value1', 1.6), ('value2', 10), ('name', 'John Doe')])\n" ] ], [ [ "## Accessing and setting values", "_____no_output_____" ] ], [ [ "my_dict = {}\nmy_dict['key1'] = 'value1'\nmy_dict['key2'] = 99\nmy_dict['key1'] = 'new value' # overriding existing value\nprint(my_dict)\nprint('value of key1: {}'.format(my_dict['key1']))", "{'key1': 'new value', 'key2': 99}\nvalue of key1: new value\n" ] ], [ [ "Accessing a nonexistent key will raise `KeyError` (see [`dict.get()`](#dict_get) for workaround):", "_____no_output_____" ] ], [ [ "# print(my_dict['nope'])", "_____no_output_____" ] ], [ [ "## Deleting", "_____no_output_____" ] ], [ [ "my_dict = {'key1': 'value1', 'key2': 99, 'keyX': 'valueX'}\ndel my_dict['keyX']\nprint(my_dict)\n\n# Usually better to make sure that the key exists (see also pop() and popitem())\nkey_to_delete = 'my_key'\nif key_to_delete in my_dict:\n del my_dict[key_to_delete]\nelse:\n print('{key} is not in {dictionary}'.format(key=key_to_delete, dictionary=my_dict))", "{'key1': 'value1', 'key2': 99}\nmy_key is not in {'key1': 'value1', 'key2': 99}\n" ] ], [ [ "## Dictionaries are mutable", "_____no_output_____" ] ], [ [ "my_dict = {'ham': 'good', 'carrot': 'semi good'}\nmy_other_dict = my_dict\nmy_other_dict['carrot'] = 'super tasty'\nmy_other_dict['sausage'] = 'best ever'\nprint('my_dict: {}\\nother: {}'.format(my_dict, my_other_dict))\nprint('equal: {}'.format(my_dict == my_other_dict))", "my_dict: {'ham': 'good', 'carrot': 'super tasty', 'sausage': 'best ever'}\nother: {'ham': 'good', 'carrot': 'super tasty', 'sausage': 'best ever'}\nequal: True\n" ] ], [ [ "Create a new `dict` if you want to have a copy:", "_____no_output_____" ] ], [ [ "my_dict = {'ham': 'good', 'carrot': 'semi good'}\nmy_other_dict = dict(my_dict)\nmy_other_dict['beer'] = 'decent'\nprint('my_dict: {}\\nother: {}'.format(my_dict, my_other_dict))\nprint('equal: {}'.format(my_dict == my_other_dict))", "_____no_output_____" ] ], [ [ "<a id='dict_get'></a>\n## `dict.get()`\nReturns `None` if `key` is not in `dict`. However, you can also specify `default` return value which will be returned if `key` is not present in the `dict`. 
", "_____no_output_____" ] ], [ [ "my_dict = {'a': 1, 'b': 2, 'c': 3}\nd = my_dict.get('d')\nprint('d: {}'.format(d))\n\nd = my_dict.get('d', 'my default value')\nprint('d: {}'.format(d))", "d: None\nd: my default value\n" ] ], [ [ "## `dict.pop()`", "_____no_output_____" ] ], [ [ "my_dict = dict(food='ham', drink='beer', sport='football')\nprint('dict before pops: {}'.format(my_dict))\n\nfood = my_dict.pop('food')\nprint('food: {}'.format(food))\nprint('dict after popping food: {}'.format(my_dict))\n\nfood_again = my_dict.pop('food', 'default value for food')\nprint('food again: {}'.format(food_again))\nprint('dict after popping food again: {}'.format(my_dict))\n", "dict before pops: {'food': 'ham', 'drink': 'beer', 'sport': 'football'}\nfood: ham\ndict after popping food: {'drink': 'beer', 'sport': 'football'}\nfood again: default value for food\ndict after popping food again: {'drink': 'beer', 'sport': 'football'}\n" ] ], [ [ "## `dict.setdefault()`\nReturns the `value` of `key` defined as first parameter. If the `key` is not present in the dict, adds `key` with default value (second parameter).", "_____no_output_____" ] ], [ [ "my_dict = {'a': 1, 'b': 2, 'c': 3}\na = my_dict.setdefault('a', 'my default value')\nd = my_dict.setdefault('d', 'my default value')\nprint('a: {}\\nd: {}\\nmy_dict: {}'.format(a, d, my_dict))", "a: 1\nd: my default value\nmy_dict: {'a': 1, 'b': 2, 'c': 3, 'd': 'my default value'}\n" ] ], [ [ "## `dict.update()`\nMerge two `dict`s", "_____no_output_____" ] ], [ [ "dict1 = {'a': 1, 'b': 2}\ndict2 = {'c': 3}\ndict1.update(dict2)\nprint(dict1)\n\n# If they have same keys:\ndict1.update({'c': 4})\nprint(dict1)", "{'a': 1, 'b': 2, 'c': 3}\n{'a': 1, 'b': 2, 'c': 4}\n" ] ], [ [ "## The keys of a `dict` have to be immutable", "_____no_output_____" ], [ "Thus you can not use e.g. a `list` or a `dict` as key because they are mutable types\n:", "_____no_output_____" ] ], [ [ "# bad_dict = {['my_list'], 'value'} # Raises TypeError", "_____no_output_____" ] ], [ [ "Values can be mutable", "_____no_output_____" ] ], [ [ "good_dict = {'my key': ['Python', 'is', 'still', 'cool']}\nprint(good_dict)", "{'my key': ['Python', 'is', 'still', 'cool']}\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
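The dictionaries notebook above documents `get()`, `setdefault()`, and related methods one at a time; a tiny combined example (not taken from the notebook itself) shows the most common idiom, using `get()` with a default value to count occurrences.

```python
# Counting occurrences with dict.get() and a default of 0.
words = ['ham', 'spam', 'ham', 'eggs', 'ham']
counts = {}
for word in words:
    counts[word] = counts.get(word, 0) + 1
print(counts)  # {'ham': 3, 'spam': 1, 'eggs': 1}
```

The same loop can be written with `setdefault(word, 0)` followed by an increment, or avoided entirely with `collections.Counter`.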
d0a8a3e3646cbbc04160cddb0658576a5e5acd92
8,829
ipynb
Jupyter Notebook
TweeterAnalysis.ipynb
firoj998/Twitter-Sentimental-Analysis
343ae69eefd4991dcc815860fa652febc8ef6cf5
[ "MIT" ]
null
null
null
TweeterAnalysis.ipynb
firoj998/Twitter-Sentimental-Analysis
343ae69eefd4991dcc815860fa652febc8ef6cf5
[ "MIT" ]
null
null
null
TweeterAnalysis.ipynb
firoj998/Twitter-Sentimental-Analysis
343ae69eefd4991dcc815860fa652febc8ef6cf5
[ "MIT" ]
null
null
null
43.492611
155
0.560313
[ [ [ "import re\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom textblob import TextBlob", "_____no_output_____" ], [ "class TwitterClient(object):\n '''\n Generic Twitter Class for sentiment analysis.\n '''\n def __init__(self):\n '''\n Class constructor or initialization method.\n '''\n # keys and tokens from the Twitter Dev Console\n #consumer_key = 'VabVIqTlwNiAFg7NxqQtuSQ8g'\n consumer_key = 'DRh9fiZ6OOeTIyywoRteKXcqJ'\n #consumer_secret = '5rPgXh5gyhvGDZDyJuYNpMtYwEGMYG52q3akk4wkN3I5KGR5MM'\n consumer_secret = 'I6wlXOItz1ryxOSM540lhydDqHvmegfyNoXXTSUOz6DwcrwMMk'\n #access_token = '85755855-3RPlt0XAVOrKeZjHSsehRLsdKfosE0XUuRBljptgL'\n access_token = '4214012714-34yS5BD4p6wXG55Di6Sm8DL5nmS5zGBqbfn4Ik1'\n #access_token_secret = 'Gx9VvkXcb5xUR1cUB7LBxtkHjhwtrb4bXQo0b9wf525rY'\n access_token_secret = 'mhBR7wtOQBpIrAFec4UQlV4vsAMpGsWh9EwcWe2N9tJWe'\n\n \n # attempt authentication\n try:\n # create OAuthHandler object\n self.auth = OAuthHandler(consumer_key, consumer_secret)\n # set access token and secret\n self.auth.set_access_token(access_token, access_token_secret)\n # create tweepy API object to fetch tweets\n self.api = tweepy.API(self.auth)\n except:\n print(\"Error: Authentication Failed\")\n def clean_tweet(self, tweet):\n '''\n Utility function to clean tweet text by removing links, \n special characters\n using simple regex statements.\n '''\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n def get_tweet_sentiment(self, tweet):\n '''\n Utility function to classify sentiment of passed tweet\n using textblob's sentiment method\n '''\n # create TextBlob object of passed tweet text\n analysis = TextBlob(self.clean_tweet(tweet))\n # set sentiment\n if analysis.sentiment.polarity > 0:\n return 'positive'\n elif analysis.sentiment.polarity == 0:\n return 'neutral'\n else:\n return 'negative'\n def get_tweets(self, query, count = 10):\n '''\n Main function to fetch tweets and parse them.\n '''\n # empty list to store parsed tweets\n tweets = []\n \n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q = query, count = count)\n \n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n \n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n \n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n \n # return parsed tweets\n return tweets\n \n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))\ndef main():\n # creating object of TwitterClient Class\n api = TwitterClient()\n # calling function to get tweets\n tweets = api.get_tweets(query = 'Narendra MOdi', count = 200)\n \n # picking positive tweets from tweets\n ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']\n # percentage of positive tweets\n print(\"Positive tweets percentage: {} %\".format(100*len(ptweets)/len(tweets)))\n # picking negative tweets from tweets\n ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']\n # percentage of negative tweets\n #print(\"Negative tweets percentage: {} %\".format(100*len(ntweets)/len(tweets)))\n # percentage of neutral tweets\n #print(\"Neutral tweets percentage: {} 
% \\\n # \".format(100*len(tweets - ntweets - ptweets)/len(tweets)))\n \n # printing first 5 positive tweets\n print(\"\\n\\nPositive tweets:\")\n for tweet in ptweets[:10]:\n print(tweet['text'])\n \n # printing first 5 negative tweets\n print(\"\\n\\nNegative tweets:\")\n for tweet in ntweets[:10]:\n print(tweet['text'])\nif __name__ == \"__main__\":\n main()\n", "Positive tweets percentage: 48.529411764705884 %\n\n\nPositive tweets:\nRT @ANI: Narendra Modi is the most deserving candidate &amp;rightful leader of a democracy. He is not in this position because of his parents,…\nRT @narendramodi: Heard today's #MannKiBaat?\n\nTake part in this interesting quiz based on the programme, on the 'Narendra Modi Mobile App.'…\nRT @vijayrupanibjp: I don't fear standing with corporates as I have right intentions: Narendra Modi\nhttps://t.co/L962FF8ccf\n\nvia NaMo App h…\nRT @GMurugadassBJP: Proud moment! ‘Rag-picker’s son makes it to AIIMS, gets lauded by PM Narendra Modi\nhttps://t.co/3ky2iYWNSv via NaMo App…\nRT @GMurugadassBJP: I don't fear standing with corporates as I have right intentions: Narendra Modi\nhttps://t.co/wjVmGC0sPU via NaMo App ht…\nRT @iPankajShukla: Not afraid to stand with industrialists, says PM Shri @narendramodi Ji https://t.co/HrxWXIu1v7\nRT @BJPLive: Proud moment! ‘Rag-picker’s son makes it to AIIMS, gets lauded by PM Narendra Modi https://t.co/IwM70rKuYz #BJPNewsTrack\nRT @BJPLive: I don't fear standing with corporates as I have right intentions: PM @narendramodi https://t.co/xaArThr21k #BJPNewsTrack\nI don't fear standing with corporates as I have right intentions: Narendra Modi\nhttps://t.co/wjVmGC0sPU via NaMo App https://t.co/39yA8JgzY9\nRT @SwamiGeetika: Narendra Modi is the most deserving candidate and rightful leader of a democracy. He is not in this position because of h…\n\n\nNegative tweets:\nRT @sukanyaiyer2: Friends, Our PMs Fitting Rebuttal &amp; A Tight slap on the Face of Oppn Ranty Rona for their spree of False Propaganda again…\nRT @RoshanKrRai: &amp; the only difference between Nawaz Sharif and Narendra Modi is that Nawaz Sharif is in jail 😉 https://t.co/xFmIiMxZBs\nRT @sanjivbhatt: FACT: For some inexplicable reason, Reham Khan, the ex-wife of Imran Khan has gotten more space in the Indian media than J…\nRT @sunny_hundal: Imran Khan is to Pakistan what Narendra Modi is to India: a nation exhausted by corruption is charmed by a man who preten…\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
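In the sentiment script above, `main()` leaves the neutral-tweet percentage commented out because subtracting one list from another is not valid Python. Below is a hedged sketch of one way to finish that calculation; the function name and return format are illustrative additions, not part of the original script.

```python
def sentiment_breakdown(tweets):
    # Count positives and negatives in the parsed tweets returned by
    # TwitterClient.get_tweets(); whatever remains is neutral.
    positive = sum(1 for t in tweets if t['sentiment'] == 'positive')
    negative = sum(1 for t in tweets if t['sentiment'] == 'negative')
    neutral = len(tweets) - positive - negative
    total = len(tweets)
    return {'positive': 100 * positive / total,
            'negative': 100 * negative / total,
            'neutral': 100 * neutral / total}
```

In practice the consumer and access keys would also typically be read from environment variables (for example via `os.environ`) rather than hard-coded in the constructor.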
d0a8a95425cbbcc7d59dde07d4bb5eadfc6cb5ae
8,652
ipynb
Jupyter Notebook
site/en-snapshot/swift/tutorials/raw_tensorflow_operators.ipynb
NarimaneHennouni/docs-l10n
39a48e0d5aa34950e29efd5c1f111c120185e9d9
[ "Apache-2.0" ]
null
null
null
site/en-snapshot/swift/tutorials/raw_tensorflow_operators.ipynb
NarimaneHennouni/docs-l10n
39a48e0d5aa34950e29efd5c1f111c120185e9d9
[ "Apache-2.0" ]
null
null
null
site/en-snapshot/swift/tutorials/raw_tensorflow_operators.ipynb
NarimaneHennouni/docs-l10n
39a48e0d5aa34950e29efd5c1f111c120185e9d9
[ "Apache-2.0" ]
null
null
null
37.133047
598
0.545423
[ [ [ "##### Copyright 2018 The TensorFlow Authors. [Licensed under the Apache License, Version 2.0](#scrollTo=bPJq2qP2KE3u).", "_____no_output_____" ] ], [ [ "// #@title Licensed under the Apache License, Version 2.0 (the \"License\"); { display-mode: \"form\" }\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// https://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.", "_____no_output_____" ] ], [ [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/swift/tutorials/raw_tensorflow_operators\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/swift/blob/main/docs/site/tutorials/raw_tensorflow_operators.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/swift/blob/main/docs/site/tutorials/raw_tensorflow_operators.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "# Raw TensorFlow operators\n\nBuilding on TensorFlow, Swift for TensorFlow takes a fresh approach to API design. APIs are carefully curated from established libraries and combined with new language idioms. This means that not all TensorFlow APIs will be directly available as Swift APIs, and our API curation needs time and dedicated effort to evolve. However, do not worry if your favorite TensorFlow operator is not available in Swift -- the TensorFlow Swift library gives you transparent access to most TensorFlow operators, under the `_Raw` namespace.\n", "_____no_output_____" ], [ "Import `TensorFlow` to get started.", "_____no_output_____" ] ], [ [ "import TensorFlow", "_____no_output_____" ] ], [ [ "## Calling raw operators\n\nSimply find the function you need under the `_Raw` namespace via code completion.", "_____no_output_____" ] ], [ [ "print(_Raw.mul(Tensor([2.0, 3.0]), Tensor([5.0, 6.0])))", "_____no_output_____" ] ], [ [ "## Defining a new multiply operator\n\nMultiply is already available as operator `*` on `Tensor`, but let us pretend that we wanted to make it available under a new name as `.*`. 
Swift allows you to retroactively add methods or computed properties to existing types using `extension` declarations.\n\nNow, let us add `.*` to `Tensor` by declaring an extension and make it available when the tensor's `Scalar` type conforms to [`Numeric`](https://developer.apple.com/documentation/swift/numeric).", "_____no_output_____" ] ], [ [ "infix operator .* : MultiplicationPrecedence\n\nextension Tensor where Scalar: Numeric {\n static func .* (_ lhs: Tensor, _ rhs: Tensor) -> Tensor {\n return _Raw.mul(lhs, rhs)\n }\n}\n\nlet x: Tensor<Double> = [[1.0, 2.0], [3.0, 4.0]]\nlet y: Tensor<Double> = [[8.0, 7.0], [6.0, 5.0]]\nprint(x .* y)", "_____no_output_____" ] ], [ [ "## Defining a derivative of a wrapped function\n\nNot only can you easily define a Swift API for a raw TensorFlow operator, you can also make it differentiable to work with Swift's first-class automatic differentiation.\n\nTo make `.*` differentiable, use the `@derivative` attribute on the derivative function and specify the original function as an attribute argument under the `of:` label. Since the `.*` operator is defined when the generic type `Scalar` conforms to `Numeric`, it is not enough for making `Tensor<Scalar>` conform to the `Differentiable` protocol. Born with type safety, Swift will remind us to add a generic constraint on the `@differentiable` attribute to require `Scalar` to conform to `TensorFlowFloatingPoint` protocol, which would make `Tensor<Scalar>` conform to `Differentiable`.\n\n```swift\n@differentiable(where Scalar: TensorFlowFloatingPoint)\n```", "_____no_output_____" ] ], [ [ "infix operator .* : MultiplicationPrecedence\n\nextension Tensor where Scalar: Numeric {\n @differentiable(where Scalar: TensorFlowFloatingPoint)\n static func .* (_ lhs: Tensor, _ rhs: Tensor) -> Tensor {\n return _Raw.mul(lhs, rhs)\n }\n}\n\nextension Tensor where Scalar : TensorFlowFloatingPoint { \n @derivative(of: .*)\n static func multiplyDerivative(\n _ lhs: Tensor, _ rhs: Tensor\n ) -> (value: Tensor, pullback: (Tensor) -> (Tensor, Tensor)) {\n return (lhs * rhs, { v in\n ((rhs * v).unbroadcasted(to: lhs.shape),\n (lhs * v).unbroadcasted(to: rhs.shape))\n })\n }\n}\n\n// Now, we can take the derivative of a function that calls `.*` that we just defined.\nprint(gradient(at: x, y) { x, y in\n (x .* y).sum()\n})", "_____no_output_____" ] ], [ [ "## More examples", "_____no_output_____" ] ], [ [ "let matrix = Tensor<Float>([[1, 2], [3, 4]])\n\nprint(_Raw.matMul(matrix, matrix, transposeA: true, transposeB: true))\nprint(_Raw.matMul(matrix, matrix, transposeA: true, transposeB: false))\nprint(_Raw.matMul(matrix, matrix, transposeA: false, transposeB: true))\nprint(_Raw.matMul(matrix, matrix, transposeA: false, transposeB: false))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
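The Swift tutorial above is already self-contained; purely for comparison, TensorFlow's Python API exposes an analogous escape hatch under `tf.raw_ops`, where every registered op is callable with keyword arguments. This Python sketch is illustrative only and is not part of the Swift document.

```python
import tensorflow as tf

# The same raw ops as the Swift examples, via the Python raw-ops namespace.
print(tf.raw_ops.Mul(x=tf.constant([2.0, 3.0]), y=tf.constant([5.0, 6.0])))

matrix = tf.constant([[1.0, 2.0], [3.0, 4.0]])
print(tf.raw_ops.MatMul(a=matrix, b=matrix, transpose_a=True, transpose_b=False))
```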
d0a8aa3160b43920ff25580e0ec1e400494ebf99
54,982
ipynb
Jupyter Notebook
doc/source/operators/mutation.ipynb
AIasd/pymoo
08705ca866367d9fab675c30ffe585c837df9654
[ "Apache-2.0" ]
11
2018-05-22T17:38:02.000Z
2022-02-28T03:34:33.000Z
doc/source/operators/mutation.ipynb
AIasd/pymoo
08705ca866367d9fab675c30ffe585c837df9654
[ "Apache-2.0" ]
15
2022-01-03T19:36:36.000Z
2022-03-30T03:57:58.000Z
doc/source/operators/mutation.ipynb
AIasd/pymoo
08705ca866367d9fab675c30ffe585c837df9654
[ "Apache-2.0" ]
3
2021-11-22T08:01:47.000Z
2022-03-11T08:53:58.000Z
230.050209
15,320
0.924503
[ [ [ ".. _nb_mutation:", "_____no_output_____" ] ], [ [ "## Mutation", "_____no_output_____" ] ], [ [ ".. _nb_mutation_pm:", "_____no_output_____" ] ], [ [ "### Polynomial Mutation ('real_pm', 'int_pm')\n\nDetails about the mutation can be found in <cite data-cite=\"sbx\"></cite>. This mutation follows the same probability distribution as the simulated binary crossover.", "_____no_output_____" ] ], [ [ "from pymoo.interface import mutation\nfrom pymoo.factory import get_mutation\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef show(eta_mut):\n a = np.full((5000, 1), 0.5)\n off = mutation(get_mutation(\"real_pm\", eta=eta_mut, prob=1.0), a)\n\n plt.hist(off, range=(0,1), bins=200, density=True, color=\"red\")\n plt.show()\n\nshow(30)", "_____no_output_____" ], [ "show(10)", "_____no_output_____" ] ], [ [ "Basically, the same can be applied to discrete variables as well: ", "_____no_output_____" ] ], [ [ "\ndef show(eta_mut):\n a = np.full((10000, 1), 0)\n off = mutation(get_mutation(\"int_pm\", eta=eta_mut, prob=1.0), a, xl=-20, xu=20)\n\n plt.hist(off, range=(-20, 20), bins=40, density=True, color=\"red\")\n plt.show()\n\n\nshow(30)\n", "_____no_output_____" ] ], [ [ ".. _nb_mutation_bitflip:", "_____no_output_____" ] ], [ [ "### Bitflip Mutation ('bin_bitflip')\n\nThe bitlip mutation randomly flips a bit.", "_____no_output_____" ] ], [ [ "def show(M):\n plt.figure(figsize=(4,4))\n plt.imshow(M, cmap='Greys', interpolation='nearest')\n plt.show()\n \na = np.full((100,100), False)\nmut = mutation(get_mutation(\"bin_bitflip\", prob=0.1), a)\n\nshow(a != mut)", "_____no_output_____" ] ], [ [ "### API", "_____no_output_____" ] ], [ [ ".. autofunction:: pymoo.factory.get_mutation\n :noindex:\n\n.. autofunction:: pymoo.model.mutation.Mutation\n :noindex:", "_____no_output_____" ] ] ]
[ "raw", "markdown", "raw", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "markdown", "raw" ]
[ [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "raw" ] ]
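The mutation notebook above applies the operators in isolation through `pymoo.interface.mutation`; in a full optimization run, the object returned by `get_mutation` is usually passed straight to an algorithm. A rough usage sketch follows, assuming the same pymoo release as the notebook (later versions moved `NSGA2` to `pymoo.algorithms.moo.nsga2` and replaced the factory functions).

```python
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.factory import get_problem, get_sampling, get_crossover, get_mutation
from pymoo.optimize import minimize

problem = get_problem("zdt1")
algorithm = NSGA2(
    pop_size=100,
    sampling=get_sampling("real_random"),
    crossover=get_crossover("real_sbx", eta=15, prob=0.9),
    mutation=get_mutation("real_pm", eta=20),   # polynomial mutation from above
)
res = minimize(problem, algorithm, ("n_gen", 200), verbose=False)
print(res.F.shape)  # objective values of the final non-dominated set
```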
d0a8ae4fdb32348aead54df2257d1ce144f68264
166,446
ipynb
Jupyter Notebook
notebooks/index_generation_odsc.ipynb
jbauer44/factor_investing
403f68ad0f57b7e2504a790b4ba877d3122df3c2
[ "MIT" ]
1
2021-11-09T12:28:43.000Z
2021-11-09T12:28:43.000Z
notebooks/index_generation_odsc.ipynb
jbauer44/factor_investing
403f68ad0f57b7e2504a790b4ba877d3122df3c2
[ "MIT" ]
null
null
null
notebooks/index_generation_odsc.ipynb
jbauer44/factor_investing
403f68ad0f57b7e2504a790b4ba877d3122df3c2
[ "MIT" ]
null
null
null
209.366038
144,364
0.91053
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Libraries-and-functions\" data-toc-modified-id=\"Libraries-and-functions-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Libraries and functions</a></span><ul class=\"toc-item\"><li><span><a href=\"#Import-libraries\" data-toc-modified-id=\"Import-libraries-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Import libraries</a></span></li><li><span><a href=\"#Definition-of--functions-used-locally\" data-toc-modified-id=\"Definition-of--functions-used-locally-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>Definition of functions used locally</a></span></li></ul></li><li><span><a href=\"#Options\" data-toc-modified-id=\"Options-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Options</a></span></li><li><span><a href=\"#Load-data\" data-toc-modified-id=\"Load-data-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Load data</a></span></li><li><span><a href=\"#Create-base-indices\" data-toc-modified-id=\"Create-base-indices-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>Create base indices</a></span><ul class=\"toc-item\"><li><span><a href=\"#Market-cap-weighted\" data-toc-modified-id=\"Market-cap-weighted-4.1\"><span class=\"toc-item-num\">4.1&nbsp;&nbsp;</span>Market cap weighted</a></span></li><li><span><a href=\"#Equal-weights\" data-toc-modified-id=\"Equal-weights-4.2\"><span class=\"toc-item-num\">4.2&nbsp;&nbsp;</span>Equal weights</a></span></li><li><span><a href=\"#compute-index-returns\" data-toc-modified-id=\"compute-index-returns-4.3\"><span class=\"toc-item-num\">4.3&nbsp;&nbsp;</span>compute index returns</a></span></li></ul></li><li><span><a href=\"#Index-based-on-model-predictions\" data-toc-modified-id=\"Index-based-on-model-predictions-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Index based on model predictions</a></span><ul class=\"toc-item\"><li><span><a href=\"#Weight-generation\" data-toc-modified-id=\"Weight-generation-5.1\"><span class=\"toc-item-num\">5.1&nbsp;&nbsp;</span>Weight generation</a></span></li><li><span><a href=\"#Build-your-strategy\" data-toc-modified-id=\"Build-your-strategy-5.2\"><span class=\"toc-item-num\">5.2&nbsp;&nbsp;</span>Build your strategy</a></span></li><li><span><a href=\"#Build-index\" data-toc-modified-id=\"Build-index-5.3\"><span class=\"toc-item-num\">5.3&nbsp;&nbsp;</span>Build index</a></span><ul class=\"toc-item\"><li><span><a href=\"#Monthly-index-levels\" data-toc-modified-id=\"Monthly-index-levels-5.3.1\"><span class=\"toc-item-num\">5.3.1&nbsp;&nbsp;</span>Monthly index levels</a></span></li></ul></li></ul></li></ul></div>", "_____no_output_____" ], [ "# Libraries and functions\n\n## Import libraries\n\nNeed to be able to access functions in base_dir/src", "_____no_output_____" ] ], [ [ "# libraries\n\n# general\nimport sys\nimport os\n\nimport itertools\n\nimport dateutil.relativedelta as relativedelta\n\nimport datetime\nimport numpy as np\nimport pandas as pd\n\n# plotting\n%matplotlib inline\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\n\n# # statistics and machine learning\n\nfrom IPython.display import display\npd.options.display.max_columns = None\n\n# # add the base path to python system path\npath = os.getcwd()\n#dir_up = os.path.abspath(os.path.join(path, os.pardir))\nbase_path = os.path.abspath(os.path.join(path, os.pardir))\nsys.path.append(base_path)\n\n\n# from mpl_toolkits.axes_grid.anchored_artists import 
AnchoredText\nfrom matplotlib import gridspec\n\n\n# # libraries within package\nfrom src.finance_functions import multiple_returns_from_levels_vec, project_to_first\nfrom src.finance_functions import df_restrict_dates\nfrom src.automotive_dictionaries import equity_name2first_date\n\nfrom src.index_functionality import index_levels_from_returns\n\nfrom src.financial_metrics import extract_performance\n\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "%reload_ext autoreload", "_____no_output_____" ] ], [ [ "# Options", "_____no_output_____" ], [ "# Load data", "_____no_output_____" ] ], [ [ "filename = '../data/data_sample_monthly.csv'\ninfile = filename\ndf_comb_long = pd.read_csv(infile)\ndf_comb_long['date'] = pd.to_datetime(df_comb_long['date'])", "_____no_output_____" ], [ "#df_comb_long.head()", "_____no_output_____" ], [ "#eq_name = 'Equity Parent'\neq_name = 'company'\ndf_prices = df_comb_long.pivot(values='stock_price', index='date', columns=eq_name)\n\n\n# monthly returns\ndf_returns = multiple_returns_from_levels_vec(df_prices.ffill())", "_____no_output_____" ] ], [ [ "# Create base indices ", "_____no_output_____" ] ], [ [ "df_market_cap = df_comb_long.pivot(values='MarketCap_Mlns', index='date', columns=eq_name)\ndf_market_cap.index = df_market_cap.index.map(project_to_first)\n\n# deal with the missing values by taking the previously available one\ndf_market_cap.ffill(inplace=True)\n\n# set to zero when not available, this takes care of the market cap weights\n\nfor col in df_market_cap.columns:\n first_date = project_to_first(equity_name2first_date[col])\n mask = df_market_cap.index < first_date\n df_market_cap.loc[mask, col] = 0.0\n #print(first_date)", "_____no_output_____" ], [ "#df_market_cap.head()", "_____no_output_____" ] ], [ [ "## Market cap weighted", "_____no_output_____" ] ], [ [ "total_market_cap = df_market_cap.sum(axis=1)\n# weights determined in the same month as market cap\ndf_weights = df_market_cap.div(total_market_cap, axis=0)", "_____no_output_____" ], [ "# the weights for the index should be determined by past information, \n# i.e.by previous month market_cap\n#df_weights_mc = df_weights.shift(1).bfill()\ndf_weights_mc = df_weights.shift(1)", "_____no_output_____" ] ], [ [ "## Equal weights", "_____no_output_____" ] ], [ [ "df_temp = (df_weights_mc > 0.0).astype(int)\ndf_weights_equal = df_temp.div(df_temp.sum(axis=1), axis=0)\n", "_____no_output_____" ] ], [ [ "## compute index returns", "_____no_output_____" ] ], [ [ "df_mc_index_returns = pd.DataFrame((df_returns * df_weights_mc).sum(axis=1),columns=['mc_return'])\ndf_mc_index_returns.dropna(inplace=True)\n\ndf_equal_index_returns = pd.DataFrame((df_returns * df_weights_equal).sum(axis=1),columns=['equal_return'])\ndf_equal_index_returns.dropna(inplace=True)", "_____no_output_____" ] ], [ [ "# Index based on model predictions\n\n\nBase indices\n * market cap weighted\n * equal weighted \n", "_____no_output_____" ], [ "## Weight generation", "_____no_output_____" ] ], [ [ "l_base_weights = ['Market Cap', 'Equal']\n\nl_weighting_schemes = ['0']\n\n# Cartesian product tuples\nl_weights = list(itertools.product(*[l_base_weights,l_weighting_schemes]))", "_____no_output_____" ], [ "# WEIGHT ADJUSTMENTS OPTIONS\n\nd_weights = {}\n", "_____no_output_____" ], [ "#execution loop\n\nfor base_mod, scheme in l_weights:\n print(base_mod, scheme)\n if base_mod == 'Market Cap' :\n df_base_weights = df_weights_mc.copy()\n if base_mod == 'Equal' :\n df_base_weights = df_weights_equal.copy()\n \n \n 
df_mod_weights = df_base_weights.copy()\n \n name_d = base_mod + ' ' + scheme\n d_weights[name_d] = df_mod_weights\n", "Market Cap 0\nEqual 0\n" ] ], [ [ "## Build your strategy", "_____no_output_____" ] ], [ [ "# do something better than random\n\ndf_rand = pd.DataFrame(np.random.uniform(low=0.0, high=0.01, size=(len(df_weights_mc.index), len(df_weights_mc.columns))), \n columns=list(df_weights_mc.columns), index=df_weights_mc.index)\ndf_w = df_weights_mc + df_rand\ndf_w = df_w.div(df_w.sum(axis=1), axis=0)", "_____no_output_____" ], [ "d_weights['Market Cap smart modify'] = df_w\n", "_____no_output_____" ], [ "# # SCHEMATIC: CODE DOES NOT EXECUTE LIKE THIS\n# # possible loop for training a model\n# # and producing df_oos_predictions\n\n# # alternatively use portfolio optimization\n\n\n# x_names = ['feature1',...]\n# y_name = 'returns'\n\n# prediction_dates = df_weights_mc.index[24:]\n\n# for date in prediction_dates:\n# #print(date)\n \n# train_ini_date = date + relativedelta.relativedelta(months=-24)\n# train_final_date = date + relativedelta.relativedelta(months=-1)\n \n# df1 = df_restrict_dates(df_comb_long, train_ini_date, train_final_date)\n \n# df_x_train = df1[x_names].copy()\n# df_y_train = df1[[y_name]].copy()\n\n# X_train_full = df_x_train.values\n# y_train_full = df_y_train[y_name].values\n \n# model.fit(X_train_full, y_train_full, sample_weight=sample_weights) \n \n \n# ##### oos results\n# df2 = df_restrict_dates(df_comb_long, date, date)\n# df_x = df2[x_names].copy() \n# X_oos = df_x.values\n \n# predictions = model.predict(X_oos)\n# df_oos_predictions.loc[date] = predictions", "_____no_output_____" ], [ "# # SCHEMATIC: CODE DOES NOT EXECUTE LIKE THIS\n# # possible loop for weight updates (schematic)\n# # based on model predictions df_oos_predictions\n\n# df_base_weights = df_weights_mc.copy() \n# df_mod_weights = df_base_weights.copy()\n\n \n# for date in prediction_dates:\n\n# # assume you have made some predictions\n# predictions = df_oos_predictions.loc[date].values \n\n# # relate predictions to weight updates\n \n# weights_mod = ....\n\n# # possibly apply capping rules\n \n# df_mod_weights.loc[date] = weights_mod \n\n# name_d = 'xx'\n# d_weights[name_d] = df_mod_weights\n", "_____no_output_____" ] ], [ [ "## Build index", "_____no_output_____" ], [ "### Monthly index levels", "_____no_output_____" ] ], [ [ "# build date frame with indices (no rebalancing)\n\nstart_date = datetime.datetime(2009,1,1)\nend_date = datetime.datetime(2015,12,31)\n\nstarting_level = 100\n\ndf_r_in = df_restrict_dates(df_returns, start_date, end_date)\nfrequency = 'monthly'\n\nfor k, name in enumerate(sorted(d_weights.keys())):\n print(name)\n \n df_w_in = df_restrict_dates(d_weights[name], start_date, end_date)\n df_temp = index_levels_from_returns(df_w_in, df_r_in, out_field=name, starting_level=starting_level, \n transaction_costs=False, frequency=frequency)\n if k == 0:\n df_i_comb = df_temp\n else:\n df_i_comb = df_i_comb.merge(df_temp, left_index=True, right_index=True) ", "Equal 0\nMarket Cap 0\nMarket Cap smart modify\n" ], [ "# plot without rebalancing costs \nfont = {'size' : 24}\n\nmpl.rc('font', **font)\ncm = plt.get_cmap('jet')\n#cm = plt.get_cmap('viridis')\n\nsns.set(font_scale=2.5)\nsns.set_style(\"whitegrid\")\n\n#fields = 'Equal'\n#fields = 'Market Cap'\nfields = None\n\nheaders = df_i_comb.columns\n\nif fields is not None:\n headers = list(filter(lambda s: fields in s, df_i_comb.columns))\n \ndf_i_comb[headers].plot(figsize=(20,12), colormap=cm)\nplt.title(frequency.title() 
+ ' Index performance (no rebalancing costs)')\nprint()", "\n" ], [ "extract_performance(df_i_comb[headers])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
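The index-generation notebook above only sketches, in non-executing pseudocode, how model predictions would be mapped to weight updates and capped. The function below is one hypothetical way to fill that gap; the name `tilt_weights`, the rank-based tilt, the 0.5 strength, and the 10% cap are all illustrative choices, not the notebook's own rules.

```python
import pandas as pd

def tilt_weights(base_weights: pd.Series, predictions: pd.Series,
                 strength: float = 0.5, cap: float = 0.10) -> pd.Series:
    """Tilt base index weights toward names with higher predicted returns."""
    ranks = predictions.rank(pct=True)              # cross-sectional ranks in (0, 1]
    tilted = base_weights * (1.0 + strength * (ranks - 0.5))
    tilted = tilted.clip(lower=0.0, upper=cap)      # simple capping rule
    return tilted / tilted.sum()                    # renormalize (assumes a nonzero sum)

# Inside the schematic loop:
# df_mod_weights.loc[date] = tilt_weights(df_base_weights.loc[date],
#                                         df_oos_predictions.loc[date])
```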
d0a8b36afaa7b9823360ea1e0480820f1b38d043
33,574
ipynb
Jupyter Notebook
notebooks/chapter24/Image Segmentation.ipynb
mbalduccini/aima-python
6624d12467867a84795838abccad579b1da57469
[ "MIT" ]
6,946
2016-02-27T19:28:07.000Z
2022-03-31T21:21:35.000Z
notebooks/chapter24/Image Segmentation.ipynb
mbalduccini/aima-python
6624d12467867a84795838abccad579b1da57469
[ "MIT" ]
733
2016-02-29T20:12:12.000Z
2022-02-19T11:56:13.000Z
notebooks/chapter24/Image Segmentation.ipynb
mbalduccini/aima-python
6624d12467867a84795838abccad579b1da57469
[ "MIT" ]
3,880
2016-02-24T21:13:35.000Z
2022-03-31T17:09:57.000Z
69.800416
6,700
0.807827
[ [ [ "# Segmentation\n\nImage segmentation is another early and important image processing task. Segmentation is the process of breaking an image into groups based on similarities of the pixels. Pixels can be similar to each other in multiple ways, such as brightness, color, or texture. Segmentation algorithms aim to find a partition of the image into sets of similar pixels, which usually indicate objects or certain scenes in the image.\n\nThe segmentation methods in this chapter fall into two complementary categories: one focuses on detecting the boundaries of these groups, and the other on detecting the groups themselves, typically called regions. We will introduce the principles of some algorithms in this notebook to present the basic ideas in segmentation.", "_____no_output_____" ], [ "## Probability Boundary Detection\n\nA boundary curve passing through a pixel $(x,y)$ in an image will have an orientation $\\theta$, so we can formulate boundary detection as a classification problem. Based on features from a local neighborhood, we want to compute the probability $P_b(x,y,\\theta)$ that there is indeed a boundary curve at that pixel along that orientation. \n\nOne way to estimate $P_b(x,y,\\theta)$ is to place a circular disc at the pixel and sub-divide it into two half discs by a diameter oriented at $\\theta$. If there is a boundary at $(x, y, \\theta)$, the two half discs can be expected to differ significantly in their brightness, color, and texture. For a detailed derivation of this algorithm, please refer to this [article](https://people.eecs.berkeley.edu/~malik/papers/MFM-boundaries.pdf).", "_____no_output_____" ], [ "### Implementation\n\nWe implemented a simple demonstration of the probability boundary detector as `probability_contour_detection` in `perception.py`. This method takes three inputs:\n\n- image: an image already converted to a numpy ndarray.\n- discs: a list of sub-divided discs.\n- threshold: the criterion used to decide whether the intensity difference between the two half discs implies a boundary passing through the current pixel.\n\nWe also provide a helper function `gen_discs` to generate a list of discs. It takes `scales` as the number of disc sizes to be generated, which defaults to 1. Please note that for each scale size, 8 sub-discs are generated, oriented along the horizontal, vertical, and two diagonal directions. Another parameter, `init_scale`, indicates the starting scale size. For instance, if we use an `init_scale` of 10 and `scales` of 2, then discs of sizes 10 and 20 will be generated and thus we will have 16 sub-divided discs.", "_____no_output_____" ], [ "### Example\n\nNow let's demonstrate the inner mechanism with our naive implementation of the algorithm. First, let's generate some very simple test images.
We already generated a grayscale image with only three steps of gray scales in `perceptron.py`:", "_____no_output_____" ] ], [ [ "import os, sys\nsys.path = [os.path.abspath(\"../../\")] + sys.path\nfrom perception4e import *\nfrom notebook4e import *\nimport matplotlib.pyplot as plt", "Using TensorFlow backend.\n" ] ], [ [ "Let's take a look at it:", "_____no_output_____" ] ], [ [ "plt.imshow(gray_scale_image, cmap='gray', vmin=0, vmax=255)\nplt.axis('off')\nplt.show()", "_____no_output_____" ] ], [ [ "You can also generate your own grayscale images by calling `gen_gray_scale_picture` and pass the image size and grayscale levels needed:", "_____no_output_____" ] ], [ [ "gray_img = gen_gray_scale_picture(100, 5)\nplt.imshow(gray_img, cmap='gray', vmin=0, vmax=255)\nplt.axis('off')\nplt.show()", "_____no_output_____" ] ], [ [ "Now let's generate the discs we are going to use as sampling masks to tell the intensity difference between two half of the care area of an image. We can generate the discs of size 100 pixels and show them:", "_____no_output_____" ] ], [ [ "discs = gen_discs(100, 1)\nfig=plt.figure(figsize=(10, 10))\nfor i in range(8):\n img = discs[0][i]\n fig.add_subplot(1, 8, i+1)\n plt.axis('off')\n plt.imshow(img, cmap='gray', vmin=0, vmax=255)\nplt.show()", "_____no_output_____" ] ], [ [ "The white part of disc images is of value 1 while dark places are of value 0. Thus convolving the half-disc image with the corresponding area of an image will yield only half of its content. Of course, discs of size 100 is too large for an image of the same size. We will use discs of size 10 and pass them to the detector.", "_____no_output_____" ] ], [ [ "discs = gen_discs(10, 1)\ncontours = probability_contour_detection(gray_img, discs[0])", "_____no_output_____" ], [ "show_edges(contours)", "_____no_output_____" ] ], [ [ "As we are using discs of size 10 and some boundary conditions are not dealt with in our naive algorithm, the extracted contour has a bold edge with missings near the image border. But the main structures of contours are extracted correctly which shows the ability of this algorithm.", "_____no_output_____" ], [ "## Group Contour Detection\n\nThe alternative approach is based on trying to “cluster” the pixels into regions based on their brightness, color and texture properties. There are multiple grouping algorithms and the simplest and the most popular one is k-means clustering. Basically, the k-means algorithm starts with k randomly selected centroids, which are used as the beginning points for every cluster, and then performs iterative calculations to optimize the positions of the centroids. For a detailed description, please refer to the chapter of unsupervised learning.", "_____no_output_____" ], [ "### Implementation\n\nHere we will use the module of `cv2` to perform K-means clustering and show the image. To use it you need to have `opencv-python` pre-installed. Using `cv2.kmeans` is quite simple, you only need to specify the input image and the characters of cluster initialization. Here we use modules provide by `cv2` to initialize the clusters. `cv2.KMEANS_RANDOM_CENTERS` can randomly generate centers of clusters and the cluster number is defined by the user.\n\n`kmeans` method will return the centers and labels of clusters, which can be used to classify pixels of an image. 
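A minimal sketch of such a call is shown below; it is included for illustration only, and the helper name `kmeans_segment`, the stopping criteria and the single-channel reshaping are assumptions for this example rather than part of `perception4e`:\n\n```python\nimport cv2\nimport numpy as np\n\ndef kmeans_segment(img, k=3, attempts=10):\n    # cv2.kmeans expects float32 samples, one row per pixel\n    samples = img.reshape((-1, 1)).astype(np.float32)\n    # stop after 10 iterations or once the centers move less than 1.0\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n    _, labels, centers = cv2.kmeans(samples, k, None, criteria, attempts, cv2.KMEANS_RANDOM_CENTERS)\n    # paint each pixel with the intensity of its cluster center\n    return centers[labels.flatten()].reshape(img.shape).astype(img.dtype)\n```\n\nEach pixel in the returned image carries the intensity of its cluster center, which is similar in spirit to the segmented results shown below.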
Let's try this algorithm again on the small grayscale image we imported:", "_____no_output_____" ] ], [ [ "contours = group_contour_detection(gray_scale_image, 3)", "_____no_output_____" ] ], [ [ "Now let's show the extracted contours:", "_____no_output_____" ] ], [ [ "show_edges(contours)", "_____no_output_____" ] ], [ [ "The effect is not obvious here, as our generated image already has very clear boundaries. Let's apply the algorithm to the stapler example to see whether the result is more striking:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.image as mpimg\n\nstapler_img = mpimg.imread('images/stapler.png', format=\"gray\")", "_____no_output_____" ], [ "contours = group_contour_detection(stapler_img, 5)\nplt.axis('off')\nplt.imshow(contours, cmap=\"gray\")", "_____no_output_____" ] ], [ [ "The segmentation is very rough when using only 5 clusters. Increasing the number of clusters makes each group more subtle, so the segmented picture looks more like the original one:", "_____no_output_____" ] ], [ [ "contours = group_contour_detection(stapler_img, 15)\nplt.axis('off')\nplt.imshow(contours, cmap=\"gray\")", "_____no_output_____" ] ], [ [ "## Minimum Cut Segmentation\n\nAnother way to do clustering is by applying the minimum cut algorithm from graph theory. Roughly speaking, the criterion for partitioning the graph is to minimize the sum of weights of connections across the groups and maximize the sum of weights of connections within the groups.\n\n### Implementation\n\nThere are several ways to represent a graph, such as an adjacency matrix or an adjacency list. Here we are using a util function `image_to_graph` to convert an image stored as an ndarray into an adjacency list. It is integrated into the `Graph` class. `Graph` takes an image as input and offers the following implementations of some graph theory algorithms:", "_____no_output_____" ], [ "- bfs: performs a breadth-first search from a source vertex to a terminal vertex. Returns `True` if there is a path between the two nodes, else returns `False`.\n\n- min_cut: performs a minimum cut on the graph from a source vertex to a sink vertex. The method will return the edges to be cut.\n\nNow let's try the minimum cut method on a simple generated grayscale image of size 10:", "_____no_output_____" ] ], [ [ "image = gen_gray_scale_picture(size=10, level=2)\nshow_edges(image)", "_____no_output_____" ], [ "graph = Graph(image)\ngraph.min_cut((0,0), (9,9))", "_____no_output_____" ] ], [ [ "There are ten edges to be cut. By cutting these ten edges, we can separate the picture into two parts according to pixel intensity.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0a8c0ffc76621ae4a974638e99e93724fae7002
8,918
ipynb
Jupyter Notebook
DA Lab/.ipynb_checkpoints/2_assignment-checkpoint.ipynb
ganeshbhandarkar/College-Labs-And-Projects
f99a99cda97a0e3d220ad5b6e31fc02d7d77ee68
[ "MIT" ]
null
null
null
DA Lab/.ipynb_checkpoints/2_assignment-checkpoint.ipynb
ganeshbhandarkar/College-Labs-And-Projects
f99a99cda97a0e3d220ad5b6e31fc02d7d77ee68
[ "MIT" ]
null
null
null
DA Lab/.ipynb_checkpoints/2_assignment-checkpoint.ipynb
ganeshbhandarkar/College-Labs-And-Projects
f99a99cda97a0e3d220ad5b6e31fc02d7d77ee68
[ "MIT" ]
1
2020-11-05T04:25:11.000Z
2020-11-05T04:25:11.000Z
19.344902
123
0.388316
[ [ [ "name <- as.character(readline(prompt = \"name\"))\nage <- as.integer(readline(prompt = \"age\"))\ncat(\"Name : \", name,\"\\n\",\"Age : \", age)", "name Ganesh Bhandakar\nage 20\n" ], [ "roll <- as.integer(readline(prompt = \"roll no\"))\nname <- as.character(readline(prompt = \"name\"))\nbranch <- as.character(readline(prompt = \"branch\"))\ncat(\"roll->\",roll,\"\\n\",\"name->\",name,\"\\n\",\"branch->\",branch)", "roll no 1806554\nname Ganesh Bhandakar\nbranch IT\n" ], [ "x <- readline()\nx <- strsplit(x,\" \")\nx <- as.integer(x[[1]])\ncat(\"sum : \",x[1]+x[2],\"\\t\",\"mul : \",x[1]*x[2],\"\\t\",\"div : \",x[1]/x[2],\"\\t\",\"sub : \",x[1]-x[2])", " 5 2\n" ], [ "x <- as.character(readline())\nx <- strsplit(x,\"\")\nx <- as.integer(x[[1]])\ncat(\"sum of 3 digits of number : \",x[1]+x[2]+x[3])", " 234\n" ], [ "x <- as.integer(readline())\narea = 3.14*x*x\ncircumference = 2*3.14*x\ncat(\"area : \",area,\"\\n\",\"circumference : \",circumference)", " 3\n" ], [ "cat(\"seq bet 20 to 50 : \",c(20:50))\ncat(\"\\n\")\ncat(\"mean bet 20 to 60 : \",mean(20:60))\ncat(\"\\n\")\ncat(\"sum for 51 to 91 : \",sum(51:91))", "seq bet 20 to 50 : 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50\nmean bet 20 to 60 : 40\nsum for 51 to 91 : 2911" ], [ "round(runif(10,-50,50))", "_____no_output_____" ], [ "x <- readline()\nx <- strsplit(x,\" \")\nx <- as.integer(x[[1]])\nmax(x)\nmin(x)", " 1 70 11 34 21 45\n" ], [ "x <- c(1,2,3,4,5)\ny <- c(\"hey\",\"how\",\"are\",\"you\",\"bruh\")\nz <- x > 1\nclass(x)\ncat(x,\"\\n\")\nclass(y)\ncat(y,\"\\n\")\nclass(z)\ncat(z)", "_____no_output_____" ], [ "x <- readline()\nx <- strsplit(x,\" \")\nx <- as.integer(x[[1]])\ncat(\"sum : \",sum(x),\"\\n\",\"mean : \",mean(x),\"\\n\",\"product : \",prod(x))", " 1 2 3 4 5\n" ], [ "tmp <- c(1:20)\nprint(tmp)\ntmp <- c(20:1)\nprint(tmp)\ntmp <- c(1:20,20:1)\nprint(tmp)\ntmp <- c(4,6,3)\nprint(tmp)", " [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20\n [1] 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1\n [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 20 19 18 17 16\n[26] 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1\n[1] 4 6 3\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a8c93d291ebe33ff3584f999003d3134d8b968
6,683
ipynb
Jupyter Notebook
Notebooks and Scripts/Bag_of_Words_Analysis.ipynb
tdeme/NLP-Partisanship
4a289d1157ac1b96e5af3b38b2676f2d5c84e21b
[ "MIT" ]
null
null
null
Notebooks and Scripts/Bag_of_Words_Analysis.ipynb
tdeme/NLP-Partisanship
4a289d1157ac1b96e5af3b38b2676f2d5c84e21b
[ "MIT" ]
null
null
null
Notebooks and Scripts/Bag_of_Words_Analysis.ipynb
tdeme/NLP-Partisanship
4a289d1157ac1b96e5af3b38b2676f2d5c84e21b
[ "MIT" ]
null
null
null
34.448454
132
0.454137
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "!git clone https://github.com/tdeme/NLP-Political-Bias.git", "Cloning into 'NLP-Political-Bias'...\nremote: Enumerating objects: 8, done.\u001b[K\nremote: Counting objects: 100% (8/8), done.\u001b[K\nremote: Compressing objects: 100% (7/7), done.\u001b[K\nremote: Total 8 (delta 1), reused 0 (delta 0), pack-reused 0\u001b[K\nUnpacking objects: 100% (8/8), done.\n" ], [ "left_tweets_df = pd.read_csv('/content/NLP-Political-Bias/Rleft_tweets.csv')\nright_tweets_df = pd.read_csv('/content/NLP-Political-Bias/Rright_tweets.csv')", "_____no_output_____" ], [ "print(len(left_tweets_df))\nprint(len(right_tweets_df))", "49532\n40105\n" ], [ "from sklearn.feature_extraction.text import CountVectorizer\nimport nltk\nfrom nltk.corpus import stopwords\nnltk.download('stopwords')\n\nnltk_stopwords = stopwords.words('english')\nfor stopword in ('http','https','co'):\n nltk_stopwords.append(stopword)\n\nleft_tf_vectorizer = CountVectorizer(max_df=0.8, min_df=25, ngram_range=(2,3), binary=True, stop_words=nltk_stopwords)\nright_tf_vectorizer = CountVectorizer(max_df=0.8, min_df=25, ngram_range=(2,3), binary=True, stop_words=nltk_stopwords)\n\nleft_term_freqs = left_tf_vectorizer.fit_transform(left_tweets_df['tweet'].values.astype('U'))\nright_term_freqs = right_tf_vectorizer.fit_transform(right_tweets_df['tweet'].values.astype('U'))", "[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n" ], [ "#Make dataframes for the term frequencies for left and right\n\nleft_phrases_df = pd.DataFrame(data=left_tf_vectorizer.get_feature_names(),columns=['phrase'])\nleft_phrases_df['total_occurences'] = left_term_freqs.sum(axis=0).T\nleft_phrases_df.sort_values(by='total_occurences',ascending=False).head(50).to_csv('left_top_50.csv', index=False)\n\nright_phrases_df = pd.DataFrame(data=right_tf_vectorizer.get_feature_names(),columns=['phrase'])\nright_phrases_df['total_occurences'] = right_term_freqs.sum(axis=0).T\nright_phrases_df.sort_values(by='total_occurences',ascending=False).head(50).to_csv('right_top_50.csv', index=False)\n\nprint(left_phrases_df)\nprint(right_phrases_df)", " phrase total_occurences\n0 000 americans 105\n1 000 children 36\n2 000 dreamers 25\n3 000 federal 38\n4 000 month 27\n... ... ...\n2310 youtube video congresswoman 26\n2311 yrs ago 28\n2312 zero tolerance 43\n2313 zero tolerance policy 29\n2314 zip code 31\n\n[2315 rows x 2 columns]\n phrase total_occurences\n0 000 jobs 45\n1 000 new 43\n2 000 new jobs 29\n3 100 000 30\n4 100 days 26\n... ... ...\n1129 years come 29\n1130 years service 25\n1131 yesterday discuss 38\n1132 yet another 65\n1133 young people 26\n\n[1134 rows x 2 columns]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d0a8cb1b27e5dba1e9b1bc7551c4f4c02f7a9493
27,164
ipynb
Jupyter Notebook
-Decision-Trees-drug.ipynb
varunsingh4/MachineLearning
eacbab992d7791534052f12ce631370fb3947a6d
[ "Apache-2.0" ]
null
null
null
-Decision-Trees-drug.ipynb
varunsingh4/MachineLearning
eacbab992d7791534052f12ce631370fb3947a6d
[ "Apache-2.0" ]
null
null
null
-Decision-Trees-drug.ipynb
varunsingh4/MachineLearning
eacbab992d7791534052f12ce631370fb3947a6d
[ "Apache-2.0" ]
null
null
null
25.919847
745
0.521168
[ [ [ "<center>\n <img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/images/IDSNlogo.png\" width=\"300\" alt=\"cognitiveclass.ai logo\" />\n</center>\n\n# Decision Trees\n\nEstimated time needed: **15** minutes\n\n## Objectives\n\nAfter completing this lab you will be able to:\n\n* Develop a classification model using Decision Tree Algorithm\n", "_____no_output_____" ], [ "In this lab exercise, you will learn a popular machine learning algorithm, Decision Trees. You will use this classification algorithm to build a model from the historical data of patients, and their response to different medications. Then you will use the trained decision tree to predict the class of a unknown patient, or to find a proper drug for a new patient.\n", "_____no_output_____" ], [ "<h1>Table of contents</h1>\n\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ol>\n <li><a href=\"#about_dataset\">About the dataset</a></li>\n <li><a href=\"#downloading_data\">Downloading the Data</a></li>\n <li><a href=\"#pre-processing\">Pre-processing</a></li>\n <li><a href=\"#setting_up_tree\">Setting up the Decision Tree</a></li>\n <li><a href=\"#modeling\">Modeling</a></li>\n <li><a href=\"#prediction\">Prediction</a></li>\n <li><a href=\"#evaluation\">Evaluation</a></li>\n <li><a href=\"#visualization\">Visualization</a></li>\n </ol>\n</div>\n<br>\n<hr>\n", "_____no_output_____" ], [ "Import the Following Libraries:\n\n<ul>\n <li> <b>numpy (as np)</b> </li>\n <li> <b>pandas</b> </li>\n <li> <b>DecisionTreeClassifier</b> from <b>sklearn.tree</b> </li>\n</ul>\n", "_____no_output_____" ] ], [ [ "import numpy as np \nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier", "_____no_output_____" ] ], [ [ "<div id=\"about_dataset\">\n <h2>About the dataset</h2>\n Imagine that you are a medical researcher compiling data for a study. You have collected data about a set of patients, all of whom suffered from the same illness. During their course of treatment, each patient responded to one of 5 medications, Drug A, Drug B, Drug c, Drug x and y. \n <br>\n <br>\n Part of your job is to build a model to find out which drug might be appropriate for a future patient with the same illness. The features of this dataset are Age, Sex, Blood Pressure, and the Cholesterol of the patients, and the target is the drug that each patient responded to.\n <br>\n <br>\n It is a sample of multiclass classifier, and you can use the training part of the dataset \n to build a decision tree, and then use it to predict the class of a unknown patient, or to prescribe a drug to a new patient.\n</div>\n", "_____no_output_____" ], [ "<div id=\"downloading_data\"> \n <h2>Downloading the Data</h2>\n To download the data, we will use !wget to download it from IBM Object Storage.\n</div>\n", "_____no_output_____" ] ], [ [ "!wget -O drug200.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/data/drug200.csv", "--2021-06-06 16:57:39-- https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/data/drug200.csv\nResolving cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)... 
169.63.118.104\nConnecting to cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)|169.63.118.104|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 5827 (5.7K) [text/csv]\nSaving to: 'drug200.csv'\n\n 0K ..... 100% 868M=0s\n\n2021-06-06 16:57:40 (868 MB/s) - 'drug200.csv' saved [5827/5827]\n\n" ] ], [ [ "**Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)\n", "_____no_output_____" ], [ "Now, read the data using pandas dataframe:\n", "_____no_output_____" ] ], [ [ "my_data = pd.read_csv(\"drug200.csv\", delimiter=\",\")\nmy_data[0:5]", "_____no_output_____" ] ], [ [ "<div id=\"practice\"> \n <h3>Practice</h3> \n What is the size of data? \n</div>\n", "_____no_output_____" ] ], [ [ "# write your code here\nmy_data.shape\n\n", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nmy_data.shape\n\n```\n\n</details>\n", "_____no_output_____" ], [ "<div href=\"pre-processing\">\n <h2>Pre-processing</h2>\n</div>\n", "_____no_output_____" ], [ "Using <b>my_data</b> as the Drug.csv data read by pandas, declare the following variables: <br>\n\n<ul>\n <li> <b> X </b> as the <b> Feature Matrix </b> (data of my_data) </li>\n <li> <b> y </b> as the <b> response vector </b> (target) </li>\n</ul>\n", "_____no_output_____" ], [ "Remove the column containing the target name since it doesn't contain numeric values.\n", "_____no_output_____" ] ], [ [ "X = my_data[['Age', 'Sex', 'BP', 'Cholesterol', 'Na_to_K']].values\nX[0:5]\n", "_____no_output_____" ] ], [ [ "As you may figure out, some features in this dataset are categorical, such as **Sex** or **BP**. Unfortunately, Sklearn Decision Trees does not handle categorical variables. We can still convert these features to numerical values using **pandas.get_dummies()**\nto convert the categorical variable into dummy/indicator variables.\n", "_____no_output_____" ] ], [ [ "from sklearn import preprocessing\nle_sex = preprocessing.LabelEncoder()\nle_sex.fit(['F','M'])\nX[:,1] = le_sex.transform(X[:,1]) \n\n\nle_BP = preprocessing.LabelEncoder()\nle_BP.fit([ 'LOW', 'NORMAL', 'HIGH'])\nX[:,2] = le_BP.transform(X[:,2])\n\n\nle_Chol = preprocessing.LabelEncoder()\nle_Chol.fit([ 'NORMAL', 'HIGH'])\nX[:,3] = le_Chol.transform(X[:,3]) \n\nX[0:5]\n", "_____no_output_____" ] ], [ [ "Now we can fill the target variable.\n", "_____no_output_____" ] ], [ [ "y = my_data[\"Drug\"]\ny[0:5]", "_____no_output_____" ] ], [ [ "<hr>\n\n<div id=\"setting_up_tree\">\n <h2>Setting up the Decision Tree</h2>\n We will be using <b>train/test split</b> on our <b>decision tree</b>. Let's import <b>train_test_split</b> from <b>sklearn.cross_validation</b>.\n</div>\n", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ] ], [ [ "Now <b> train_test_split </b> will return 4 different parameters. We will name them:<br>\nX_trainset, X_testset, y_trainset, y_testset <br> <br>\nThe <b> train_test_split </b> will need the parameters: <br>\nX, y, test_size=0.3, and random_state=3. 
<br> <br>\nThe <b>X</b> and <b>y</b> are the arrays required before the split, the <b>test_size</b> represents the ratio of the testing dataset, and the <b>random_state</b> ensures that we obtain the same splits.\n", "_____no_output_____" ] ], [ [ "X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.3, random_state=3)", "_____no_output_____" ] ], [ [ "<h3>Practice</h3>\nPrint the shape of X_trainset and y_trainset. Ensure that the dimensions match.\n", "_____no_output_____" ] ], [ [ "# your code\nX_trainset.shape\n\n\n", "_____no_output_____" ], [ "y_trainset.shape", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nprint('Shape of X training set {}'.format(X_trainset.shape),'&',' Size of Y training set {}'.format(y_trainset.shape))\n\n```\n\n</details>\n", "_____no_output_____" ], [ "Print the shape of X_testset and y_testset. Ensure that the dimensions match.\n", "_____no_output_____" ] ], [ [ "# your code\nX_testset.shape\n\n", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nprint('Shape of X training set {}'.format(X_testset.shape),'&',' Size of Y training set {}'.format(y_testset.shape))\n\n```\n\n</details>\n", "_____no_output_____" ] ], [ [ "y_testset.shape", "_____no_output_____" ] ], [ [ "<hr>\n\n<div id=\"modeling\">\n <h2>Modeling</h2>\n We will first create an instance of the <b>DecisionTreeClassifier</b> called <b>drugTree</b>.<br>\n Inside of the classifier, specify <i> criterion=\"entropy\" </i> so we can see the information gain of each node.\n</div>\n", "_____no_output_____" ] ], [ [ "drugTree = DecisionTreeClassifier(criterion=\"entropy\", max_depth = 4)\ndrugTree # it shows the default parameters", "_____no_output_____" ] ], [ [ "Next, we will fit the data with the training feature matrix <b> X_trainset </b> and training response vector <b> y_trainset </b>\n", "_____no_output_____" ] ], [ [ "drugTree.fit(X_trainset,y_trainset)", "_____no_output_____" ] ], [ [ "<hr>\n\n<div id=\"prediction\">\n <h2>Prediction</h2>\n Let's make some <b>predictions</b> on the testing dataset and store it into a variable called <b>predTree</b>.\n</div>\n", "_____no_output_____" ] ], [ [ "predTree = drugTree.predict(X_testset)", "_____no_output_____" ] ], [ [ "You can print out <b>predTree</b> and <b>y_testset</b> if you want to visually compare the predictions to the actual values.\n", "_____no_output_____" ] ], [ [ "print (predTree [0:5])\nprint (y_testset [0:5])\n", "['drugY' 'drugX' 'drugX' 'drugX' 'drugX']\n40 drugY\n51 drugX\n139 drugX\n197 drugX\n170 drugX\nName: Drug, dtype: object\n" ] ], [ [ "<hr>\n\n<div id=\"evaluation\">\n <h2>Evaluation</h2>\n Next, let's import <b>metrics</b> from sklearn and check the accuracy of our model.\n</div>\n", "_____no_output_____" ] ], [ [ "from sklearn import metrics\nimport matplotlib.pyplot as plt\nprint(\"DecisionTrees's Accuracy: \", metrics.accuracy_score(y_testset, predTree))", "DecisionTrees's Accuracy: 0.9833333333333333\n" ] ], [ [ "**Accuracy classification score** computes subset accuracy: the set of labels predicted for a sample must exactly match the corresponding set of labels in y_true.\n\nIn multilabel classification, the function returns the subset accuracy. 
If the entire set of predicted labels for a sample strictly match with the true set of labels, then the subset accuracy is 1.0; otherwise it is 0.0.\n", "_____no_output_____" ], [ "\n", "_____no_output_____" ] ], [ [ "\n", "_____no_output_____" ] ], [ [ "<h2>Want to learn more?</h2>\n\nIBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href=\"https://www.ibm.com/analytics/spss-statistics-software?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01\">SPSS Modeler</a>\n\nAlso, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href=\"https://www.ibm.com/cloud/watson-studio?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01\">Watson Studio</a>\n", "_____no_output_____" ], [ "### Thank you for completing this lab!\n\n## Author\n\nSaeed Aghabozorgi\n\n### Other Contributors\n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01\" target=\"_blank\">Joseph Santarcangelo</a>\n\n## Change Log\n\n| Date (YYYY-MM-DD) | Version | Changed By | Change Description |\n|---|---|---|---|\n| 2020-11-20 | 2.2 | Lakshmi | Changed import statement of StringIO|\n| 2020-11-03 | 2.1 | Lakshmi | Changed URL of the csv |\n| 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab |\n| | | | |\n| | | | |\n\n## <h3 align=\"center\"> © IBM Corporation 2020. All rights reserved. <h3/>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0a8cfd65bf989e1b4c76f932749d730d6a0066d
211,059
ipynb
Jupyter Notebook
fixtures/aerl/notebook/results-py3.ipynb
bbengfort/honu
3bfbf6a1959ec60a1e120a287f84996fa333757c
[ "MIT" ]
2
2018-05-28T18:27:35.000Z
2020-10-07T10:12:38.000Z
fixtures/aerl/notebook/results-py3.ipynb
bbengfort/honu
3bfbf6a1959ec60a1e120a287f84996fa333757c
[ "MIT" ]
3
2021-10-05T00:37:26.000Z
2021-10-05T01:53:00.000Z
fixtures/aerl/notebook/results-py3.ipynb
bbengfort/honu
3bfbf6a1959ec60a1e120a287f84996fa333757c
[ "MIT" ]
null
null
null
165.666405
158,267
0.844953
[ [ [ "# Results Analysis \n\nThis notebook analyzes results produced by the _anti-entropy reinforcement learning_ experiments. The practical purpose of this notebook is to create graphs that can be used to display anti-entropy topologies, but also to extract information relevant to each experimental run. ", "_____no_output_____" ] ], [ [ "%matplotlib notebook\n\nimport os \nimport re \nimport glob \nimport json\nimport unicodedata\n\nimport numpy as np \nimport pandas as pd \nimport seaborn as sns\nimport networkx as nx\nimport matplotlib as mpl \nimport graph_tool.all as gt\nimport matplotlib.pyplot as plt \n\nfrom nx2gt import nx2gt\nfrom datetime import timedelta \nfrom collections import defaultdict", "_____no_output_____" ] ], [ [ "## Data Loading \n\nThe data directory contains directories whose names are the hosts along with configuration files for each run. Each run is stored in its own `metrics.json` file, suffixed by the run number. The data loader yields _all_ rows from _all_ metric files and appends them with the correct configuration data. ", "_____no_output_____" ] ], [ [ "DATA = \"../data\"\nFIGS = \"../figures\"\nGRAPHS = \"../graphs\"\nHOSTS = \"hosts.json\"\nRESULTS = \"metrics-*.json\"\nCONFIGS = \"config-*.json\"\nNULLDATE = \"0001-01-01T00:00:00Z\"\nDURATION = re.compile(\"^([\\d\\.]+)(\\w+)$\") \n\ndef suffix(path):\n # Get the run id from the path \n name, _ = os.path.splitext(path)\n return int(name.split(\"-\")[-1]) \n\n\ndef parse_duration(d):\n match = DURATION.match(d)\n if match is None:\n raise TypeError(\"could not parse duration '{}'\".format(d))\n amount, units = match.groups() \n amount = float(amount)\n\n unitkw = {\n \"µs\": \"microseconds\", \n \"ms\": \"milliseconds\", \n \"s\": \"seconds\", \n }[units]\n \n return timedelta(**{unitkw:amount}).total_seconds()\n\n\ndef load_hosts(path=DATA):\n with open(os.path.join(path, HOSTS), 'r') as f:\n return json.load(f)\n\n\ndef load_configs(path=DATA):\n configs = {}\n for name in glob.glob(os.path.join(path, CONFIGS)):\n with open(name, 'r') as f:\n configs[suffix(name)] = json.load(f)\n return configs \n\n\ndef slugify(name):\n slug = unicodedata.normalize('NFKD', name)\n slug = str(slug.encode('ascii', 'ignore')).lower()\n slug = re.sub(r'[^a-z0-9]+', '-', slug).strip('-')\n slug = re.sub(r'[-]+', '-', slug)\n return slug \n\n\ndef load_results(path=DATA):\n hosts = load_hosts(path)\n configs = load_configs(path) \n for host in os.listdir(path):\n for name in glob.glob(os.path.join(path, host, \"metrics-*.json\")):\n run = suffix(name) \n with open(name, 'r', encoding='utf-8') as f:\n for line in f:\n row = json.loads(line.strip())\n row['name'] = host \n row['host'] = hosts[host][\"hostname\"] + \":3264\"\n row['runid'] = run \n row['config'] = configs[run]\n yield row\n\n\ndef merge_results(path, data=DATA):\n # Merge all of the results into a single unified file \n with open(path, 'w') as f:\n for row in load_results(data):\n f.write(json.dumps(row))\n f.write(\"\\n\")", "_____no_output_____" ] ], [ [ "## Graph Extraction \n\nThis section extracts a NeworkX graph for each of the experimental runs such that each graph defines an anti-entropy topology.", "_____no_output_____" ] ], [ [ "def extract_graphs(path=DATA, outdir=None):\n graphs = defaultdict(nx.DiGraph)\n for row in load_results(path):\n \n # Get the graph for the topology \n G = graphs[row[\"runid\"]]\n \n # Update the graph information \n name = row[\"bandit\"][\"strategy\"].title()\n epsilon = row[\"config\"][\"replicas\"].get(\"epsilon\", 
None)\n if epsilon:\n name += \" ε={}\".format(epsilon)\n \n G.graph.update({\n \"name\": name + \" (E{})\".format(row[\"runid\"]), \n \"experiment\": row[\"runid\"], \n \"uptime\": row[\"config\"][\"replicas\"][\"uptime\"], \n \"bandit\": row[\"config\"][\"replicas\"][\"bandit\"], \n \"epsilon\": epsilon or \"\", \n \"anti_entropy_interval\": row[\"config\"][\"replicas\"][\"delay\"], \n \"workload_duration\": row[\"config\"][\"clients\"][\"config\"][\"duration\"], \n \"n_clients\": len(row[\"config\"][\"clients\"][\"hosts\"]),\n# \"workload\": row[\"config\"][\"clients\"][\"hosts\"], \n \"store\": row[\"store\"], \n })\n \n # Update the vertex information\n vnames = row[\"name\"].split(\"-\")\n vertex = {\n \"duration\": row[\"duration\"], \n \"finished\": row[\"finished\"] if row[\"finished\"] != NULLDATE else \"\", \n \"started\": row[\"started\"] if row[\"started\"] != NULLDATE else \"\",\n \"keys_stored\": row[\"nkeys\"], \n \"reads\": row[\"reads\"], \n \"writes\": row[\"writes\"], \n \"throughput\": row[\"throughput\"], \n \"location\": \" \".join(vnames[1:-1]).title(), \n \"pid\": int(vnames[-1]), \n \"name\": row[\"name\"]\n }\n source_id = row[\"host\"]\n source = G.add_node(source_id, **vertex)\n \n # Get bandit edge information \n bandit_counts = dict(zip(row[\"peers\"], row[\"bandit\"][\"counts\"]))\n bandit_values = dict(zip(row[\"peers\"], row[\"bandit\"][\"values\"]))\n \n # Add the edges from the sync table \n for target_id, stats in row[\"syncs\"].items():\n edge = {\n \"count\": bandit_counts[target_id], \n \"reward\": bandit_values[target_id], \n \"misses\": stats[\"Misses\"],\n \"pulls\": stats[\"Pulls\"], \n \"pushes\": stats[\"Pushes\"], \n \"syncs\": stats[\"Syncs\"],\n \"versions\": stats[\"Versions\"], \n \"mean_pull_latency\": parse_duration(stats[\"PullLatency\"][\"mean\"]),\n \"mean_push_latency\": parse_duration(stats[\"PushLatency\"][\"mean\"]),\n }\n G.add_edge(source_id, target_id, **edge)\n \n # Write Graphs\n if outdir:\n for G in graphs.values():\n opath = os.path.join(outdir, slugify(G.name)+\".graphml.gz\")\n nx.write_graphml(G, opath)\n \n return graphs \n\n \n# for G in extract_graphs(outdir=GRAPHS).values():\nfor G in extract_graphs().values():\n print(nx.info(G))\n print()", "Name: Uniform Selection (E1)\nType: DiGraph\nNumber of nodes: 24\nNumber of edges: 552\nAverage in degree: 23.0000\nAverage out degree: 23.0000\n\nName: Epsilon Greedy ε=0.5 (E4)\nType: DiGraph\nNumber of nodes: 24\nNumber of edges: 552\nAverage in degree: 23.0000\nAverage out degree: 23.0000\n\nName: Annealing Epsilon Greedy (E5)\nType: DiGraph\nNumber of nodes: 24\nNumber of edges: 552\nAverage in degree: 23.0000\nAverage out degree: 23.0000\n\nName: Epsilon Greedy ε=0.1 (E2)\nType: DiGraph\nNumber of nodes: 24\nNumber of edges: 552\nAverage in degree: 23.0000\nAverage out degree: 23.0000\n\nName: Epsilon Greedy ε=0.2 (E3)\nType: DiGraph\nNumber of nodes: 24\nNumber of edges: 552\nAverage in degree: 23.0000\nAverage out degree: 23.0000\n\n" ], [ "LOCATION_COLORS = {\n \"Virginia\": \"#D91E18\", \n \"Ohio\": \"#E26A6A\", \n \"California\": \"#8E44AD\", \n \"Sao Paulo\": \"#6BB9F0\", \n \"London\": \"#2ECC71\", \n \"Frankfurt\": \"#6C7A89\", \n \"Seoul\": \"#F9690E\", \n \"Sydney\": \"#F7CA18\", \n}\nLOCATION_GROUPS = sorted(list(LOCATION_COLORS.keys()))\nLOCATION_CODES = {\n \"Virginia\": \"VA\", \n \"Ohio\": \"OH\", \n \"California\": \"CA\", \n \"Sao Paulo\": \"BR\", \n \"London\": \"GB\", \n \"Frankfurt\": \"DE\", \n \"Seoul\": \"KR\", \n \"Sydney\": \"AU\", \n}\n\ndef 
filter_edges(h, pulls=0, pushes=0):\n # Create a view of the graph with only edges with syncs > 0 \n efilt = h.new_edge_property('bool')\n for edge in h.edges():\n efilt[edge] = (h.ep['pulls'][edge] > pulls or h.ep['pushes'][edge] > pushes)\n return gt.GraphView(h, efilt=efilt)\n\n\ndef mklabel(name, loc):\n code = LOCATION_CODES[loc]\n parts = name.split(\"-\")\n return \"{}{}\".format(code, parts[-1])\n\n\ndef visualize_graph(G, layout='sfdp', filter=True, save=True):\n print(G.name)\n output = None\n if save:\n output = os.path.join(FIGS, slugify(G.name) + \".pdf\")\n \n # Convert the nx Graph to a gt Graph \n g = nx2gt(G) \n if filter:\n g = filter_edges(g)\n \n # Vertex Properties \n vgroup = g.new_vertex_property('int32_t')\n vcolor = g.new_vertex_property('string')\n vlabel = g.new_vertex_property('string')\n for vertex in g.vertices():\n vcolor[vertex] = LOCATION_COLORS[g.vp['location'][vertex]]\n vgroup[vertex] = LOCATION_GROUPS.index(g.vp['location'][vertex])\n vlabel[vertex] = mklabel(g.vp['name'][vertex], g.vp['location'][vertex])\n vsize = gt.prop_to_size(g.vp['writes'], ma=65, mi=35)\n \n # Edge Properties \n esize = gt.prop_to_size(g.ep['versions'], mi=.01, ma=6)\n ecolor = gt.prop_to_size(g.ep['mean_pull_latency'], mi=1, ma=5, log=True)\n \n # Compute the layout and draw \n if layout == 'fruchterman_reingold':\n pos = gt.fruchterman_reingold_layout(g, weight=esize, circular=True, grid=False)\n elif layout == 'sfdp':\n pos = gt.sfdp_layout(g, eweight=esize, groups=vgroup)\n else:\n raise ValueError(\"unknown layout '{}\".format(layout))\n\n gt.graph_draw(\n g, pos=pos, output_size=(1200,1200), output=output, inline=True,\n vertex_size=vsize, vertex_fill_color=vcolor, vertex_text=vlabel, \n vertex_halo=False, vertex_pen_width=1.2,\n edge_pen_width=esize,\n )\n \n \nvisualize_graph(extract_graphs()[5])", "Annealing Epsilon Greedy (E5)\n" ] ], [ [ "## Rewards DataFrame \n\nThis section extracts a timeseries of rewards on a per-replica basis. ", "_____no_output_____" ] ], [ [ "def extract_rewards(path=DATA):\n for row in load_results(path):\n bandit = row[\"bandit\"]\n history = bandit[\"history\"]\n strategy = bandit[\"strategy\"]\n epsilon = row[\"config\"][\"replicas\"].get(\"epsilon\")\n if epsilon:\n strategy += \" ε={}\".format(epsilon)\n values = np.array(list(map(float, history[\"rewards\"])))\n series = pd.Series(values, name=row[\"name\"] + \" \" + strategy)\n yield series, row['runid']", "_____no_output_____" ], [ "total_rewards = {} \nfor series, rowid in extract_rewards():\n if rowid not in total_rewards:\n total_rewards[rowid] = series \n else:\n total_rewards[rowid] += series ", "_____no_output_____" ], [ "cumulative_rewards = {\n rowid: s.cumsum()\n for rowid, s in total_rewards.items()\n}", "_____no_output_____" ], [ "from pandas.plotting import autocorrelation_plot\ndf = pd.DataFrame({\n \" \".join(s.name.split(\" \")[1:]): s \n for s in total_rewards.values()\n}).iloc[15:361]\n\ndf.reset_index(inplace=True, drop=True)\n\nfig,ax = plt.subplots(figsize=(9,6))\n\ndf.rolling(window=15,center=False).mean().plot(ax=ax)\n\nax.set_ylabel(\"Rolling Mean of Total System Reward (w=15)\")\nax.set_xlabel(\"Timesteps (Anti-Entropy Sessions)\")\nax.grid(True, ls='--')\nax.set_xlim(12, 346)\n\nplt.savefig(os.path.join(FIGS, \"rewards.pdf\"))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d0a8e6f046abbc4f71118aa522a42d426838b45f
427,085
ipynb
Jupyter Notebook
notebooks/dataset-projections/64/fmnist/fmnist-64-ae-only.ipynb
timsainb/ParametricUMAP_paper
00b4d676647e45619552aec8f2663c0903a83e3f
[ "MIT" ]
124
2020-09-27T23:59:01.000Z
2022-03-22T06:27:35.000Z
notebooks/dataset-projections/64/fmnist/fmnist-64-ae-only.ipynb
kiminh/ParametricUMAP_paper
00b4d676647e45619552aec8f2663c0903a83e3f
[ "MIT" ]
2
2021-02-05T18:13:13.000Z
2021-11-01T14:55:08.000Z
notebooks/dataset-projections/64/fmnist/fmnist-64-ae-only.ipynb
kiminh/ParametricUMAP_paper
00b4d676647e45619552aec8f2663c0903a83e3f
[ "MIT" ]
16
2020-09-28T07:43:21.000Z
2022-03-21T00:31:34.000Z
175.178425
190,160
0.87333
[ [ [ "# reload packages\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "### Choose GPU (this may not be needed on your computer)", "_____no_output_____" ] ], [ [ "%env CUDA_DEVICE_ORDER=PCI_BUS_ID\n%env CUDA_VISIBLE_DEVICES=1", "env: CUDA_DEVICE_ORDER=PCI_BUS_ID\nenv: CUDA_VISIBLE_DEVICES=1\n" ], [ "import tensorflow as tf\ngpu_devices = tf.config.experimental.list_physical_devices('GPU')\nif len(gpu_devices)>0:\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\nprint(gpu_devices)\ntf.keras.backend.clear_session()", "[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]\n" ] ], [ [ "### load packages", "_____no_output_____" ] ], [ [ "from tfumap.umap import tfUMAP", "/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n" ], [ "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm.autonotebook import tqdm\nimport umap\nimport pandas as pd", "_____no_output_____" ] ], [ [ "### Load dataset", "_____no_output_____" ] ], [ [ "dataset = 'fmnist'", "_____no_output_____" ], [ "from tensorflow.keras.datasets import fashion_mnist", "_____no_output_____" ], [ "# load dataset\n(train_images, Y_train), (test_images, Y_test) = fashion_mnist.load_data()\nX_train = (train_images/255.).astype('float32')\nX_test = (test_images/255.).astype('float32')\nX_train = X_train.reshape((len(X_train), np.product(np.shape(X_train)[1:])))\nX_test = X_test.reshape((len(X_test), np.product(np.shape(X_test)[1:])))\n\n# subset a validation set\nn_valid = 10000\nX_valid = X_train[-n_valid:]\nY_valid = Y_train[-n_valid:]\nX_train = X_train[:-n_valid]\nY_train = Y_train[:-n_valid]\n\n# flatten X\nX_train_flat = X_train.reshape((len(X_train), np.product(np.shape(X_train)[1:])))\nX_test_flat = X_test.reshape((len(X_test), np.product(np.shape(X_test)[1:])))\nX_valid_flat= X_valid.reshape((len(X_valid), np.product(np.shape(X_valid)[1:])))\nprint(len(X_train), len(X_valid), len(X_test))", "50000 10000 10000\n" ] ], [ [ "### define networks", "_____no_output_____" ] ], [ [ "dims = (28,28,1)\nn_components = 64", "_____no_output_____" ], [ "encoder = tf.keras.Sequential([\n tf.keras.layers.InputLayer(input_shape=dims),\n tf.keras.layers.Conv2D(\n filters=64, kernel_size=3, strides=(2, 2), activation=\"relu\"\n ),\n tf.keras.layers.Conv2D(\n filters=128, kernel_size=3, strides=(2, 2), activation=\"relu\"\n ),\n\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(units=512, activation=\"relu\"),\n tf.keras.layers.Dense(units=n_components),\n])", "_____no_output_____" ], [ "decoder = tf.keras.Sequential([\n tf.keras.layers.InputLayer(input_shape=(n_components)),\n tf.keras.layers.Dense(units=512, activation=\"relu\"),\n tf.keras.layers.Dense(units=7 * 7 * 256, activation=\"relu\"),\n tf.keras.layers.Reshape(target_shape=(7, 7, 256)),\n tf.keras.layers.Conv2DTranspose(\n filters=128, kernel_size=3, strides=(2, 2), padding=\"SAME\", activation=\"relu\"\n ),\n tf.keras.layers.Conv2DTranspose(\n filters=64, kernel_size=3, strides=(2, 2), padding=\"SAME\", activation=\"relu\"\n ),\n tf.keras.layers.Conv2DTranspose(\n filters=1, kernel_size=3, strides=(1, 1), padding=\"SAME\", activation=\"sigmoid\"\n )\n])", "_____no_output_____" ], [ "input_img = tf.keras.Input(dims)\noutput_img = 
decoder(encoder(input_img))\nautoencoder = tf.keras.Model(input_img, output_img)", "_____no_output_____" ], [ "autoencoder.compile(optimizer='adam', loss='binary_crossentropy')", "_____no_output_____" ], [ "X_train = X_train.reshape([len(X_train)] + list(dims))\nhistory = autoencoder.fit(X_train, X_train,\n epochs=50,\n batch_size=256,\n shuffle=True,\n #validation_data=(X_valid, X_valid)\n )", "Epoch 1/50\n196/196 [==============================] - 12s 61ms/step - loss: 0.3511\nEpoch 2/50\n196/196 [==============================] - 11s 55ms/step - loss: 0.2881\nEpoch 3/50\n196/196 [==============================] - 11s 54ms/step - loss: 0.2777\nEpoch 4/50\n196/196 [==============================] - 11s 54ms/step - loss: 0.2725\nEpoch 5/50\n196/196 [==============================] - 9s 46ms/step - loss: 0.2693\nEpoch 6/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2670\nEpoch 7/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2654\nEpoch 8/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2640\nEpoch 9/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2629\nEpoch 10/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2621\nEpoch 11/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2613\nEpoch 12/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2606\nEpoch 13/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2599\nEpoch 14/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2594\nEpoch 15/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2589\nEpoch 16/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2584\nEpoch 17/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2580\nEpoch 18/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2576\nEpoch 19/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2572\nEpoch 20/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2570\nEpoch 21/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2567\nEpoch 22/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2563\nEpoch 23/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2560\nEpoch 24/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2557\nEpoch 25/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2557\nEpoch 26/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2552\nEpoch 27/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2551\nEpoch 28/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2548\nEpoch 29/50\n196/196 [==============================] - 9s 46ms/step - loss: 0.2546\nEpoch 30/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2545\nEpoch 31/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2543\nEpoch 32/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2539\nEpoch 33/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2539\nEpoch 34/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2536\nEpoch 35/50\n196/196 [==============================] - 9s 46ms/step - loss: 0.2536\nEpoch 36/50\n196/196 [==============================] - 9s 44ms/step - loss: 0.2533\nEpoch 37/50\n196/196 [==============================] - 9s 48ms/step - loss: 0.2532\nEpoch 
38/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2530\nEpoch 39/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2529\nEpoch 40/50\n196/196 [==============================] - 9s 47ms/step - loss: 0.2528\nEpoch 41/50\n196/196 [==============================] - 9s 48ms/step - loss: 0.2527\nEpoch 42/50\n196/196 [==============================] - 9s 46ms/step - loss: 0.2526\nEpoch 43/50\n196/196 [==============================] - 9s 45ms/step - loss: 0.2524\nEpoch 44/50\n196/196 [==============================] - 9s 46ms/step - loss: 0.2523\nEpoch 45/50\n196/196 [==============================] - 8s 40ms/step - loss: 0.2522\nEpoch 46/50\n196/196 [==============================] - 6s 30ms/step - loss: 0.2520\nEpoch 47/50\n196/196 [==============================] - 4s 21ms/step - loss: 0.2519\nEpoch 48/50\n196/196 [==============================] - 4s 18ms/step - loss: 0.2519\nEpoch 49/50\n196/196 [==============================] - 4s 19ms/step - loss: 0.2517\nEpoch 50/50\n196/196 [==============================] - 5s 23ms/step - loss: 0.2517\n" ], [ "z = encoder.predict(X_train)", "_____no_output_____" ] ], [ [ "### Plot model output", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots( figsize=(8, 8))\nsc = ax.scatter(\n z[:, 0],\n z[:, 1],\n c=Y_train.astype(int)[:len(z)],\n cmap=\"tab10\",\n s=0.1,\n alpha=0.5,\n rasterized=True,\n)\nax.axis('equal')\nax.set_title(\"UMAP in Tensorflow embedding\", fontsize=20)\nplt.colorbar(sc, ax=ax);", "_____no_output_____" ] ], [ [ "### View loss", "_____no_output_____" ] ], [ [ "from tfumap.umap import retrieve_tensors\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "### Save output", "_____no_output_____" ] ], [ [ "from tfumap.paths import ensure_dir, MODEL_DIR", "_____no_output_____" ], [ "output_dir = MODEL_DIR/'projections'/ dataset / '64' /'ae_only'\nensure_dir(output_dir)", "_____no_output_____" ], [ "encoder.save(output_dir / 'encoder')", "WARNING: Logging before flag parsing goes to stderr.\nW0821 22:13:22.498517 139878705719104 deprecation.py:323] From /mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\nInstructions for updating:\nThis property should not be used in TensorFlow 2.0, as updates are applied automatically.\nW0821 22:13:22.564436 139878705719104 deprecation.py:323] From /mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\nInstructions for updating:\nThis property should not be used in TensorFlow 2.0, as updates are applied automatically.\nI0821 22:13:23.913495 139878705719104 builder_impl.py:775] Assets written to: /mnt/cube/tsainbur/Projects/github_repos/umap_tf_networks/models/projections/fmnist/64/ae_only/encoder/assets\n" ], [ "decoder.save(output_dir / 'encoder')", "I0821 22:13:27.026618 139878705719104 builder_impl.py:775] Assets written to: /mnt/cube/tsainbur/Projects/github_repos/umap_tf_networks/models/projections/fmnist/64/ae_only/encoder/assets\n" ], [ "#loss_df.to_pickle(output_dir / 'loss_df.pickle')", "_____no_output_____" ], [ "np.save(output_dir / 'z.npy', z)", "_____no_output_____" ] ], [ [ "### compute metrics ", "_____no_output_____" ] ], [ [ "X_test.shape", "_____no_output_____" ], [ "z_test = 
encoder.predict(X_test.reshape((len(X_test), 28,28,1)))", "_____no_output_____" ] ], [ [ "#### silhouette", "_____no_output_____" ] ], [ [ "from tfumap.silhouette import silhouette_score_block", "_____no_output_____" ], [ "ss, sil_samp = silhouette_score_block(z, Y_train, n_jobs = -1)\nss", "_____no_output_____" ], [ "ss_test, sil_samp_test = silhouette_score_block(z_test, Y_test, n_jobs = -1)\nss_test", "_____no_output_____" ], [ "fig, axs = plt.subplots(ncols = 2, figsize=(10, 5))\naxs[0].scatter(z[:, 0], z[:, 1], s=0.1, alpha=0.5, c=sil_samp, cmap=plt.cm.viridis)\naxs[1].scatter(z_test[:, 0], z_test[:, 1], s=1, alpha=0.5, c=sil_samp_test, cmap=plt.cm.viridis)", "_____no_output_____" ] ], [ [ "#### KNN", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier", "_____no_output_____" ], [ "neigh5 = KNeighborsClassifier(n_neighbors=5)\nneigh5.fit(z, Y_train)\nscore_5nn = neigh5.score(z_test, Y_test)\nscore_5nn", "_____no_output_____" ], [ "neigh1 = KNeighborsClassifier(n_neighbors=1)\nneigh1.fit(z, Y_train)\nscore_1nn = neigh1.score(z_test, Y_test)\nscore_1nn", "_____no_output_____" ] ], [ [ "#### Trustworthiness", "_____no_output_____" ] ], [ [ "from sklearn.manifold import trustworthiness", "_____no_output_____" ], [ "tw = trustworthiness(X_train_flat[:10000], z[:10000])", "_____no_output_____" ], [ "tw_test = trustworthiness(X_test_flat[:10000], z_test[:10000])", "_____no_output_____" ], [ "tw, tw_test", "_____no_output_____" ] ], [ [ "### Save output metrics", "_____no_output_____" ] ], [ [ "from tfumap.paths import ensure_dir, MODEL_DIR, DATA_DIR", "_____no_output_____" ] ], [ [ "#### train", "_____no_output_____" ] ], [ [ "metrics_df = pd.DataFrame(\n columns=[\n \"dataset\",\n \"class_\",\n \"dim\",\n \"trustworthiness\",\n \"silhouette_score\",\n \"silhouette_samples\",\n ]\n)\nmetrics_df.loc[len(metrics_df)] = [dataset, 'ae_only', n_components, tw, ss, sil_samp]\nmetrics_df", "_____no_output_____" ], [ "save_loc = DATA_DIR / 'projection_metrics' / 'ae_only' / 'train' / str(n_components) / (dataset + '.pickle')\nensure_dir(save_loc)\nmetrics_df.to_pickle(save_loc)", "_____no_output_____" ] ], [ [ "#### test", "_____no_output_____" ] ], [ [ "metrics_df_test = pd.DataFrame(\n columns=[\n \"dataset\",\n \"class_\",\n \"dim\",\n \"trustworthiness\",\n \"silhouette_score\",\n \"silhouette_samples\",\n ]\n)\nmetrics_df_test.loc[len(metrics_df)] = [dataset, 'ae_only', n_components, tw_test, ss_test, sil_samp_test]\nmetrics_df_test", "_____no_output_____" ], [ "save_loc = DATA_DIR / 'projection_metrics' / 'ae' / 'test' / str(n_components) / (dataset + '.pickle')\nensure_dir(save_loc)\nmetrics_df.to_pickle(save_loc)", "_____no_output_____" ] ], [ [ "#### knn ", "_____no_output_____" ] ], [ [ "nn_acc_df = pd.DataFrame(columns = [\"method_\",\"dimensions\",\"dataset\",\"1NN_acc\",\"5NN_acc\"])\nnn_acc_df.loc[len(nn_acc_df)] = ['ae_only', n_components, dataset, score_1nn, score_5nn]\nnn_acc_df", "_____no_output_____" ], [ "save_loc = DATA_DIR / 'knn_classifier' / 'ae_only' / 'train' / str(n_components) / (dataset + '.pickle')\nensure_dir(save_loc)\nnn_acc_df.to_pickle(save_loc)", "_____no_output_____" ] ], [ [ "### Reconstruction", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score", "_____no_output_____" ], [ "X_recon = decoder.predict(encoder.predict(X_test.reshape((len(X_test), 28, 28, 1))))\nX_real = X_test.reshape((len(X_test), 28, 28, 1))", "_____no_output_____" ], [ "x_real = 
X_test.reshape((len(X_test), np.product(np.shape(X_test)[1:])))\nx_recon = X_recon.reshape((len(X_test), np.product(np.shape(X_test)[1:])))", "_____no_output_____" ], [ "reconstruction_acc_df = pd.DataFrame(\n columns=[\"method_\", \"dimensions\", \"dataset\", \"MSE\", \"MAE\", \"MedAE\", \"R2\"]\n)\n\nMSE = mean_squared_error(\n x_real, \n x_recon\n)\nMAE = mean_absolute_error(\n x_real, \n x_recon\n)\nMedAE = median_absolute_error(\n x_real, \n x_recon\n)\nR2 = r2_score(\n x_real, \n x_recon\n)\n\nreconstruction_acc_df.loc[len(reconstruction_acc_df)] = ['ae_only', n_components, dataset, MSE, MAE, MedAE, R2]\nreconstruction_acc_df", "_____no_output_____" ], [ "save_loc = DATA_DIR / 'reconstruction_acc' / 'ae_only' / str(n_components) / (dataset + '.pickle')\nensure_dir(save_loc)\nreconstruction_acc_df.to_pickle(save_loc)", "_____no_output_____" ] ], [ [ "### Compute clustering quality", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\nfrom sklearn.metrics import homogeneity_completeness_v_measure", "_____no_output_____" ], [ "def get_cluster_metrics(row, n_init=5):\n \n # load cluster information\n save_loc = DATA_DIR / 'clustering_metric_df'/ ('_'.join([row.class_, str(row.dim), row.dataset]) + '.pickle')\n print(save_loc)\n if save_loc.exists() and save_loc.is_file():\n \n cluster_df = pd.read_pickle(save_loc)\n return cluster_df\n \n # make cluster metric dataframe\n cluster_df = pd.DataFrame(\n columns=[\n \"dataset\",\n \"class_\",\n \"dim\",\n \"silhouette\",\n \"homogeneity\",\n \"completeness\",\n \"v_measure\",\n \"init_\",\n \"n_clusters\",\n \"model\",\n ]\n )\n y = row.train_label\n z = row.train_z\n n_labels = len(np.unique(y))\n for n_clusters in tqdm(np.arange(n_labels - int(n_labels / 2), n_labels + int(n_labels / 2)), leave=False, desc = 'n_clusters'):\n for init_ in tqdm(range(n_init), leave=False, desc='init'):\n kmeans = KMeans(n_clusters=n_clusters, random_state=init_).fit(z)\n clustered_y = kmeans.labels_\n homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(\n y, clustered_y\n )\n ss, _ = silhouette_score_block(z, clustered_y)\n cluster_df.loc[len(cluster_df)] = [\n row.dataset,\n row.class_,\n row.dim,\n ss,\n homogeneity,\n completeness,\n v_measure,\n init_,\n n_clusters,\n kmeans,\n ]\n \n # save cluster df in case this fails somewhere\n ensure_dir(save_loc)\n cluster_df.to_pickle(save_loc)\n return cluster_df", "_____no_output_____" ], [ "projection_df = pd.DataFrame(columns = ['dataset', 'class_', 'train_z', 'train_label', 'dim'])\nprojection_df.loc[len(projection_df)] = [dataset, 'ae_only', z, Y_train, n_components]\nprojection_df", "_____no_output_____" ], [ "get_cluster_metrics(projection_df.iloc[0], n_init=5)", "/mnt/cube/tsainbur/Projects/github_repos/umap_tf_networks/data/clustering_metric_df/ae_only_64_fmnist.pickle\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d0a8e8af392d8ad432d4109263a7ab409e079b5c
74,191
ipynb
Jupyter Notebook
spacex falcon 9 first stage landing prediction/web scraping for spacex falcon 9 first stage landing prediction.ipynb
HasibAlMuzdadid/Data-Science-Projects
e32990cc69570e7cf6aaabd4f80efd9c11dd9441
[ "Apache-2.0" ]
1
2022-03-07T19:37:48.000Z
2022-03-07T19:37:48.000Z
spacex falcon 9 first stage landing prediction/web scraping for spacex falcon 9 first stage landing prediction.ipynb
HasibAlMuzdadid/Data-Science-Projects
e32990cc69570e7cf6aaabd4f80efd9c11dd9441
[ "Apache-2.0" ]
null
null
null
spacex falcon 9 first stage landing prediction/web scraping for spacex falcon 9 first stage landing prediction.ipynb
HasibAlMuzdadid/Data-Science-Projects
e32990cc69570e7cf6aaabd4f80efd9c11dd9441
[ "Apache-2.0" ]
null
null
null
29.939871
1,461
0.481999
[ [ [ "# **Space X Falcon 9 First Stage Landing Prediction**\n", "_____no_output_____" ], [ "## Web scraping Falcon 9 and Falcon Heavy Launches Records from Wikipedia\n", "_____no_output_____" ], [ "We will be performing web scraping to collect Falcon 9 historical launch records from a Wikipedia page titled `List of Falcon 9 and Falcon Heavy launches`\n\n[https://en.wikipedia.org/wiki/List_of_Falcon\\_9\\_and_Falcon_Heavy_launches](https://en.wikipedia.org/wiki/List_of_Falcon\\_9\\_and_Falcon_Heavy_launches?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01)\n", "_____no_output_____" ], [ "More specifically, the launch records are stored in a HTML table.\n", "_____no_output_____" ], [ "First let's import required packages for this lab\n", "_____no_output_____" ] ], [ [ "!pip3 install beautifulsoup4\n!pip3 install requests", "Collecting beautifulsoup4\n Downloading beautifulsoup4-4.10.0-py3-none-any.whl (97 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m97.4/97.4 KB\u001b[0m \u001b[31m16.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hCollecting soupsieve>1.2\n Downloading soupsieve-2.3.1-py3-none-any.whl (37 kB)\nInstalling collected packages: soupsieve, beautifulsoup4\nSuccessfully installed beautifulsoup4-4.10.0 soupsieve-2.3.1\nRequirement already satisfied: requests in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (2.27.1)\nRequirement already satisfied: certifi>=2017.4.17 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from requests) (2021.10.8)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from requests) (1.26.8)\nRequirement already satisfied: idna<4,>=2.5 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from requests) (3.3)\nRequirement already satisfied: charset-normalizer~=2.0.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from requests) (2.0.12)\n" ], [ "import sys\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport unicodedata\nimport pandas as pd", "_____no_output_____" ] ], [ [ "Some helper functions for processing web scraped HTML table\n", "_____no_output_____" ] ], [ [ "def date_time(table_cells):\n \"\"\"\n This function returns the data and time from the HTML table cell\n Input: the element of a table data cell extracts extra row\n \"\"\"\n return [data_time.strip() for data_time in list(table_cells.strings)][0:2]\n\ndef booster_version(table_cells):\n \"\"\"\n This function returns the booster version from the HTML table cell \n Input: the element of a table data cell extracts extra row\n \"\"\"\n out=''.join([booster_version for i,booster_version in enumerate( table_cells.strings) if i%2==0][0:-1])\n return out\n\ndef landing_status(table_cells):\n \"\"\"\n This function returns the landing status from the HTML table cell \n Input: the element of a table data cell extracts extra row\n \"\"\"\n out=[i for i in table_cells.strings][0]\n return out\n\n\ndef get_mass(table_cells):\n mass=unicodedata.normalize(\"NFKD\", table_cells.text).strip()\n if mass:\n mass.find(\"kg\")\n new_mass=mass[0:mass.find(\"kg\")+2]\n else:\n new_mass=0\n return new_mass\n\n\ndef extract_column_from_header(row):\n \"\"\"\n This function returns the landing status from the HTML table cell \n Input: the element of a table data cell extracts extra row\n 
\"\"\"\n if (row.br):\n row.br.extract()\n if row.a:\n row.a.extract()\n if row.sup:\n row.sup.extract()\n \n colunm_name = ' '.join(row.contents)\n \n # Filter the digit and empty names\n if not(colunm_name.strip().isdigit()):\n colunm_name = colunm_name.strip()\n return colunm_name \n", "_____no_output_____" ] ], [ [ "To keep the lab tasks consistent, we will be asked to scrape the data from a snapshot of the `List of Falcon 9 and Falcon Heavy launches` Wikipage updated on\n`9th June 2021`\n", "_____no_output_____" ] ], [ [ "static_url = \"https://en.wikipedia.org/w/index.php?title=List_of_Falcon_9_and_Falcon_Heavy_launches&oldid=1027686922\"", "_____no_output_____" ] ], [ [ "Next, request the HTML page from the above URL and get a `response` object\n", "_____no_output_____" ], [ "### TASK 1: Request the Falcon9 Launch Wiki page from its URL\n", "_____no_output_____" ], [ "First, let's perform an HTTP GET method to request the Falcon9 Launch HTML page, as an HTTP response.\n", "_____no_output_____" ] ], [ [ "# use requests.get() method with the provided static_url\n# assign the response to a object\npage=requests.get(static_url)", "_____no_output_____" ] ], [ [ "Create a `BeautifulSoup` object from the HTML `response`\n", "_____no_output_____" ] ], [ [ "# Use BeautifulSoup() to create a BeautifulSoup object from a response text content\nsoup = BeautifulSoup(page.text, 'html.parser')", "_____no_output_____" ] ], [ [ "Print the page title to verify if the `BeautifulSoup` object was created properly\n", "_____no_output_____" ] ], [ [ "# Use soup.title attribute\nsoup.title", "_____no_output_____" ] ], [ [ "### TASK 2: Extract all column/variable names from the HTML table header\n", "_____no_output_____" ], [ "Next, we want to collect all relevant column names from the HTML table header\n", "_____no_output_____" ], [ "Let's try to find all tables on the wiki page first. 
", "_____no_output_____" ] ], [ [ "# Use the find_all function in the BeautifulSoup object, with element type `table`\n# Assign the result to a list called `html_tables`\nhtml_tables=soup.find_all('table')", "_____no_output_____" ] ], [ [ "Starting from the third table is our target table contains the actual launch records.\n", "_____no_output_____" ] ], [ [ "# Let's print the third table and check its content\nfirst_launch_table = html_tables[2]\nprint(first_launch_table)", "<table class=\"wikitable plainrowheaders collapsible\" style=\"width: 100%;\">\n<tbody><tr>\n<th scope=\"col\">Flight No.\n</th>\n<th scope=\"col\">Date and<br/>time (<a href=\"/wiki/Coordinated_Universal_Time\" title=\"Coordinated Universal Time\">UTC</a>)\n</th>\n<th scope=\"col\"><a href=\"/wiki/List_of_Falcon_9_first-stage_boosters\" title=\"List of Falcon 9 first-stage boosters\">Version,<br/>Booster</a> <sup class=\"reference\" id=\"cite_ref-booster_11-0\"><a href=\"#cite_note-booster-11\">[b]</a></sup>\n</th>\n<th scope=\"col\">Launch site\n</th>\n<th scope=\"col\">Payload<sup class=\"reference\" id=\"cite_ref-Dragon_12-0\"><a href=\"#cite_note-Dragon-12\">[c]</a></sup>\n</th>\n<th scope=\"col\">Payload mass\n</th>\n<th scope=\"col\">Orbit\n</th>\n<th scope=\"col\">Customer\n</th>\n<th scope=\"col\">Launch<br/>outcome\n</th>\n<th scope=\"col\"><a href=\"/wiki/Falcon_9_first-stage_landing_tests\" title=\"Falcon 9 first-stage landing tests\">Booster<br/>landing</a>\n</th></tr>\n<tr>\n<th rowspan=\"2\" scope=\"row\" style=\"text-align:center;\">1\n</th>\n<td>4 June 2010,<br/>18:45\n</td>\n<td><a href=\"/wiki/Falcon_9_v1.0\" title=\"Falcon 9 v1.0\">F9 v1.0</a><sup class=\"reference\" id=\"cite_ref-MuskMay2012_13-0\"><a href=\"#cite_note-MuskMay2012-13\">[7]</a></sup><br/>B0003.1<sup class=\"reference\" id=\"cite_ref-block_numbers_14-0\"><a href=\"#cite_note-block_numbers-14\">[8]</a></sup>\n</td>\n<td><a href=\"/wiki/Cape_Canaveral_Space_Force_Station\" title=\"Cape Canaveral Space Force Station\">CCAFS</a>,<br/><a href=\"/wiki/Cape_Canaveral_Space_Launch_Complex_40\" title=\"Cape Canaveral Space Launch Complex 40\">SLC-40</a>\n</td>\n<td><a href=\"/wiki/Dragon_Spacecraft_Qualification_Unit\" title=\"Dragon Spacecraft Qualification Unit\">Dragon Spacecraft Qualification Unit</a>\n</td>\n<td>\n</td>\n<td><a href=\"/wiki/Low_Earth_orbit\" title=\"Low Earth orbit\">LEO</a>\n</td>\n<td><a href=\"/wiki/SpaceX\" title=\"SpaceX\">SpaceX</a>\n</td>\n<td class=\"table-success\" style=\"background: #9EFF9E; vertical-align: middle; text-align: center;\">Success\n</td>\n<td class=\"table-failure\" style=\"background: #FFC7C7; vertical-align: middle; text-align: center;\">Failure<sup class=\"reference\" id=\"cite_ref-ns20110930_15-0\"><a href=\"#cite_note-ns20110930-15\">[9]</a></sup><sup class=\"reference\" id=\"cite_ref-16\"><a href=\"#cite_note-16\">[10]</a></sup><br/><small>(parachute)</small>\n</td></tr>\n<tr>\n<td colspan=\"9\">First flight of Falcon 9 v1.0.<sup class=\"reference\" id=\"cite_ref-sfn20100604_17-0\"><a href=\"#cite_note-sfn20100604-17\">[11]</a></sup> Used a boilerplate version of Dragon capsule which was not designed to separate from the second stage.<small>(<a href=\"#First_flight_of_Falcon_9\">more details below</a>)</small> Attempted to recover the first stage by parachuting it into the ocean, but it burned up on reentry, before the parachutes even deployed.<sup class=\"reference\" id=\"cite_ref-parachute_18-0\"><a href=\"#cite_note-parachute-18\">[12]</a></sup>\n</td></tr>\n<tr>\n<th rowspan=\"2\" 
scope=\"row\" style=\"text-align:center;\">2\n</th>\n<td>8 December 2010,<br/>15:43<sup class=\"reference\" id=\"cite_ref-spaceflightnow_Clark_Launch_Report_19-0\"><a href=\"#cite_note-spaceflightnow_Clark_Launch_Report-19\">[13]</a></sup>\n</td>\n<td><a href=\"/wiki/Falcon_9_v1.0\" title=\"Falcon 9 v1.0\">F9 v1.0</a><sup class=\"reference\" id=\"cite_ref-MuskMay2012_13-1\"><a href=\"#cite_note-MuskMay2012-13\">[7]</a></sup><br/>B0004.1<sup class=\"reference\" id=\"cite_ref-block_numbers_14-1\"><a href=\"#cite_note-block_numbers-14\">[8]</a></sup>\n</td>\n<td><a href=\"/wiki/Cape_Canaveral_Space_Force_Station\" title=\"Cape Canaveral Space Force Station\">CCAFS</a>,<br/><a href=\"/wiki/Cape_Canaveral_Space_Launch_Complex_40\" title=\"Cape Canaveral Space Launch Complex 40\">SLC-40</a>\n</td>\n<td><a href=\"/wiki/SpaceX_Dragon\" title=\"SpaceX Dragon\">Dragon</a> <a class=\"mw-redirect\" href=\"/wiki/COTS_Demo_Flight_1\" title=\"COTS Demo Flight 1\">demo flight C1</a><br/>(Dragon C101)\n</td>\n<td>\n</td>\n<td><a href=\"/wiki/Low_Earth_orbit\" title=\"Low Earth orbit\">LEO</a> (<a href=\"/wiki/International_Space_Station\" title=\"International Space Station\">ISS</a>)\n</td>\n<td><div class=\"plainlist\">\n<ul><li><a href=\"/wiki/NASA\" title=\"NASA\">NASA</a> (<a href=\"/wiki/Commercial_Orbital_Transportation_Services\" title=\"Commercial Orbital Transportation Services\">COTS</a>)</li>\n<li><a href=\"/wiki/National_Reconnaissance_Office\" title=\"National Reconnaissance Office\">NRO</a></li></ul>\n</div>\n</td>\n<td class=\"table-success\" style=\"background: #9EFF9E; vertical-align: middle; text-align: center;\">Success<sup class=\"reference\" id=\"cite_ref-ns20110930_15-1\"><a href=\"#cite_note-ns20110930-15\">[9]</a></sup>\n</td>\n<td class=\"table-failure\" style=\"background: #FFC7C7; vertical-align: middle; text-align: center;\">Failure<sup class=\"reference\" id=\"cite_ref-ns20110930_15-2\"><a href=\"#cite_note-ns20110930-15\">[9]</a></sup><sup class=\"reference\" id=\"cite_ref-20\"><a href=\"#cite_note-20\">[14]</a></sup><br/><small>(parachute)</small>\n</td></tr>\n<tr>\n<td colspan=\"9\">Maiden flight of <a class=\"mw-redirect\" href=\"/wiki/Dragon_capsule\" title=\"Dragon capsule\">Dragon capsule</a>, consisting of over 3 hours of testing thruster maneuvering and reentry.<sup class=\"reference\" id=\"cite_ref-spaceflightnow_Clark_unleashing_Dragon_21-0\"><a href=\"#cite_note-spaceflightnow_Clark_unleashing_Dragon-21\">[15]</a></sup> Attempted to recover the first stage by parachuting it into the ocean, but it disintegrated upon reentry, before the parachutes were deployed.<sup class=\"reference\" id=\"cite_ref-parachute_18-1\"><a href=\"#cite_note-parachute-18\">[12]</a></sup> <small>(<a href=\"#COTS_demo_missions\">more details below</a>)</small> It also included two <a href=\"/wiki/CubeSat\" title=\"CubeSat\">CubeSats</a>,<sup class=\"reference\" id=\"cite_ref-NRO_Taps_Boeing_for_Next_Batch_of_CubeSats_22-0\"><a href=\"#cite_note-NRO_Taps_Boeing_for_Next_Batch_of_CubeSats-22\">[16]</a></sup> and a wheel of <a href=\"/wiki/Brou%C3%A8re\" title=\"Brouère\">Brouère</a> cheese.\n</td></tr>\n<tr>\n<th rowspan=\"2\" scope=\"row\" style=\"text-align:center;\">3\n</th>\n<td>22 May 2012,<br/>07:44<sup class=\"reference\" id=\"cite_ref-BBC_new_era_23-0\"><a href=\"#cite_note-BBC_new_era-23\">[17]</a></sup>\n</td>\n<td><a href=\"/wiki/Falcon_9_v1.0\" title=\"Falcon 9 v1.0\">F9 v1.0</a><sup class=\"reference\" id=\"cite_ref-MuskMay2012_13-2\"><a 
href=\"#cite_note-MuskMay2012-13\">[7]</a></sup><br/>B0005.1<sup class=\"reference\" id=\"cite_ref-block_numbers_14-2\"><a href=\"#cite_note-block_numbers-14\">[8]</a></sup>\n</td>\n<td><a href=\"/wiki/Cape_Canaveral_Space_Force_Station\" title=\"Cape Canaveral Space Force Station\">CCAFS</a>,<br/><a href=\"/wiki/Cape_Canaveral_Space_Launch_Complex_40\" title=\"Cape Canaveral Space Launch Complex 40\">SLC-40</a>\n</td>\n<td><a href=\"/wiki/SpaceX_Dragon\" title=\"SpaceX Dragon\">Dragon</a> <a class=\"mw-redirect\" href=\"/wiki/Dragon_C2%2B\" title=\"Dragon C2+\">demo flight C2+</a><sup class=\"reference\" id=\"cite_ref-C2_24-0\"><a href=\"#cite_note-C2-24\">[18]</a></sup><br/>(Dragon C102)\n</td>\n<td>525 kg (1,157 lb)<sup class=\"reference\" id=\"cite_ref-25\"><a href=\"#cite_note-25\">[19]</a></sup>\n</td>\n<td><a href=\"/wiki/Low_Earth_orbit\" title=\"Low Earth orbit\">LEO</a> (<a href=\"/wiki/International_Space_Station\" title=\"International Space Station\">ISS</a>)\n</td>\n<td><a href=\"/wiki/NASA\" title=\"NASA\">NASA</a> (<a href=\"/wiki/Commercial_Orbital_Transportation_Services\" title=\"Commercial Orbital Transportation Services\">COTS</a>)\n</td>\n<td class=\"table-success\" style=\"background: #9EFF9E; vertical-align: middle; text-align: center;\">Success<sup class=\"reference\" id=\"cite_ref-26\"><a href=\"#cite_note-26\">[20]</a></sup>\n</td>\n<td class=\"table-noAttempt\" style=\"background: #EEE; vertical-align: middle; white-space: nowrap; text-align: center;\">No attempt\n</td></tr>\n<tr>\n<td colspan=\"9\">Dragon spacecraft demonstrated a series of tests before it was allowed to approach the <a href=\"/wiki/International_Space_Station\" title=\"International Space Station\">International Space Station</a>. Two days later, it became the first commercial spacecraft to board the ISS.<sup class=\"reference\" id=\"cite_ref-BBC_new_era_23-1\"><a href=\"#cite_note-BBC_new_era-23\">[17]</a></sup> <small>(<a href=\"#COTS_demo_missions\">more details below</a>)</small>\n</td></tr>\n<tr>\n<th rowspan=\"3\" scope=\"row\" style=\"text-align:center;\">4\n</th>\n<td rowspan=\"2\">8 October 2012,<br/>00:35<sup class=\"reference\" id=\"cite_ref-SFN_LLog_27-0\"><a href=\"#cite_note-SFN_LLog-27\">[21]</a></sup>\n</td>\n<td rowspan=\"2\"><a href=\"/wiki/Falcon_9_v1.0\" title=\"Falcon 9 v1.0\">F9 v1.0</a><sup class=\"reference\" id=\"cite_ref-MuskMay2012_13-3\"><a href=\"#cite_note-MuskMay2012-13\">[7]</a></sup><br/>B0006.1<sup class=\"reference\" id=\"cite_ref-block_numbers_14-3\"><a href=\"#cite_note-block_numbers-14\">[8]</a></sup>\n</td>\n<td rowspan=\"2\"><a href=\"/wiki/Cape_Canaveral_Space_Force_Station\" title=\"Cape Canaveral Space Force Station\">CCAFS</a>,<br/><a href=\"/wiki/Cape_Canaveral_Space_Launch_Complex_40\" title=\"Cape Canaveral Space Launch Complex 40\">SLC-40</a>\n</td>\n<td><a href=\"/wiki/SpaceX_CRS-1\" title=\"SpaceX CRS-1\">SpaceX CRS-1</a><sup class=\"reference\" id=\"cite_ref-sxManifest20120925_28-0\"><a href=\"#cite_note-sxManifest20120925-28\">[22]</a></sup><br/>(Dragon C103)\n</td>\n<td>4,700 kg (10,400 lb)\n</td>\n<td><a href=\"/wiki/Low_Earth_orbit\" title=\"Low Earth orbit\">LEO</a> (<a href=\"/wiki/International_Space_Station\" title=\"International Space Station\">ISS</a>)\n</td>\n<td><a href=\"/wiki/NASA\" title=\"NASA\">NASA</a> (<a href=\"/wiki/Commercial_Resupply_Services\" title=\"Commercial Resupply Services\">CRS</a>)\n</td>\n<td class=\"table-success\" style=\"background: #9EFF9E; vertical-align: middle; text-align: center;\">Success\n</td>\n<td 
rowspan=\"2\" style=\"background:#ececec; text-align:center;\"><span class=\"nowrap\">No attempt</span>\n</td></tr>\n<tr>\n<td><a href=\"/wiki/Orbcomm_(satellite)\" title=\"Orbcomm (satellite)\">Orbcomm-OG2</a><sup class=\"reference\" id=\"cite_ref-Orbcomm_29-0\"><a href=\"#cite_note-Orbcomm-29\">[23]</a></sup>\n</td>\n<td>172 kg (379 lb)<sup class=\"reference\" id=\"cite_ref-gunter-og2_30-0\"><a href=\"#cite_note-gunter-og2-30\">[24]</a></sup>\n</td>\n<td><a href=\"/wiki/Low_Earth_orbit\" title=\"Low Earth orbit\">LEO</a>\n</td>\n<td><a href=\"/wiki/Orbcomm\" title=\"Orbcomm\">Orbcomm</a>\n</td>\n<td class=\"table-partial\" style=\"background: #FE9; vertical-align: middle; text-align: center;\">Partial failure<sup class=\"reference\" id=\"cite_ref-nyt-20121030_31-0\"><a href=\"#cite_note-nyt-20121030-31\">[25]</a></sup>\n</td></tr>\n<tr>\n<td colspan=\"9\">CRS-1 was successful, but the <a href=\"/wiki/Secondary_payload\" title=\"Secondary payload\">secondary payload</a> was inserted into an abnormally low orbit and subsequently lost. This was due to one of the nine <a href=\"/wiki/SpaceX_Merlin\" title=\"SpaceX Merlin\">Merlin engines</a> shutting down during the launch, and NASA declining a second reignition, as per <a href=\"/wiki/International_Space_Station\" title=\"International Space Station\">ISS</a> visiting vehicle safety rules, the primary payload owner is contractually allowed to decline a second reignition. NASA stated that this was because SpaceX could not guarantee a high enough likelihood of the second stage completing the second burn successfully which was required to avoid any risk of secondary payload's collision with the ISS.<sup class=\"reference\" id=\"cite_ref-OrbcommTotalLoss_32-0\"><a href=\"#cite_note-OrbcommTotalLoss-32\">[26]</a></sup><sup class=\"reference\" id=\"cite_ref-sn20121011_33-0\"><a href=\"#cite_note-sn20121011-33\">[27]</a></sup><sup class=\"reference\" id=\"cite_ref-34\"><a href=\"#cite_note-34\">[28]</a></sup>\n</td></tr>\n<tr>\n<th rowspan=\"2\" scope=\"row\" style=\"text-align:center;\">5\n</th>\n<td>1 March 2013,<br/>15:10\n</td>\n<td><a href=\"/wiki/Falcon_9_v1.0\" title=\"Falcon 9 v1.0\">F9 v1.0</a><sup class=\"reference\" id=\"cite_ref-MuskMay2012_13-4\"><a href=\"#cite_note-MuskMay2012-13\">[7]</a></sup><br/>B0007.1<sup class=\"reference\" id=\"cite_ref-block_numbers_14-4\"><a href=\"#cite_note-block_numbers-14\">[8]</a></sup>\n</td>\n<td><a href=\"/wiki/Cape_Canaveral_Space_Force_Station\" title=\"Cape Canaveral Space Force Station\">CCAFS</a>,<br/><a href=\"/wiki/Cape_Canaveral_Space_Launch_Complex_40\" title=\"Cape Canaveral Space Launch Complex 40\">SLC-40</a>\n</td>\n<td><a href=\"/wiki/SpaceX_CRS-2\" title=\"SpaceX CRS-2\">SpaceX CRS-2</a><sup class=\"reference\" id=\"cite_ref-sxManifest20120925_28-1\"><a href=\"#cite_note-sxManifest20120925-28\">[22]</a></sup><br/>(Dragon C104)\n</td>\n<td>4,877 kg (10,752 lb)\n</td>\n<td><a href=\"/wiki/Low_Earth_orbit\" title=\"Low Earth orbit\">LEO</a> (<a class=\"mw-redirect\" href=\"/wiki/ISS\" title=\"ISS\">ISS</a>)\n</td>\n<td><a href=\"/wiki/NASA\" title=\"NASA\">NASA</a> (<a href=\"/wiki/Commercial_Resupply_Services\" title=\"Commercial Resupply Services\">CRS</a>)\n</td>\n<td class=\"table-success\" style=\"background: #9EFF9E; vertical-align: middle; text-align: center;\">Success\n</td>\n<td class=\"table-noAttempt\" style=\"background: #EEE; vertical-align: middle; white-space: nowrap; text-align: center;\">No attempt\n</td></tr>\n<tr>\n<td colspan=\"9\">Last launch of the original Falcon 
9 v1.0 <a href=\"/wiki/Launch_vehicle\" title=\"Launch vehicle\">launch vehicle</a>, first use of the unpressurized trunk section of Dragon.<sup class=\"reference\" id=\"cite_ref-sxf9_20110321_35-0\"><a href=\"#cite_note-sxf9_20110321-35\">[29]</a></sup>\n</td></tr>\n<tr>\n<th rowspan=\"2\" scope=\"row\" style=\"text-align:center;\">6\n</th>\n<td>29 September 2013,<br/>16:00<sup class=\"reference\" id=\"cite_ref-pa20130930_36-0\"><a href=\"#cite_note-pa20130930-36\">[30]</a></sup>\n</td>\n<td><a href=\"/wiki/Falcon_9_v1.1\" title=\"Falcon 9 v1.1\">F9 v1.1</a><sup class=\"reference\" id=\"cite_ref-MuskMay2012_13-5\"><a href=\"#cite_note-MuskMay2012-13\">[7]</a></sup><br/>B1003<sup class=\"reference\" id=\"cite_ref-block_numbers_14-5\"><a href=\"#cite_note-block_numbers-14\">[8]</a></sup>\n</td>\n<td><a class=\"mw-redirect\" href=\"/wiki/Vandenberg_Air_Force_Base\" title=\"Vandenberg Air Force Base\">VAFB</a>,<br/><a href=\"/wiki/Vandenberg_Space_Launch_Complex_4\" title=\"Vandenberg Space Launch Complex 4\">SLC-4E</a>\n</td>\n<td><a href=\"/wiki/CASSIOPE\" title=\"CASSIOPE\">CASSIOPE</a><sup class=\"reference\" id=\"cite_ref-sxManifest20120925_28-2\"><a href=\"#cite_note-sxManifest20120925-28\">[22]</a></sup><sup class=\"reference\" id=\"cite_ref-CASSIOPE_MDA_37-0\"><a href=\"#cite_note-CASSIOPE_MDA-37\">[31]</a></sup>\n</td>\n<td>500 kg (1,100 lb)\n</td>\n<td><a href=\"/wiki/Polar_orbit\" title=\"Polar orbit\">Polar orbit</a> <a href=\"/wiki/Low_Earth_orbit\" title=\"Low Earth orbit\">LEO</a>\n</td>\n<td><a href=\"/wiki/Maxar_Technologies\" title=\"Maxar Technologies\">MDA</a>\n</td>\n<td class=\"table-success\" style=\"background: #9EFF9E; vertical-align: middle; text-align: center;\">Success<sup class=\"reference\" id=\"cite_ref-pa20130930_36-1\"><a href=\"#cite_note-pa20130930-36\">[30]</a></sup>\n</td>\n<td class=\"table-no2\" style=\"background: #FFE3E3; color: black; vertical-align: middle; text-align: center;\">Uncontrolled<br/><small>(ocean)</small><sup class=\"reference\" id=\"cite_ref-ocean_landing_38-0\"><a href=\"#cite_note-ocean_landing-38\">[d]</a></sup>\n</td></tr>\n<tr>\n<td colspan=\"9\">First commercial mission with a private customer, first launch from Vandenberg, and demonstration flight of Falcon 9 v1.1 with an improved 13-tonne to LEO capacity.<sup class=\"reference\" id=\"cite_ref-sxf9_20110321_35-1\"><a href=\"#cite_note-sxf9_20110321-35\">[29]</a></sup> After separation from the second stage carrying Canadian commercial and scientific satellites, the first stage booster performed a controlled reentry,<sup class=\"reference\" id=\"cite_ref-39\"><a href=\"#cite_note-39\">[32]</a></sup> and an <a href=\"/wiki/Falcon_9_first-stage_landing_tests\" title=\"Falcon 9 first-stage landing tests\">ocean touchdown test</a> for the first time. 
This provided good test data, even though the booster started rolling as it neared the ocean, leading to the shutdown of the central engine as the roll depleted it of fuel, resulting in a hard impact with the ocean.<sup class=\"reference\" id=\"cite_ref-pa20130930_36-2\"><a href=\"#cite_note-pa20130930-36\">[30]</a></sup> This was the first known attempt of a rocket engine being lit to perform a supersonic retro propulsion, and allowed SpaceX to enter a public-private partnership with <a href=\"/wiki/NASA\" title=\"NASA\">NASA</a> and its Mars entry, descent, and landing technologies research projects.<sup class=\"reference\" id=\"cite_ref-40\"><a href=\"#cite_note-40\">[33]</a></sup> <small>(<a href=\"#Maiden_flight_of_v1.1\">more details below</a>)</small>\n</td></tr>\n<tr>\n<th rowspan=\"2\" scope=\"row\" style=\"text-align:center;\">7\n</th>\n<td>3 December 2013,<br/>22:41<sup class=\"reference\" id=\"cite_ref-sfn_wwls20130624_41-0\"><a href=\"#cite_note-sfn_wwls20130624-41\">[34]</a></sup>\n</td>\n<td><a href=\"/wiki/Falcon_9_v1.1\" title=\"Falcon 9 v1.1\">F9 v1.1</a><br/>B1004\n</td>\n<td><a href=\"/wiki/Cape_Canaveral_Space_Force_Station\" title=\"Cape Canaveral Space Force Station\">CCAFS</a>,<br/><a href=\"/wiki/Cape_Canaveral_Space_Launch_Complex_40\" title=\"Cape Canaveral Space Launch Complex 40\">SLC-40</a>\n</td>\n<td><a href=\"/wiki/SES-8\" title=\"SES-8\">SES-8</a><sup class=\"reference\" id=\"cite_ref-sxManifest20120925_28-3\"><a href=\"#cite_note-sxManifest20120925-28\">[22]</a></sup><sup class=\"reference\" id=\"cite_ref-spx-pr_42-0\"><a href=\"#cite_note-spx-pr-42\">[35]</a></sup><sup class=\"reference\" id=\"cite_ref-aw20110323_43-0\"><a href=\"#cite_note-aw20110323-43\">[36]</a></sup>\n</td>\n<td>3,170 kg (6,990 lb)\n</td>\n<td><a href=\"/wiki/Geostationary_transfer_orbit\" title=\"Geostationary transfer orbit\">GTO</a>\n</td>\n<td><a href=\"/wiki/SES_S.A.\" title=\"SES S.A.\">SES</a>\n</td>\n<td class=\"table-success\" style=\"background: #9EFF9E; vertical-align: middle; text-align: center;\">Success<sup class=\"reference\" id=\"cite_ref-SNMissionStatus7_44-0\"><a href=\"#cite_note-SNMissionStatus7-44\">[37]</a></sup>\n</td>\n<td class=\"table-noAttempt\" style=\"background: #EEE; vertical-align: middle; white-space: nowrap; text-align: center;\">No attempt<br/><sup class=\"reference\" id=\"cite_ref-sf10120131203_45-0\"><a href=\"#cite_note-sf10120131203-45\">[38]</a></sup>\n</td></tr>\n<tr>\n<td colspan=\"9\">First <a href=\"/wiki/Geostationary_transfer_orbit\" title=\"Geostationary transfer orbit\">Geostationary transfer orbit</a> (GTO) launch for Falcon 9,<sup class=\"reference\" id=\"cite_ref-spx-pr_42-1\"><a href=\"#cite_note-spx-pr-42\">[35]</a></sup> and first successful reignition of the second stage.<sup class=\"reference\" id=\"cite_ref-46\"><a href=\"#cite_note-46\">[39]</a></sup> SES-8 was inserted into a <a href=\"/wiki/Geostationary_transfer_orbit\" title=\"Geostationary transfer orbit\">Super-Synchronous Transfer Orbit</a> of 79,341 km (49,300 mi) in apogee with an <a href=\"/wiki/Orbital_inclination\" title=\"Orbital inclination\">inclination</a> of 20.55° to the <a href=\"/wiki/Equator\" title=\"Equator\">equator</a>.\n</td></tr></tbody></table>\n" ] ], [ [ "We can see the columns names embedded in the table header elements `<th>` as follows:\n", "_____no_output_____" ], [ "```\n<tr>\n<th scope=\"col\">Flight No.\n</th>\n<th scope=\"col\">Date and<br/>time (<a href=\"/wiki/Coordinated_Universal_Time\" title=\"Coordinated Universal 
Time\">UTC</a>)\n</th>\n<th scope=\"col\"><a href=\"/wiki/List_of_Falcon_9_first-stage_boosters\" title=\"List of Falcon 9 first-stage boosters\">Version,<br/>Booster</a> <sup class=\"reference\" id=\"cite_ref-booster_11-0\"><a href=\"#cite_note-booster-11\">[b]</a></sup>\n</th>\n<th scope=\"col\">Launch site\n</th>\n<th scope=\"col\">Payload<sup class=\"reference\" id=\"cite_ref-Dragon_12-0\"><a href=\"#cite_note-Dragon-12\">[c]</a></sup>\n</th>\n<th scope=\"col\">Payload mass\n</th>\n<th scope=\"col\">Orbit\n</th>\n<th scope=\"col\">Customer\n</th>\n<th scope=\"col\">Launch<br/>outcome\n</th>\n<th scope=\"col\"><a href=\"/wiki/Falcon_9_first-stage_landing_tests\" title=\"Falcon 9 first-stage landing tests\">Booster<br/>landing</a>\n</th></tr>\n```\n", "_____no_output_____" ], [ "Next, we just need to iterate through the `<th>` elements and apply the provided `extract_column_from_header()` to extract column name one by one\n", "_____no_output_____" ] ], [ [ "column_names = []\n\n# Apply find_all() function with `th` element on first_launch_table\n# Iterate each th element and apply the provided extract_column_from_header() to get a column name\n# Append the Non-empty column name (`if name is not None and len(name) > 0`) into a list called column_names\nfor i in first_launch_table.find_all('th'):\n if extract_column_from_header(i)!=None:\n if len(extract_column_from_header(i))>0:\n column_names.append(extract_column_from_header(i))", "_____no_output_____" ] ], [ [ "Check the extracted column names\n", "_____no_output_____" ] ], [ [ "print(column_names)", "['Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome']\n" ] ], [ [ "## TASK 3: Create a data frame by parsing the launch HTML tables\n", "_____no_output_____" ], [ "We will create an empty dictionary with keys from the extracted column names in the previous task. 
Later, this dictionary will be converted into a Pandas dataframe\n", "_____no_output_____" ] ], [ [ "launch_dict= dict.fromkeys(column_names)\n\n# Remove an irrelvant column\ndel launch_dict['Date and time ( )']\n\n# Let's initial the launch_dict with each value to be an empty list\nlaunch_dict['Flight No.'] = []\nlaunch_dict['Launch site'] = []\nlaunch_dict['Payload'] = []\nlaunch_dict['Payload mass'] = []\nlaunch_dict['Orbit'] = []\nlaunch_dict['Customer'] = []\nlaunch_dict['Launch outcome'] = []\n# Added some new columns\nlaunch_dict['Version Booster']=[]\nlaunch_dict['Booster landing']=[]\nlaunch_dict['Date']=[]\nlaunch_dict['Time']=[]", "_____no_output_____" ] ], [ [ "Next, we just need to fill up the `launch_dict` with launch records extracted from table rows.\n", "_____no_output_____" ], [ "Usually, HTML tables in Wiki pages are likely to contain unexpected annotations and other types of noises, such as reference links `B0004.1[8]`, missing values `N/A [e]`, inconsistent formatting, etc.\n", "_____no_output_____" ] ], [ [ "extracted_row = 0\n#Extract each table \nfor table_number,table in enumerate(soup.find_all('table',\"wikitable plainrowheaders collapsible\")):\n # get table row \n for rows in table.find_all(\"tr\"):\n #check to see if first table heading is as number corresponding to launch a number \n if rows.th:\n if rows.th.string:\n flight_number=rows.th.string.strip()\n flag=flight_number.isdigit()\n else:\n flag=False\n #get table element \n row=rows.find_all('td')\n #if it is number save cells in a dictonary \n if flag:\n extracted_row += 1\n # Flight Number value\n # TODO: Append the flight_number into launch_dict with key `Flight No.`\n launch_dict['Flight No.'].append(flight_number)\n print(flight_number)\n datatimelist=date_time(row[0])\n \n # Date value\n # TODO: Append the date into launch_dict with key `Date`\n date = datatimelist[0].strip(',')\n launch_dict['Date'].append(date)\n print(date)\n \n # Time value\n # TODO: Append the time into launch_dict with key `Time`\n time = datatimelist[1]\n launch_dict['Time'].append(time)\n print(time)\n \n # Booster version\n # TODO: Append the bv into launch_dict with key `Version Booster`\n bv=booster_version(row[1])\n if not(bv):\n bv=row[1].a.string\n launch_dict['Version Booster'].append(bv)\n print(bv)\n \n # Launch Site\n # TODO: Append the bv into launch_dict with key `Launch Site`\n launch_site = row[2].a.string\n launch_dict['Launch site'].append(launch_site)\n print(launch_site)\n \n # Payload\n # TODO: Append the payload into launch_dict with key `Payload`\n payload = row[3].a.string\n launch_dict['Payload'].append(payload)\n print(payload)\n \n # Payload Mass\n # TODO: Append the payload_mass into launch_dict with key `Payload mass`\n payload_mass = get_mass(row[4])\n launch_dict['Payload mass'].append(payload_mass)\n print(payload_mass)\n \n # Orbit\n # TODO: Append the orbit into launch_dict with key `Orbit`\n orbit = row[5].a.string\n launch_dict['Orbit'].append(orbit)\n print(orbit)\n \n # Customer\n # TODO: Append the customer into launch_dict with key `Customer`\n if row[6].a!=None:\n customer = row[6].a.string\n else: \n customer='None'\n launch_dict['Customer'].append(customer)\n print(customer)\n \n # Launch outcome\n # TODO: Append the launch_outcome into launch_dict with key `Launch outcome`\n launch_outcome = list(row[7].strings)[0]\n launch_dict['Launch outcome'].append(launch_outcome)\n print(launch_outcome)\n \n # Booster landing\n # TODO: Append the launch_outcome into launch_dict with key 
`Booster landing`\n booster_landing = landing_status(row[8])\n launch_dict['Booster landing'].append(booster_landing)\n print(booster_landing)\n \n print(\"******\")\n ", "1\n4 June 2010\n18:45\nF9 v1.0B0003.1\nCCAFS\nDragon Spacecraft Qualification Unit\n0\nLEO\nSpaceX\nSuccess\n\nFailure\n******\n2\n8 December 2010\n15:43\nF9 v1.0B0004.1\nCCAFS\nDragon\n0\nLEO\nNASA\nSuccess\nFailure\n******\n3\n22 May 2012\n07:44\nF9 v1.0B0005.1\nCCAFS\nDragon\n525 kg\nLEO\nNASA\nSuccess\nNo attempt\n\n******\n4\n8 October 2012\n00:35\nF9 v1.0B0006.1\nCCAFS\nSpaceX CRS-1\n4,700 kg\nLEO\nNASA\nSuccess\n\nNo attempt\n******\n5\n1 March 2013\n15:10\nF9 v1.0B0007.1\nCCAFS\nSpaceX CRS-2\n4,877 kg\nLEO\nNASA\nSuccess\n\nNo attempt\n\n******\n6\n29 September 2013\n16:00\nF9 v1.1B1003\nVAFB\nCASSIOPE\n500 kg\nPolar orbit\nMDA\nSuccess\nUncontrolled\n******\n7\n3 December 2013\n22:41\nF9 v1.1\nCCAFS\nSES-8\n3,170 kg\nGTO\nSES\nSuccess\nNo attempt\n******\n8\n6 January 2014\n22:06\nF9 v1.1\nCCAFS\nThaicom 6\n3,325 kg\nGTO\nThaicom\nSuccess\nNo attempt\n******\n9\n18 April 2014\n19:25\nF9 v1.1\nCape Canaveral\nSpaceX CRS-3\n2,296 kg\nLEO\nNASA\nSuccess\n\nControlled\n******\n10\n14 July 2014\n15:15\nF9 v1.1\nCape Canaveral\nOrbcomm-OG2\n1,316 kg\nLEO\nOrbcomm\nSuccess\nControlled\n******\n11\n5 August 2014\n08:00\nF9 v1.1\nCape Canaveral\nAsiaSat 8\n4,535 kg\nGTO\nAsiaSat\nSuccess\nNo attempt\n******\n12\n7 September 2014\n05:00\nF9 v1.1\nCape Canaveral\nAsiaSat 6\n4,428 kg\nGTO\nAsiaSat\nSuccess\nNo attempt\n\n******\n13\n21 September 2014\n05:52\nF9 v1.1\nCape Canaveral\nSpaceX CRS-4\n2,216 kg\nLEO\nNASA\nSuccess\nUncontrolled\n******\n14\n10 January 2015\n09:47\nF9 v1.1\nCape Canaveral\nSpaceX CRS-5\n2,395 kg\nLEO\nNASA\nSuccess\nFailure \n******\n15\n11 February 2015\n23:03\nF9 v1.1\nCape Canaveral\nDSCOVR\n570 kg\nHEO\nUSAF\nSuccess\n\nControlled\n******\n16\n2 March 2015\n03:50\nF9 v1.1\nCape Canaveral\nABS-3A\n4,159 kg\nGTO\nABS\nSuccess\n\nNo attempt\n******\n17\n14 April 2015\n20:10\nF9 v1.1\nCape Canaveral\nSpaceX CRS-6\n1,898 kg\nLEO\nNASA\nSuccess\n\nFailure\n******\n18\n27 April 2015\n23:03\nF9 v1.1\nCape Canaveral\nTürkmenÄlem 52°E / MonacoSAT\n4,707 kg\nGTO\nNone\nSuccess\n\nNo attempt\n******\n19\n28 June 2015\n14:21\nF9 v1.1\nCape Canaveral\nSpaceX CRS-7\n1,952 kg\nLEO\nNASA\nFailure\nPrecluded\n******\n20\n22 December 2015\n01:29\nF9 FT\nCape Canaveral\nOrbcomm-OG2\n2,034 kg\nLEO\nOrbcomm\nSuccess\n\nSuccess\n******\n21\n17 January 2016\n18:42\nF9 v1.1\nVAFB\nJason-3\n553 kg\nLEO\nNASA\nSuccess\n\nFailure\n******\n22\n4 March 2016\n23:35\nF9 FT\nCape Canaveral\nSES-9\n5,271 kg\nGTO\nSES\nSuccess\n\nFailure\n******\n23\n8 April 2016\n20:43\nF9 FT\nCape Canaveral\nSpaceX CRS-8\n3,136 kg\nLEO\nNASA\nSuccess\nSuccess\n******\n24\n6 May 2016\n05:21\nF9 FT\nCape Canaveral\nJCSAT-14\n4,696 kg\nGTO\nSKY Perfect JSAT Group\nSuccess\n\nSuccess\n******\n25\n27 May 2016\n21:39\nF9 FT\nCape Canaveral\nThaicom 8\n3,100 kg\nGTO\nThaicom\nSuccess\n\nSuccess\n******\n26\n15 June 2016\n14:29\nF9 FT\nCape Canaveral\nABS-2A\n3,600 kg\nGTO\nABS\nSuccess\n\nFailure\n******\n27\n18 July 2016\n04:45\nF9 FT\nCape Canaveral\nSpaceX CRS-9\n2,257 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n28\n14 August 2016\n05:26\nF9 FT\nCape Canaveral\nJCSAT-16\n4,600 kg\nGTO\nSKY Perfect JSAT Group\nSuccess\n\nSuccess\n******\n29\n14 January 2017\n17:54\nF9 FT\nVAFB\nIridium NEXT\n9,600 kg\nPolar\nIridium Communications\nSuccess\n\nSuccess\n******\n30\n19 February 2017\n14:39\nF9 FT\nKSC\nSpaceX CRS-10\n2,490 
kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n31\n16 March 2017\n06:00\nF9 FT\nKSC\nEchoStar 23\n5,600 kg\nGTO\nEchoStar\nSuccess\n\nNo attempt\n******\n32\n30 March 2017\n22:27\nF9 FT♺\nKSC\nSES-10\n5,300 kg\nGTO\nSES\nSuccess\nSuccess\n******\n33\n1 May 2017\n11:15\nF9 FT\nKSC\nNROL-76\nC\nLEO\nNRO\nSuccess\n\nSuccess\n******\n34\n15 May 2017\n23:21\nF9 FT\nKSC\nInmarsat-5 F4\n6,070 kg\nGTO\nInmarsat\nSuccess\n\nNo attempt\n******\n35\n3 June 2017\n21:07\nF9 FT\nKSC\nSpaceX CRS-11\n2,708 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n36\n23 June 2017\n19:10\nF9 FTB1029.2\nKSC\nBulgariaSat-1\n3,669 kg\nGTO\nBulsatcom\nSuccess\n\nSuccess\n******\n37\n25 June 2017\n20:25\nF9 FT\nVAFB\nIridium NEXT\n9,600 kg\nLEO\nIridium Communications\nSuccess\n\nSuccess\n******\n38\n5 July 2017\n23:38\nF9 FT\nKSC\nIntelsat 35e\n6,761 kg\nGTO\nIntelsat\nSuccess\n\nNo attempt\n******\n39\n14 August 2017\n16:31\nF9 B4\nKSC\nSpaceX CRS-12\n3,310 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n40\n24 August 2017\n18:51\nF9 FT\nVAFB\nFormosat-5\n475 kg\nSSO\nNSPO\nSuccess\n\nSuccess\n******\n41\n7 September 2017\n14:00\nF9 B4\nKSC\nBoeing X-37B\n4,990 kg\nLEO\nUSAF\nSuccess\n\nSuccess\n******\n42\n9 October 2017\n12:37\nF9 B4\nVAFB\nIridium NEXT\n9,600 kg\nPolar\nIridium Communications\nSuccess\n\nSuccess\n******\n43\n11 October 2017\n22:53:00\nF9 FTB1031.2\nKSC\nSES-11\n5,200 kg\nGTO\nSES S.A.\nSuccess\n\nSuccess\n******\n44\n30 October 2017\n19:34\nF9 B4\nKSC\nKoreasat 5A\n3,500 kg\nGTO\nKT Corporation\nSuccess\n\nSuccess\n******\n45\n15 December 2017\n15:36\nF9 FTB1035.2\nCape Canaveral\nSpaceX CRS-13\n2,205 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n46\n23 December 2017\n01:27\nF9 FTB1036.2\nVAFB\nIridium NEXT\n9,600 kg\nPolar\nIridium Communications\nSuccess\nControlled\n******\n47\n8 January 2018\n01:00\nF9 B4\nCCAFS\nZuma\nC\nLEO\nNorthrop Grumman\nSuccess\nSuccess\n******\n48\n31 January 2018\n21:25\nF9 FTB1032.2\nCCAFS\nGovSat-1\n4,230 kg\nGTO\nSES\nSuccess\nControlled\n******\n49\n22 February 2018\n14:17\nF9 FTB1038.2\nVAFB\nPaz\n2,150 kg\nSSO\nHisdesat\nSuccess\nNo attempt\n******\n50\n6 March 2018\n05:33\nF9 B4\nCCAFS\nHispasat 30W-6\n6,092 kg\nGTO\nHispasat\nSuccess\nNo attempt\n******\n51\n30 March 2018\n14:14\nF9 B4B1041.2\nVAFB\nIridium NEXT\n9,600 kg\nPolar\nIridium Communications\nSuccess\nNo attempt\n******\n52\n2 April 2018\n20:30\nF9 B4B1039.2\nCCAFS\nSpaceX CRS-14\n2,647 kg\nLEO\nNASA\nSuccess\nNo attempt\n******\n53\n18 April 2018\n22:51\nF9 B4\nCCAFS\nTransiting Exoplanet Survey Satellite\n362 kg\nHEO\nNASA\nSuccess\nSuccess\n******\n54\n11 May 2018\n20:14\nF9 B5B1046.1\nKSC\nBangabandhu-1\n3,600 kg\nGTO\nThales-Alenia\nSuccess\nSuccess\n******\n55\n22 May 2018\n19:47\nF9 B4B1043.2\nVAFB\nIridium NEXT\n6,460 kg\nPolar\nIridium Communications\nSuccess\nNo attempt\n******\n56\n4 June 2018\n04:45\nF9 B4B1040.2\nCCAFS\nSES-12\n5,384 kg\nGTO\nSES\nSuccess\nNo attempt\n******\n57\n29 June 2018\n09:42\nF9 B4B1045.2\nCCAFS\nSpaceX CRS-15\n2,697 kg\nLEO\nNASA\nSuccess\nNo attempt\n******\n58\n22 July 2018\n05:50\nF9 B5\nCCAFS\nTelstar 19V\n7,075 kg\nGTO\nTelesat\nSuccess\nSuccess\n******\n59\n25 July 2018\n11:39\nF9 B5B1048\nVAFB\nIridium NEXT\n9,600 kg\nPolar\nIridium Communications\nSuccess\nSuccess\n******\n60\n7 August 2018\n05:18\nF9 B5B1046.2\nCCAFS\nMerah Putih\n5,800 kg\nGTO\nTelkom Indonesia\nSuccess\nSuccess\n******\n61\n10 September 2018\n04:45\nF9 B5\nCCAFS\nTelstar 18V\n7,060 kg\nGTO\nTelesat\nSuccess\nSuccess\n******\n62\n8 October 2018\n02:22\nF9 B5B1048.2\nVAFB\nSAOCOM 1A\n3,000 
kg\nSSO\nCONAE\nSuccess\nSuccess\n******\n63\n15 November 2018\n20:46\nF9 B5B1047.2\nKSC\nEs'hail 2\n5,300 kg\nGTO\nEs'hailSat\nSuccess\nSuccess\n******\n64\n3 December 2018\n18:34:05\nF9 B5B1046.3\nVAFB\nSSO-A\n~4,000 kg\nSSO\nSpaceflight Industries\nSuccess\nSuccess\n******\n65\n5 December 2018\n18:16\nF9 B5\nCCAFS\nSpaceX CRS-16\n2,500 kg\nLEO\nNASA\nSuccess\n\nFailure\n******\n66\n23 December 2018\n13:51\nF9 B5\nCCAFS\nGPS III\n4,400 kg\nMEO\nUSAF\nSuccess\nNo attempt\n******\n67\n11 January 2019\n15:31\nF9 B5B1049.2\nVAFB\nIridium NEXT\n9,600 kg\nPolar\nIridium Communications\nSuccess\n\nSuccess\n******\n68\n22 February 2019\n01:45\nF9 B5B1048.3\nCCAFS\nNusantara Satu\n4,850 kg\nGTO\nPSN\nSuccess\n\nSuccess\n******\n69\n2 March 2019\n07:49\nF9 B5[268]\nKSC\nCrew Dragon Demo-1\n12,055 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n70\n4 May 2019\n06:48\nF9 B5\nCCAFS\nSpaceX CRS-17\n2,495 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n71\n24 May 2019\n02:30\nF9 B5B1049.3\nCCAFS\nStarlink\n13,620 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n72\n12 June 2019\n14:17\nF9 B5B1051.2\nVAFB\nRADARSAT Constellation\n4,200 kg\nSSO\nCanadian Space Agency\nSuccess\n\nSuccess\n******\n73\n25 July 2019\n22:01\nF9 B5B1056.2\nCCAFS\nSpaceX CRS-18\n2,268 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n74\n6 August 2019\n23:23\nF9 B5B1047.3\nCCAFS\nAMOS-17\n6,500 kg\nGTO\nSpacecom\nSuccess\n\nNo attempt\n******\n75\n11 November 2019\n14:56\nF9 B5\nCCAFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n76\n5 December 2019\n17:29\nF9 B5\nCCAFS\nSpaceX CRS-19\n2,617 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n77\n17 December 2019\n00:10\nF9 B5B1056.3\nCCAFS\nJCSat-18\n6,956 kg\nGTO\nSky Perfect JSAT\nSuccess\n\nSuccess\n******\n78\n7 January 2020\n02:19:21\nF9 B5\nCCAFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n79\n19 January 2020\n15:30\nF9 B5\nKSC\nCrew Dragon in-flight abort test\n12,050 kg\nSub-orbital\nNASA\nSuccess\n\nNo attempt\n\n******\n80\n29 January 2020\n14:07\nF9 B5\nCCAFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n81\n17 February 2020\n15:05\nF9 B5\nCCAFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nFailure\n******\n82\n7 March 2020\n04:50\nF9 B5\nCCAFS\nSpaceX CRS-20\n1,977 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n83\n18 March 2020\n12:16\nF9 B5\nKSC\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nFailure\n******\n84\n22 April 2020\n19:30\nF9 B5\nKSC\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n85\n30 May 2020\n19:22\nF9 B5\nKSC\nCrew Dragon Demo-2\n12,530 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n86\n4 June 2020\n01:25\nF9 B5\nCCAFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n87\n13 June 2020\n09:21\nF9 B5\nCCAFS\nStarlink\n15,410 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n88\n30 June 2020\n20:10:46\nF9 B5\nCCAFS\nGPS III\n4,311 kg\nMEO\nU.S. 
Space Force\nSuccess\n\nSuccess\n******\n89\n20 July 2020\n21:30\nF9 B5B1058.2\nCCAFS\nANASIS-II\n5,000–6,000 kg\nGTO\nRepublic of Korea Army\nSuccess\n\nSuccess\n******\n90\n7 August 2020\n05:12\nF9 B5\nKSC\nStarlink\n14,932 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n91\n18 August 2020\n14:31\nF9 B5B1049.6\nCCAFS\nStarlink\n~15,440 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n92\n30 August 2020\n23:18\nF9 B5\nCCAFS\nSAOCOM 1B\n3,130 kg\nSSO\nCONAE\nSuccess\n\nSuccess\n******\n93\n3 September 2020\n12:46:14\nF9 B5B1060.2\nKSC\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n94\n6 October 2020\n11:29:34\nF9 B5B1058.3\nKSC\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n95\n18 October 2020\n12:25:57\nF9 B5B1051.6\nKSC\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n96\n24 October 2020\n15:31:34\nF9 B5\nCCAFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n97\n5 November 2020\n23:24:23\nF9 B5\nCCAFS\nGPS III\n4,311 kg\nMEO\nUSSF\nSuccess\n\nSuccess\n******\n98\n16 November 2020\n00:27\nF9 B5\nKSC\nCrew-1\n~12,500 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n99\n21 November 2020\n17:17:08\nF9 B5\nVAFB\nSentinel-6 Michael Freilich (Jason-CS A)\n1,192 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n100\n25 November 2020\n02:13\nF9 B5 ♺\nCCAFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n101\n6 December 2020\n16:17:08\nF9 B5 ♺\nKSC\nSpaceX CRS-21\n2,972 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n102\n13 December 2020\n17:30:00\nF9 B5 ♺\nCCSFS\nSXM-7\n7,000 kg\nGTO\nSirius XM\nSuccess\n\nSuccess\n******\n103\n19 December 2020\n14:00:00\nF9 B5 ♺\nKSC\nNROL-108\nC\nLEO\nNRO\nSuccess\n\nSuccess\n******\n104\n8 January 2021\n02:15\nF9 B5\nCCSFS\nTürksat 5A\n3,500 kg\nGTO\nTürksat\nSuccess\n\nSuccess\n******\n105\n20 January 2021\n13:02\nF9 B5B1051.8\nKSC\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n106\n24 January 2021\n15:00\nF9 B5B1058.5\nCCSFS\nTransporter-1\n~5,000 kg\nSSO\nNone\nSuccess\n\nSuccess\n******\n107\n4 February 2021\n06:19\nF9 B5 ♺\nCCSFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n108\n16 February 2021\n03:59:37\nF9 B5 ♺\nCCSFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nFailure\n******\n109\n4 March 2021\n08:24\nF9 B5 ♺\nKSC\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n110\n11 March 2021\n08:13:29\nF9 B5 ♺\nCCSFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n111\n14 March 2021\n10:01\nF9 B5 ♺\nKSC\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n112\n24 March 2021\n08:28\nF9 B5B1060.6\nCCSFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n113\n7 April 2021\n16:34\nF9 B5 ♺\nCCSFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n114\n23 April 2021\n9:49\nF9 B5B1061.2\nKSC\nCrew-2\n~13,000 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n115\n29 April 2021\n03:44\nF9 B5B1060.7\nCCSFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n116\n4 May 2021\n19:01\nF9 B5B1049.9\nKSC\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n117\n9 May 2021\n06:42\nF9 B5B1051.10\nCCSFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n118\n15 May 2021\n22:56\nF9 B5B1058.8\nKSC\nStarlink\n~14,000 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n119\n26 May 2021\n18:59\nF9 B5B1063.2\nCCSFS\nStarlink\n15,600 kg\nLEO\nSpaceX\nSuccess\n\nSuccess\n******\n120\n3 June 2021\n17:29\nF9 B5B1067.1\nKSC\nSpaceX CRS-22\n3,328 kg\nLEO\nNASA\nSuccess\n\nSuccess\n******\n121\n6 June 2021\n04:26\nF9 
B5\nCCSFS\nSXM-8\n7,000 kg\nGTO\nSirius XM\nSuccess\n\nSuccess\n******\n" ] ], [ [ "After we have filled in the parsed launch record values into `launch_dict`, we can create a dataframe from it.\n", "_____no_output_____" ] ], [ [ "df=pd.DataFrame(launch_dict)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0a8f23b8842a26ff4f68adad7e20fbba1ca722d
2,778
ipynb
Jupyter Notebook
notebook - machine learning sklearn/ipython notebook/2017-25-11-so-preprocessing-encoding-categorical-features-sklearn.ipynb
bjfisica/MachineLearning
20349301ae7f82cd5048410b0cf1f7a5f7d7e5a2
[ "MIT" ]
52
2019-02-15T16:37:13.000Z
2022-02-17T18:34:30.000Z
notebook - machine learning sklearn/ipython notebook/2017-25-11-so-preprocessing-encoding-categorical-features-sklearn.ipynb
ariffyasri/Complete-Data-Science-Toolkits
8a65587c548c412b91d4cb7263ed5e56b249be8a
[ "MIT" ]
null
null
null
notebook - machine learning sklearn/ipython notebook/2017-25-11-so-preprocessing-encoding-categorical-features-sklearn.ipynb
ariffyasri/Complete-Data-Science-Toolkits
8a65587c548c412b91d4cb7263ed5e56b249be8a
[ "MIT" ]
22
2017-11-25T23:42:16.000Z
2019-01-07T09:22:35.000Z
20.426471
65
0.512239
[ [ [ "## LabelEncoder\nEncode labels with value between 0 and n_classes-1.\n", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "from sklearn import preprocessing\n# call our labelEncoder class\nle = preprocessing.LabelEncoder()\n# fit our data\nle.fit([1, 2, 2, 6])\n# print classes\nle.classes_\n# transform\nle.transform([1, 1, 2, 6]) \n#print inverse data\nle.inverse_transform([0, 0, 1, 2])", "_____no_output_____" ], [ "le = preprocessing.LabelEncoder()\nle.fit([\"paris\", \"paris\", \"tokyo\", \"amsterdam\"])\n\nlist(le.classes_)\n\nle.transform([\"tokyo\", \"tokyo\", \"paris\"]) \n\n#list(le.inverse_transform([2, 2, 1]))", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
d0a8f87e4a9271f6441574e95e62692e8a6ced2e
43,162
ipynb
Jupyter Notebook
_notebooks/2022-01-02-plots.ipynb
pinkocto/dinonene
12c40016ba4f2e8419b327f43dec085f231cdb4c
[ "Apache-2.0" ]
null
null
null
_notebooks/2022-01-02-plots.ipynb
pinkocto/dinonene
12c40016ba4f2e8419b327f43dec085f231cdb4c
[ "Apache-2.0" ]
null
null
null
_notebooks/2022-01-02-plots.ipynb
pinkocto/dinonene
12c40016ba4f2e8419b327f43dec085f231cdb4c
[ "Apache-2.0" ]
null
null
null
96.993258
12,288
0.854872
[ [ [ "# 여러그림 그리기\n> 여러그림 그리기, Anscombe's quartet\n- toc: true\n- branch: master\n- badges: true\n- comments: true\n- author: dinonene\n- categories: [python]", "_____no_output_____" ], [ "`-` (1/2) 여러그림그리기\n\n`-` (2/2) Anscombe's quartet", "_____no_output_____" ], [ "### 여러그림 그리기", "_____no_output_____" ], [ "#### (1) 겹쳐그리기", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "x=np.arange(-5,5,0.1)\ny=2*x+np.random.normal(loc=0,scale=1,size=100)\nplt.plot(x,y,'.b')\nplt.plot(x,2*x,'--r')", "_____no_output_____" ] ], [ [ "#### (2) 따로그리기 -subplots", "_____no_output_____" ] ], [ [ "x=[1,2,3,4]\ny=[1,2,4,3]\n_, axs = plt.subplots(2,2)\naxs[0,0].plot(x,y,'o:r')\naxs[0,1].plot(x,y,'xb')\naxs[1,0].plot(x,y,'xm')\naxs[1,1].plot(x,y,'.--k')", "_____no_output_____" ] ], [ [ "> note: fmt = `[marker][line][color]`", "_____no_output_____" ], [ "```python\nplt.subplots??\n\n # using the variable ax for single a Axes\n fig, ax = plt.subplots()\n\n # using the variable axs for multiple Axes\n fig, axs = plt.subplots(2, 2)\n\n # using tuple unpacking for multiple Axes\n fig, (ax1, ax2) = plt.subplots(1, 2)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n \n \n \n fig = figure(**fig_kw)\n axs = fig.subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,\n squeeze=squeeze, subplot_kw=subplot_kw,\n gridspec_kw=gridspec_kw)\n return fig, axs\n```", "_____no_output_____" ], [ "- subplots의 리턴값이 $\\tt{(fig,axs)}$ 이 나오게 된다. 우리는 뒤의 axs만 관심이 있으므로 앞의 fig는 `_`로 처리한다.", "_____no_output_____" ], [ "### Anscombe's quartet", "_____no_output_____" ], [ "`-` 교훈: 데이터를 분석하기 전에 항상 시각화를 하라.", "_____no_output_____" ] ], [ [ "x = [10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5]\ny1 = [8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68]\ny2 = [9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74]\ny3 = [7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73]\nx4 = [8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8]\ny4 = [6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89]", "_____no_output_____" ], [ "_, axs = plt.subplots(2,2)\naxs[0,0].plot(x,y1,'o', color='orange')\naxs[0,1].plot(x,y2,'o', color='orange')\naxs[1,0].plot(x,y3,'o', color='orange')\naxs[1,1].plot(x4,y4,'o', color='orange')", "_____no_output_____" ] ], [ [ "`-` 상관계수를 잠깐 복습해보자.\n\n- 상관계수는 -1~1 사이의 값을 가진다. (코쉬슈바르츠 부등식을 사용하여 증명가능)\n- 완전한 직선이라면 상관계수가 1 또는 -1이다.\n- 상관계수가 1에 가까우면 양의 상관관계에 있다고 말하고 -1에 가까우면 음의 상관관계에 있다고 말한다.", "_____no_output_____" ], [ "`-` 의문: 자료의 모양이 직선모양에 가까우면 상관계수가 큰 것이 맞나?\n\n- $x,y$ 값이 모두 큰 하나의 관측치가 상관계수값을 키울 수 있지 않나?", "_____no_output_____" ], [ "`-` 상관계수가 좋은 것은 맞나? (=상관계수는 두 변수의 관계를 설명하기에 충분히 적절한 통계량인가?)", "_____no_output_____" ] ], [ [ "n=len(x)\nxtilde = (x-np.mean(x)) / (np.std(x)*np.sqrt(n)) ## x표준화\ny1tilde = (y1-np.mean(y1)) / (np.std(y1)*np.sqrt(n)) ## y1표준화", "_____no_output_____" ], [ "sum(xtilde*y1tilde)", "_____no_output_____" ], [ "np.corrcoef(x,y1)", "_____no_output_____" ], [ "np.corrcoef([x,y1,y2,y3])", "_____no_output_____" ], [ "np.corrcoef([x4,y4])", "_____no_output_____" ] ], [ [ "`-` 위의 4개의 그림에 대한 상관계수는 모두 같다. (0.81652)", "_____no_output_____" ], [ "`-` 상관계수는 두 변수의 관계를 설명하기에 부적절하다.", "_____no_output_____" ], [ "- 상관계수는 1번그림과 같이 두 변수가 선형관계에 있을 때 그 정도를 나타내는 통계량일 뿐이다.\n- 선형관계가 아닌 것처럼 보이는 자료에서 상관계수를 계산할 수 있겠으나 의미가 없다.", "_____no_output_____" ], [ "`-` 교훈2: 기본적인 통계량들은 실제자료를 분석하기에 부적절할 수 있다. (=통계량은 적절한 가정이 동반되어야 의미가 있다.)", "_____no_output_____" ], [ "> note: 통계학자는 (1) 적절한 가정을 수학적인 언어로 정의하고 (2) 그 가정하에서 통계량이 의미있다는 것을 증명해야 한다. 
(3) 그리고 그 결과를 시각화하여 설득한다.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0a90b6da1c271d83505e1d8a2f5ccc88e708fbc
13,117
ipynb
Jupyter Notebook
LSTM-Attention.ipynb
d1sharma/lign167
5803d477aee284a4991d7597c5472f41d4d01a56
[ "Apache-2.0" ]
null
null
null
LSTM-Attention.ipynb
d1sharma/lign167
5803d477aee284a4991d7597c5472f41d4d01a56
[ "Apache-2.0" ]
null
null
null
LSTM-Attention.ipynb
d1sharma/lign167
5803d477aee284a4991d7597c5472f41d4d01a56
[ "Apache-2.0" ]
null
null
null
32.14951
394
0.501258
[ [ [ "# Basic Modules for data and text processing\nimport pandas as pd\nimport numpy as np\nimport string\nimport nltk\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.stem.snowball import SnowballStemmer\nfrom sklearn.model_selection import train_test_split\n\n# Keras Modules \nfrom keras.preprocessing.text import Tokenizer # This tokenizes the text\nfrom keras.preprocessing.sequence import pad_sequences # This equalises the input we want to give\nfrom keras.models import Sequential # We will build sequential models only\nfrom keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation # All of the layers of our model\nfrom keras.layers.embeddings import Embedding # How to build our word vectors\n\n# Plotly\nimport plotly.offline as py\nimport plotly.graph_objs as go\npy.init_notebook_mode(connected=True)", "Using TensorFlow backend.\n" ] ], [ [ "## Data Processing Stuff", "_____no_output_____" ] ], [ [ "df = pd.read_csv('D:\\Data\\processed.csv',delimiter = \"\\t\",error_bad_lines=False)", "_____no_output_____" ], [ "print(df.head(1))", " Unnamed: 0 Unnamed: 0.1 To Subject \\\n0 0 0 frozenset({'[email protected]'}) NaN \n\n content user labeled rep \n0 Here is our forecast allen-p False 0 \n" ], [ "# Clearning the Dataframe a bit\ndf = df.drop(['Unnamed: 0','Unnamed: 0.1', 'Subject','user','labeled','To'], axis=1)\ndf['label'] = df['rep']\ndf = df.drop(['rep'],axis=1)\n#df = df[df.label != 'unsup']\nprint(df.shape)", "(100000, 2)\n" ], [ "# Text Normalising Function\n\ndef clean_text(text):\n \n ## Remove puncuation \n text = text.translate(string.punctuation)\n \n ## Convert Words to lower case and split them\n text = text.lower().split()\n \n ## Remove stop words (commonly used stuff eg, is and was)\n stops = set(stopwords.words(\"english\"))\n text = [w for w in text if not w in stops and len(w) >= 3]\n \n text = \" \".join(text) \n \n # Common Dictionary Corpus\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\",\", \" \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" ! 
\", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n \n ## Stemming\n text = text.split()\n stemmer = SnowballStemmer('english')\n stemmed_words = [stemmer.stem(word) for word in text]\n text = \" \".join(stemmed_words)\n \n return text", "_____no_output_____" ], [ "# Clear some text\nprint(df.shape)\n\n# Drop empty rows (NaN)\ndf = df.dropna()\n\n# Using the text cleaning function\ndf['content'] = df['content'].map(lambda x: clean_text(x))\n\nprint(df.shape)", "(100000, 2)\n(99598, 2)\n" ], [ "## Creating Sequences\nvocabulary_size = 25000\ntokenizer = Tokenizer(num_words = vocabulary_size)\ntokenizer.fit_on_texts(df['content'])\n\nsequences = tokenizer.texts_to_sequences(df['content'])\ndata = pad_sequences(sequences,maxlen=70)", "_____no_output_____" ], [ "# Now splitting training and testing data\n\nprint(\"Reached here\")\n\nX_train, X_test, y_train, y_test = train_test_split(data, df['label'], test_size=.25)\nprint(\"X_train {} \\n y_train {}\".format(X_train.shape,y_train.shape))\n#import csv\n#csv.field_size_limit()\n#csv.field_size_limit(10000000)", "Reached here\nX_train (74698, 70) \n y_train (74698,)\n" ] ], [ [ "## Neural Architecture begins here", "_____no_output_____" ] ], [ [ "# Defining the Model\n\nmodel = Sequential()\nmodel.add(Embedding(vocabulary_size,140,input_length=70))\nmodel.add(LSTM(140, dropout =0.2, recurrent_dropout=0.3))\nmodel.add(Activation('softmax'))\nmodel.add(Dense(1, activation='sigmoid'))\n\nprint(model.summary())", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_1 (Embedding) (None, 70, 140) 3500000 \n_________________________________________________________________\nlstm_1 (LSTM) (None, 140) 157360 \n_________________________________________________________________\nactivation_1 (Activation) (None, 140) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 141 \n=================================================================\nTotal params: 3,657,501\nTrainable params: 3,657,501\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ], [ "# Compile the model\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy'])", "_____no_output_____" ], [ "# Fit and Train the Model \n\n## HyperParameters\nbatch_size = 7000\nnum_epochs = 10\n\n# Making validation set\nX_valid, y_valid = X_train[:batch_size], y_train[:batch_size]\nX_train2, y_train2 = X_train[batch_size:], y_train[batch_size:]\n\nmodel.fit(X_train2,y_train2, validation_data=(X_valid,y_valid), batch_size=batch_size,epochs=num_epochs)", "Train on 67698 samples, validate on 7000 samples\nEpoch 1/10\n67698/67698 [==============================] - 22s 321us/step - loss: 0.6885 - acc: 0.8342 - val_loss: 0.6835 - val_acc: 
0.8329\nEpoch 2/10\n67698/67698 [==============================] - 22s 319us/step - loss: 0.6735 - acc: 0.8342 - val_loss: 0.6622 - val_acc: 0.8329\nEpoch 3/10\n67698/67698 [==============================] - 22s 318us/step - loss: 0.6562 - acc: 0.8342 - val_loss: 0.6484 - val_acc: 0.8329\nEpoch 4/10\n67698/67698 [==============================] - 22s 320us/step - loss: 0.6445 - acc: 0.8342 - val_loss: 0.6406 - val_acc: 0.8329\nEpoch 5/10\n67698/67698 [==============================] - 22s 319us/step - loss: 0.6376 - acc: 0.8342 - val_loss: 0.6344 - val_acc: 0.8329\nEpoch 6/10\n67698/67698 [==============================] - 22s 319us/step - loss: 0.6316 - acc: 0.8342 - val_loss: 0.6287 - val_acc: 0.8329\nEpoch 7/10\n67698/67698 [==============================] - 22s 318us/step - loss: 0.6259 - acc: 0.8342 - val_loss: 0.6231 - val_acc: 0.8329\nEpoch 8/10\n67698/67698 [==============================] - 22s 318us/step - loss: 0.6205 - acc: 0.8342 - val_loss: 0.6178 - val_acc: 0.8329\nEpoch 9/10\n67698/67698 [==============================] - 22s 319us/step - loss: 0.6151 - acc: 0.8342 - val_loss: 0.6126 - val_acc: 0.8329\nEpoch 10/10\n67698/67698 [==============================] - 22s 320us/step - loss: 0.6100 - acc: 0.8342 - val_loss: 0.6076 - val_acc: 0.8329\n" ], [ "# Finding the accuracy\n\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Test Accuracy: \",scores[1])", "Test Accuracy: 0.832570281124498\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d0a90be17a8910b601fb45c0432ca9b8a1db03de
266,909
ipynb
Jupyter Notebook
Covid_19_india_analysis (4).ipynb
singhrau0/Covid_19_india_predictor
cfef36f2decb57ab897837ba3ec4a43b2f5779e3
[ "Apache-2.0" ]
null
null
null
Covid_19_india_analysis (4).ipynb
singhrau0/Covid_19_india_predictor
cfef36f2decb57ab897837ba3ec4a43b2f5779e3
[ "Apache-2.0" ]
null
null
null
Covid_19_india_analysis (4).ipynb
singhrau0/Covid_19_india_predictor
cfef36f2decb57ab897837ba3ec4a43b2f5779e3
[ "Apache-2.0" ]
null
null
null
168.609602
35,888
0.85815
[ [ [ "# Importing Important File regarding Analysis", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nimport datetime as dt\nimport time\nimport ipywidgets\n#from ipython.display import display\n", "_____no_output_____" ], [ "data1 = pd.read_csv(\"covid_19_india (1).csv\",dayfirst=True)", "_____no_output_____" ], [ "data1", "_____no_output_____" ] ], [ [ "# Data cleaning of Covid 19 india", "_____no_output_____" ] ], [ [ "missing_values = [\"NaN\",\"na\",\"Na\",\"-\"]\ndata1 = pd.read_csv(\"covid_19_india.csv\",na_values= missing_values)", "_____no_output_____" ], [ "data1.isnull().sum()", "_____no_output_____" ], [ "#Now we have seen that \"-\" means empty values. Now we gonna fill this empty value with Zero because in terms of corona cases \n# it does not effect on our data", "_____no_output_____" ], [ "data1 = data1.fillna(0)", "_____no_output_____" ], [ "data1.loc[445:446]", "_____no_output_____" ], [ "data1.rename(columns = {\"State/UnionTerritory\":\"state\",\"ConfirmedIndianNational\":\"confirmed_indian\",\"ConfirmedForeignNational\":\"confirmed_foreign\"},inplace = True)", "_____no_output_____" ], [ "#data1.isnull().sum()", "_____no_output_____" ], [ "#chaging date as date type", "_____no_output_____" ], [ "data1.head()", "_____no_output_____" ], [ "data1[\"Date\"] = pd.to_datetime(data1[\"Date\"])\ndata1[\"confirmed_indian\"] = pd.to_numeric(data1[\"confirmed_indian\"])", "_____no_output_____" ], [ "data1.dtypes", "_____no_output_____" ], [ "# Now we are going to analyise state wise data of covid", "_____no_output_____" ], [ "data1 = data1[[\"Date\",\"state\",\"Cured\",\"Deaths\",\"Confirmed\"]]", "_____no_output_____" ], [ "data1.columns = [\"date\",\"state\",\"cured\",\"death\",\"confirmed\"]", "_____no_output_____" ], [ "data1.head()", "_____no_output_____" ], [ "data1.tail()", "_____no_output_____" ], [ "#here we are inserting rows of entire india cases", "_____no_output_____" ], [ "temp_data = data1\ntemp_new = data1\ntemp_local = data1.date.unique()\nn = len(temp_local)\nfor i in range(n):\n temp_data = data1[data1.date == temp_local[i]]\n #print(temp_local[50],temp_data)\n x = temp_data.cured.sum()\n y = temp_data.death.sum()\n z = temp_data.confirmed.sum()\n #print(x,y,z)\n #temp_df = {\"state\":\"india\",}\n temp_new.loc[i] = [temp_local[i],\"india\",x,y,z]", "_____no_output_____" ], [ "# As we can clearly see that the latest dates of data we have is of 8 may 2021 so we are going to till that day\n", "_____no_output_____" ], [ "f = len(data1.index)\n#print(f)\nz = data1.date.values[f-1]\n#print(z)\nlatest_date = data1[data1.date == z ]", "_____no_output_____" ], [ "latest_date.head()", "_____no_output_____" ], [ "latest_date = latest_date.sort_values(by='confirmed',ascending=False)", "_____no_output_____" ], [ "latest_date.head(10)", "_____no_output_____" ], [ "#Here above we have top most confirmed cases according to state", "_____no_output_____" ], [ "#Now we are going to see graphical representation of confirmed cases", "_____no_output_____" ], [ "x = 0\ny = 0", "_____no_output_____" ], [ "state = latest_date.state", "_____no_output_____" ] ], [ [ "# State v/s Confirmed cases in India by Graph", "_____no_output_____" ] ], [ [ "for i in range(len(state)):\n new_date = latest_date[x:y+6]\n x+=6\n y+=6\n sns.set(rc = {'figure.figsize':(20,10)})\n sns.barplot(x = \"state\", y = \"confirmed\", data = new_date, hue = \"state\")\n plt.show()\n #time.sleep(1)\n ", 
"_____no_output_____" ], [ "#Start date of cases is from 30-01-2020 and end date is 08-05-2021 of our data ", "_____no_output_____" ] ], [ [ "# Visualising the statical data of covid 19 India", "_____no_output_____" ], [ "# Statewise Visualisation of (Confirmed vs Cured vs Death data of covid)", "_____no_output_____" ] ], [ [ "#Here we are making Daily cases column in our dataset and returning new dataframe named as result_data", "_____no_output_____" ], [ "list1 = []\nfor i in range(len(state)):\n new_data = data1[data1.state == state.values[i]]\n x = 0\n list = []\n new_data = new_data.reset_index()\n new_data4 = new_data.confirmed.values[0]\n for j in range(len(new_data.confirmed)):\n #print(\"hello\",j)\n new_data1 = new_data.confirmed.values[j]\n if new_data1 == new_data4:\n new_data4 -= 10 \n #print(\"fuck\",new_data1)\n list.append(new_data1)\n if new_data.index.stop != j+1:\n new_data3 = new_data.confirmed.values[j+1] - new_data1\n list.append(new_data3)\n #print(list)\n #print(\"here\",len(list))\n new_data_state = new_data\n new_data_state.insert(6,\"daily_cases\",list)\n list1.append(new_data_state)\n result_data = pd.concat(list1)\n result_data = result_data.reset_index()", "_____no_output_____" ], [ "#Here we are using the ipywidgets of library to make our data more handy to use for users , \n#so they can select the state and look for the data", "_____no_output_____" ], [ "temp_state = latest_date.state\nnew_temp_state = ipywidgets.Dropdown(\n options=temp_state,\n #options = temp_value\n value=temp_state.values[0],\n description='Select state:',\n disabled=False,\n )\n#print(new_temp_state.value)\n#print(data_select2.value)\n#display(new_temp_state)\ndef select (select):\n temp2_state = result_data[result_data.state == new_temp_state.value]\n temp_value = [\"confirmed\",\"death\",\"cured\",\"daily_cases\"]\n data_select2 = ipywidgets.Dropdown(\n options= temp_value,\n value=temp_value[0],\n description='Select option:',\n disabled=False,\n )\n \n #print(\"first\",new_temp_state)\n #print(\"second\",temp2_state)\n def data_select(data_select):\n \n sns.set(rc = {'figure.figsize':(15,10)})\n if data_select2.value == \"confirmed\":\n plot1 = sns.lineplot(x = \"date\",y = \"confirmed\", data = temp2_state,color = \"g\")\n elif data_select2.value == \"cured\": \n plot2 = sns.lineplot(x = \"date\",y = \"cured\", data = temp2_state, color = \"b\")\n elif data_select2.value ==\"death\":\n plot3 = sns.lineplot(x = \"date\",y = \"death\", data = temp2_state, color = \"r\")\n elif data_select2.value ==\"daily_cases\":\n plot3 = sns.lineplot(x = \"date\",y = \"daily_cases\", data = temp2_state, color = \"y\")\n #plt.title(new_temp_state)\n data_select(data_select2)\n ipywidgets.interact(data_select, data_select = data_select2)", "_____no_output_____" ], [ "# Here Green Line indicating Confirmed Cases of Covid in that state\n# Here Blue Line indicating Cured Cases of Covid in that state\n# Here red indicating death Cases of Covid in that state", "_____no_output_____" ] ], [ [ "# please select the state and different cases option to visualise your data", "_____no_output_____" ] ], [ [ "ipywidgets.interact(select, select = new_temp_state)", "_____no_output_____" ] ], [ [ "# Here we are making our prediction based on each state", "_____no_output_____" ] ], [ [ "#using ipywidgets library to create dropdown option for user to easily select the query\ntemp_value = [\"confirmed\",\"death\",\"cured\",\"daily_cases\"]\ndata_select3 = ipywidgets.Dropdown(\n options= temp_value,\n 
value=temp_value[0],\n description='Select option:',\n disabled=False,\n )\n#making our ultimate state predictor function to predict our data statewise\ndef ultimate_prediction(predict):\n #creating datepicker for user\n select_date = ipywidgets.DatePicker(\n description='Pick a Date',\n disabled=False\n )\n #creating predict_data function for returning the future cases depend upon the user selection\n def predict_data(prediction):\n from sklearn.model_selection import train_test_split\n #print(\"first\",new_temp_state)\n temp_state_value = result_data[result_data.state == new_temp_state.value]\n temp_state_value['date'] = temp_state_value['date'].map(dt.datetime.toordinal)\n #print(\"second\",temp_state_value['date'])\n #temp_state_value.head()\n x = temp_state_value['date']\n temp = data_select3.value\n #print(temp)\n y = temp_state_value[temp]\n x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3)\n from sklearn.ensemble import RandomForestRegressor\n from sklearn.linear_model import LinearRegression\n rf = RandomForestRegressor()\n lr = LinearRegression()\n rf.fit(np.array(x_train).reshape(-1,1),np.array(y_train).reshape(-1,1))\n lr.fit(np.array(x_train).reshape(-1,1),np.array(y_train).reshape(-1,1))\n choose_date = select_date.value\n h = None\n #print(choose_date)\n choose_date2 = dt.date.today()\n choose_date2 = choose_date2.toordinal()\n if choose_date == h:\n result1 = rf.predict([[choose_date2]])\n result2 = lr.predict([[choose_date2]])\n \n if choose_date != h:\n choose_date = choose_date.toordinal()\n #print(choose_date)\n result1 = rf.predict([[choose_date]])\n result2 = lr.predict([[choose_date]]) \n result1 = result1.astype(int)\n result2 = result2.astype(int)\n print('Output from Random Forest Regressor is :',result1)\n print('Output from Linear Regression Model is :',result2)\n return None\n ipywidgets.interact(predict_data, prediction = select_date)\nnew_temp_state ", "_____no_output_____" ] ], [ [ "# *please select the option to avoid error", "_____no_output_____" ], [ "# *please note following things", "_____no_output_____" ], [ "### Confirmed cases is total confirmed cases untill selected day.\n### Death cases is total death cases untill selected day.\n### Cured cases is total Cures cases untill selected day.\n### daily cases is the cases only of that day you select.", "_____no_output_____" ] ], [ [ "ipywidgets.interact(ultimate_prediction, predict = data_select3)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
d0a91421acc6ac45dbe8f0167354b144efbef34a
46,849
ipynb
Jupyter Notebook
code.ipynb
mihir-m-gandhi/Log-Based-Intrusion-Detection
d6d79cdfb6f57e7cd5f1e137c0f7590ce7caad41
[ "MIT" ]
4
2021-02-09T09:40:05.000Z
2021-02-16T17:00:03.000Z
code.ipynb
mihir-m-gandhi/Log-Based-Intrusion-Detection
d6d79cdfb6f57e7cd5f1e137c0f7590ce7caad41
[ "MIT" ]
null
null
null
code.ipynb
mihir-m-gandhi/Log-Based-Intrusion-Detection
d6d79cdfb6f57e7cd5f1e137c0f7590ce7caad41
[ "MIT" ]
null
null
null
45.840509
1,445
0.325493
[ [ [ "import pandas as pd\nimport numpy as np\nimport sys\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nfrom sklearn.feature_selection import RFE\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import preprocessing", "_____no_output_____" ], [ "col_names = [\"duration\",\"protocol_type\",\"service\",\"flag\",\"src_bytes\",\n \"dst_bytes\",\"land\",\"wrong_fragment\",\"urgent\",\"hot\",\"num_failed_logins\",\n \"logged_in\",\"num_compromised\",\"root_shell\",\"su_attempted\",\"num_root\",\n \"num_file_creations\",\"num_shells\",\"num_access_files\",\"num_outbound_cmds\",\n \"is_host_login\",\"is_guest_login\",\"count\",\"srv_count\",\"serror_rate\",\n \"srv_serror_rate\",\"rerror_rate\",\"srv_rerror_rate\",\"same_srv_rate\",\n \"diff_srv_rate\",\"srv_diff_host_rate\",\"dst_host_count\",\"dst_host_srv_count\",\n \"dst_host_same_srv_rate\",\"dst_host_diff_srv_rate\",\"dst_host_same_src_port_rate\",\n \"dst_host_srv_diff_host_rate\",\"dst_host_serror_rate\",\"dst_host_srv_serror_rate\",\n \"dst_host_rerror_rate\",\"dst_host_srv_rerror_rate\",\"label\"]\n\n\ndf = pd.read_csv(\"KDDTrain+_2.csv\", header=None, names = col_names)\ndf_test = pd.read_csv(\"KDDTest+_2.csv\", header=None, names = col_names)\n", "_____no_output_____" ], [ "categorical_columns=['protocol_type', 'service', 'flag']\n# insert code to get a list of categorical columns into a variable, categorical_columns\ncategorical_columns=['protocol_type', 'service', 'flag'] \n # Get the categorical values into a 2D numpy array\ndf_categorical_values = df[categorical_columns]\ntestdf_categorical_values = df_test[categorical_columns]\ndf_categorical_values.head()", "_____no_output_____" ], [ "# protocol type\nunique_protocol=sorted(df.protocol_type.unique())\nstring1 = 'Protocol_type_'\nunique_protocol2=[string1 + x for x in unique_protocol]\n# service\nunique_service=sorted(df.service.unique())\nstring2 = 'service_'\nunique_service2=[string2 + x for x in unique_service]\n# flag\nunique_flag=sorted(df.flag.unique())\nstring3 = 'flag_'\nunique_flag2=[string3 + x for x in unique_flag]\n# put together\ndumcols=unique_protocol2 + unique_service2 + unique_flag2\nprint(dumcols)\n\n#do same for test set\nunique_service_test=sorted(df_test.service.unique())\nunique_service2_test=[string2 + x for x in unique_service_test]\ntestdumcols=unique_protocol2 + unique_service2_test + unique_flag2", "['Protocol_type_icmp', 'Protocol_type_tcp', 'Protocol_type_udp', 'service_IRC', 'service_X11', 'service_Z39_50', 'service_aol', 'service_auth', 'service_bgp', 'service_courier', 'service_csnet_ns', 'service_ctf', 'service_daytime', 'service_discard', 'service_domain', 'service_domain_u', 'service_echo', 'service_eco_i', 'service_ecr_i', 'service_efs', 'service_exec', 'service_finger', 'service_ftp', 'service_ftp_data', 'service_gopher', 'service_harvest', 'service_hostnames', 'service_http', 'service_http_2784', 'service_http_443', 'service_http_8001', 'service_imap4', 'service_iso_tsap', 'service_klogin', 'service_kshell', 'service_ldap', 'service_link', 'service_login', 'service_mtp', 'service_name', 'service_netbios_dgm', 'service_netbios_ns', 'service_netbios_ssn', 'service_netstat', 'service_nnsp', 'service_nntp', 'service_ntp_u', 'service_other', 'service_pm_dump', 'service_pop_2', 'service_pop_3', 'service_printer', 'service_private', 'service_red_i', 'service_remote_job', 'service_rje', 'service_shell', 'service_smtp', 'service_sql_net', 'service_ssh', 'service_sunrpc', 'service_supdup', 'service_systat', 
'service_telnet', 'service_tftp_u', 'service_tim_i', 'service_time', 'service_urh_i', 'service_urp_i', 'service_uucp', 'service_uucp_path', 'service_vmnet', 'service_whois', 'flag_OTH', 'flag_REJ', 'flag_RSTO', 'flag_RSTOS0', 'flag_RSTR', 'flag_S0', 'flag_S1', 'flag_S2', 'flag_S3', 'flag_SF', 'flag_SH']\n" ], [ "df_categorical_values_enc=df_categorical_values.apply(LabelEncoder().fit_transform)\nprint(df_categorical_values_enc.head())\n# test set\ntestdf_categorical_values_enc=testdf_categorical_values.apply(LabelEncoder().fit_transform)", " protocol_type service flag\n0 1 20 9\n1 2 44 9\n2 1 49 5\n3 1 24 9\n4 1 24 9\n" ], [ "enc = OneHotEncoder()\ndf_categorical_values_encenc = enc.fit_transform(df_categorical_values_enc)\ndf_cat_data = pd.DataFrame(df_categorical_values_encenc.toarray(),columns=dumcols)\n# test set\ntestdf_categorical_values_encenc = enc.fit_transform(testdf_categorical_values_enc)\ntestdf_cat_data = pd.DataFrame(testdf_categorical_values_encenc.toarray(),columns=testdumcols)\n\ndf_cat_data.head()", "/usr/local/lib/python3.7/site-packages/sklearn/preprocessing/_encoders.py:371: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.\nIf you want the future behaviour and silence this warning, you can specify \"categories='auto'\".\nIn case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.\n warnings.warn(msg, FutureWarning)\n/usr/local/lib/python3.7/site-packages/sklearn/preprocessing/_encoders.py:371: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.\nIf you want the future behaviour and silence this warning, you can specify \"categories='auto'\".\nIn case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.\n warnings.warn(msg, FutureWarning)\n" ], [ "trainservice=df['service'].tolist()\ntestservice= df_test['service'].tolist()\ndifference=list(set(trainservice) - set(testservice))\nstring = 'service_'\ndifference=[string + x for x in difference]", "_____no_output_____" ], [ "for col in difference:\n testdf_cat_data[col] = 0\n\ntestdf_cat_data.shape", "_____no_output_____" ], [ "newdf=df.join(df_cat_data)\nnewdf.drop('flag', axis=1, inplace=True)\nnewdf.drop('protocol_type', axis=1, inplace=True)\nnewdf.drop('service', axis=1, inplace=True)\n# test data\nnewdf_test=df_test.join(testdf_cat_data)\nnewdf_test.drop('flag', axis=1, inplace=True)\nnewdf_test.drop('protocol_type', axis=1, inplace=True)\nnewdf_test.drop('service', axis=1, inplace=True)\nprint(newdf.shape)\nprint(newdf_test.shape)", "(125973, 123)\n(22544, 123)\n" ], [ "# take label column\nlabeldf=newdf['label']\nlabeldf_test=newdf_test['label']\n# change the label column\nnewlabeldf=labeldf.replace({ 'normal' : 0, 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1,\n 'ipsweep' : 2,'nmap' : 2,'portsweep' : 2,'satan' : 2,'mscan' : 2,'saint' : 2\n ,'ftp_write': 3,'guess_passwd': 3,'imap': 3,'multihop': 3,'phf': 3,'spy': 3,'warezclient': 3,'warezmaster': 3,'sendmail': 3,'named': 3,'snmpgetattack': 3,'snmpguess': 
3,'xlock': 3,'xsnoop': 3,'httptunnel': 3,\n 'buffer_overflow': 4,'loadmodule': 4,'perl': 4,'rootkit': 4,'ps': 4,'sqlattack': 4,'xterm': 4})\nnewlabeldf_test=labeldf_test.replace({ 'normal' : 0, 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1,\n 'ipsweep' : 2,'nmap' : 2,'portsweep' : 2,'satan' : 2,'mscan' : 2,'saint' : 2\n ,'ftp_write': 3,'guess_passwd': 3,'imap': 3,'multihop': 3,'phf': 3,'spy': 3,'warezclient': 3,'warezmaster': 3,'sendmail': 3,'named': 3,'snmpgetattack': 3,'snmpguess': 3,'xlock': 3,'xsnoop': 3,'httptunnel': 3,\n 'buffer_overflow': 4,'loadmodule': 4,'perl': 4,'rootkit': 4,'ps': 4,'sqlattack': 4,'xterm': 4})\n# put the new label column back\nnewdf['label'] = newlabeldf\nnewdf_test['label'] = newlabeldf_test\nprint(newdf['label'].head())", "0 0\n1 0\n2 1\n3 0\n4 0\nName: label, dtype: object\n" ], [ "to_drop_DoS = [2,3,4]\n\nDoS_df=newdf[~newdf['label'].isin(to_drop_DoS)];\n\n#test\nDoS_df_test=newdf_test[~newdf_test['label'].isin(to_drop_DoS)];\nprint('Train:')\nprint('Dimensions of DoS:' ,DoS_df.shape)\nprint('Test:')\nprint('Dimensions of DoS:' ,DoS_df_test.shape)\n", "Train:\nDimensions of DoS: (113270, 123)\nTest:\nDimensions of DoS: (17171, 123)\n" ], [ "X_DoS = DoS_df.drop('label',1)\nY_DoS = DoS_df.label\nY_DoS=Y_DoS.astype('int')\n\nX_DoS_test = DoS_df_test.drop('label',1)\ncolNames=list(X_DoS)\ncolNames_test=list(X_DoS_test)\n\n", " duration src_bytes dst_bytes land wrong_fragment urgent hot \\\n0 0 0 0 0 0 0 0 \n1 0 0 0 0 0 0 0 \n2 2 12983 0 0 0 0 0 \n5 0 267 14515 0 0 0 0 \n6 0 1022 387 0 0 0 0 \n8 0 327 467 0 0 0 0 \n11 0 616 330 0 0 0 0 \n12 0 0 0 0 0 0 0 \n13 0 0 0 0 0 0 0 \n14 37 773 364200 0 0 0 0 \n15 0 350 3610 0 0 0 0 \n16 0 213 659 0 0 0 0 \n17 0 246 2090 0 0 0 0 \n18 0 45 44 0 0 0 0 \n19 0 0 0 0 0 0 0 \n20 0 0 0 0 0 0 0 \n22 0 196 1823 0 0 0 0 \n23 0 277 1816 0 0 0 0 \n24 0 0 0 0 0 0 0 \n25 0 0 0 0 0 0 0 \n26 0 294 6442 0 0 0 0 \n27 0 300 440 0 0 0 0 \n28 0 520 0 0 0 0 0 \n29 0 54 51 0 0 0 0 \n30 805 76944 1 0 0 0 0 \n31 0 720 281 0 0 0 0 \n32 0 301 19794 0 0 0 0 \n33 0 1 1 0 0 0 0 \n36 0 209 12894 0 0 0 0 \n38 0 43 71 0 0 0 0 \n... ... ... ... ... ... ... ... \n22508 0 336 1203 0 0 0 0 \n22510 0 1008 0 0 0 0 0 \n22511 0 235 892 0 0 0 0 \n22512 0 0 0 0 0 0 0 \n22514 0 115 0 0 0 0 0 \n22515 0 0 0 0 0 0 0 \n22516 0 322 396 0 0 0 0 \n22517 7498 0 44 0 0 0 0 \n22518 0 295 757 0 0 0 0 \n22519 8209 0 15 0 0 0 0 \n22522 0 54540 8314 0 0 0 2 \n22523 0 289 9522 0 0 0 0 \n22525 2064 55744 0 0 0 0 0 \n22526 0 169 4997 0 0 0 0 \n22527 0 236 16257 0 0 0 0 \n22528 0 1032 0 0 0 0 0 \n22529 0 9 139 0 0 0 0 \n22530 0 0 0 0 0 0 0 \n22531 0 0 0 0 0 0 0 \n22532 0 264 14839 0 0 0 0 \n22533 0 274 1623 0 0 0 0 \n22534 0 0 0 0 0 0 0 \n22535 0 280 6087 0 0 0 0 \n22536 0 0 0 0 0 0 0 \n22537 1 2599 293 0 0 0 0 \n22538 0 1032 0 0 0 0 0 \n22539 0 794 333 0 0 0 0 \n22540 0 317 938 0 0 0 0 \n22541 0 54540 8314 0 0 0 2 \n22542 0 42 42 0 0 0 0 \n\n num_failed_logins logged_in num_compromised ... flag_S2 flag_S3 \\\n0 0 0 0 ... 0.0 0.0 \n1 0 0 0 ... 0.0 0.0 \n2 0 0 0 ... 0.0 0.0 \n5 0 1 0 ... 0.0 0.0 \n6 0 1 0 ... 0.0 0.0 \n8 0 1 0 ... 0.0 0.0 \n11 0 1 0 ... 0.0 0.0 \n12 0 0 0 ... 0.0 0.0 \n13 0 0 0 ... 0.0 0.0 \n14 0 1 0 ... 0.0 0.0 \n15 0 1 0 ... 0.0 0.0 \n16 0 1 0 ... 0.0 0.0 \n17 0 1 0 ... 0.0 0.0 \n18 0 0 0 ... 0.0 0.0 \n19 0 0 0 ... 0.0 0.0 \n20 0 0 0 ... 0.0 0.0 \n22 0 1 0 ... 0.0 0.0 \n23 0 1 0 ... 0.0 0.0 \n24 0 0 0 ... 0.0 0.0 \n25 0 0 0 ... 0.0 0.0 \n26 0 1 0 ... 
0.0 0.0 \n27 0 1 0 ... 0.0 0.0 \n28 0 0 0 ... 0.0 0.0 \n29 0 0 0 ... 0.0 0.0 \n30 0 1 0 ... 0.0 0.0 \n31 0 1 0 ... 0.0 0.0 \n32 0 1 0 ... 0.0 0.0 \n33 0 0 0 ... 0.0 0.0 \n36 0 1 0 ... 0.0 0.0 \n38 0 0 0 ... 0.0 0.0 \n... ... ... ... ... ... ... \n22508 0 1 0 ... 0.0 0.0 \n22510 0 0 0 ... 0.0 0.0 \n22511 0 1 0 ... 0.0 0.0 \n22512 0 0 0 ... 0.0 0.0 \n22514 0 0 0 ... 0.0 0.0 \n22515 0 0 0 ... 0.0 0.0 \n22516 0 1 0 ... 0.0 0.0 \n22517 0 0 0 ... 0.0 0.0 \n22518 0 1 0 ... 0.0 0.0 \n22519 0 0 0 ... 0.0 0.0 \n22522 0 1 1 ... 0.0 0.0 \n22523 0 1 0 ... 0.0 0.0 \n22525 0 1 0 ... 0.0 0.0 \n22526 0 1 0 ... 0.0 0.0 \n22527 0 1 0 ... 0.0 0.0 \n22528 0 0 0 ... 0.0 0.0 \n22529 0 0 0 ... 0.0 0.0 \n22530 0 0 0 ... 0.0 0.0 \n22531 0 0 0 ... 0.0 0.0 \n22532 0 1 0 ... 0.0 0.0 \n22533 0 1 0 ... 0.0 0.0 \n22534 0 0 0 ... 0.0 0.0 \n22535 0 1 0 ... 0.0 0.0 \n22536 0 0 0 ... 0.0 0.0 \n22537 0 1 0 ... 0.0 0.0 \n22538 0 0 0 ... 0.0 0.0 \n22539 0 1 0 ... 0.0 0.0 \n22540 0 1 0 ... 0.0 0.0 \n22541 0 1 1 ... 0.0 0.0 \n22542 0 0 0 ... 0.0 0.0 \n\n flag_SF flag_SH service_harvest service_http_8001 service_aol \\\n0 0.0 0.0 0 0 0 \n1 0.0 0.0 0 0 0 \n2 1.0 0.0 0 0 0 \n5 1.0 0.0 0 0 0 \n6 1.0 0.0 0 0 0 \n8 1.0 0.0 0 0 0 \n11 1.0 0.0 0 0 0 \n12 0.0 0.0 0 0 0 \n13 0.0 0.0 0 0 0 \n14 1.0 0.0 0 0 0 \n15 1.0 0.0 0 0 0 \n16 1.0 0.0 0 0 0 \n17 1.0 0.0 0 0 0 \n18 1.0 0.0 0 0 0 \n19 0.0 0.0 0 0 0 \n20 0.0 0.0 0 0 0 \n22 1.0 0.0 0 0 0 \n23 1.0 0.0 0 0 0 \n24 0.0 0.0 0 0 0 \n25 0.0 0.0 0 0 0 \n26 1.0 0.0 0 0 0 \n27 1.0 0.0 0 0 0 \n28 1.0 0.0 0 0 0 \n29 1.0 0.0 0 0 0 \n30 0.0 0.0 0 0 0 \n31 1.0 0.0 0 0 0 \n32 1.0 0.0 0 0 0 \n33 1.0 0.0 0 0 0 \n36 1.0 0.0 0 0 0 \n38 1.0 0.0 0 0 0 \n... ... ... ... ... ... \n22508 1.0 0.0 0 0 0 \n22510 1.0 0.0 0 0 0 \n22511 1.0 0.0 0 0 0 \n22512 0.0 0.0 0 0 0 \n22514 1.0 0.0 0 0 0 \n22515 0.0 0.0 0 0 0 \n22516 1.0 0.0 0 0 0 \n22517 1.0 0.0 0 0 0 \n22518 1.0 0.0 0 0 0 \n22519 1.0 0.0 0 0 0 \n22522 1.0 0.0 0 0 0 \n22523 1.0 0.0 0 0 0 \n22525 0.0 0.0 0 0 0 \n22526 1.0 0.0 0 0 0 \n22527 1.0 0.0 0 0 0 \n22528 1.0 0.0 0 0 0 \n22529 1.0 0.0 0 0 0 \n22530 0.0 0.0 0 0 0 \n22531 0.0 0.0 0 0 0 \n22532 1.0 0.0 0 0 0 \n22533 1.0 0.0 0 0 0 \n22534 0.0 0.0 0 0 0 \n22535 1.0 0.0 0 0 0 \n22536 0.0 0.0 0 0 0 \n22537 1.0 0.0 0 0 0 \n22538 1.0 0.0 0 0 0 \n22539 1.0 0.0 0 0 0 \n22540 1.0 0.0 0 0 0 \n22541 1.0 0.0 0 0 0 \n22542 1.0 0.0 0 0 0 \n\n service_urh_i service_red_i service_http_2784 \n0 0 0 0 \n1 0 0 0 \n2 0 0 0 \n5 0 0 0 \n6 0 0 0 \n8 0 0 0 \n11 0 0 0 \n12 0 0 0 \n13 0 0 0 \n14 0 0 0 \n15 0 0 0 \n16 0 0 0 \n17 0 0 0 \n18 0 0 0 \n19 0 0 0 \n20 0 0 0 \n22 0 0 0 \n23 0 0 0 \n24 0 0 0 \n25 0 0 0 \n26 0 0 0 \n27 0 0 0 \n28 0 0 0 \n29 0 0 0 \n30 0 0 0 \n31 0 0 0 \n32 0 0 0 \n33 0 0 0 \n36 0 0 0 \n38 0 0 0 \n... ... ... ... 
\n22508 0 0 0 \n22510 0 0 0 \n22511 0 0 0 \n22512 0 0 0 \n22514 0 0 0 \n22515 0 0 0 \n22516 0 0 0 \n22517 0 0 0 \n22518 0 0 0 \n22519 0 0 0 \n22522 0 0 0 \n22523 0 0 0 \n22525 0 0 0 \n22526 0 0 0 \n22527 0 0 0 \n22528 0 0 0 \n22529 0 0 0 \n22530 0 0 0 \n22531 0 0 0 \n22532 0 0 0 \n22533 0 0 0 \n22534 0 0 0 \n22535 0 0 0 \n22536 0 0 0 \n22537 0 0 0 \n22538 0 0 0 \n22539 0 0 0 \n22540 0 0 0 \n22541 0 0 0 \n22542 0 0 0 \n\n[17171 rows x 122 columns]\n" ], [ "# scaler1 = preprocessing.StandardScaler().fit(X_DoS)\n# X_DoS=scaler1.transform(X_DoS) \n\n# scaler5 = preprocessing.StandardScaler().fit(X_DoS_test)\n# X_DoS_test=scaler5.transform(X_DoS_test)", "_____no_output_____" ], [ "from sklearn.feature_selection import SelectPercentile, f_classif\nnp.seterr(divide='ignore', invalid='ignore');\nselector=SelectPercentile(f_classif, percentile=10)\nX_newDoS = selector.fit_transform(X_DoS,Y_DoS)\nX_newDoS.shape\n", "/usr/local/lib/python3.7/site-packages/sklearn/feature_selection/univariate_selection.py:114: UserWarning: Features [ 16 44 63 66 68 86 114] are constant.\n UserWarning)\n" ], [ "true=selector.get_support()\nnewcolindex_DoS=[i for i, x in enumerate(true) if x]\nprint(newcolindex_DoS)\n\nnewcolname_DoS=list( colNames[i] for i in newcolindex_DoS )\nnewcolname_DoS\n\n# newcolname_DoS_test=list( colNames_test[i] for i in newcolindex_DoS )\n# newcolname_DoS_test\n", "[8, 19, 21, 22, 25, 28, 29, 30, 34, 35, 65, 116, 120]\n" ], [ "from sklearn.feature_selection import RFE\nfrom sklearn.tree import DecisionTreeClassifier\n# Create a decision tree classifier. By convention, clf means 'classifier'\nclf = DecisionTreeClassifier(random_state=0)\n\n#rank all features, i.e continue the elimination until the last one\n# rfe = RFE(clf, n_features_to_select=1)\nclf.fit(X_newDoS, Y_DoS)", "_____no_output_____" ], [ "X_DoS_test=(X_DoS_test[['logged_in',\n 'count',\n 'serror_rate',\n 'srv_serror_rate',\n 'same_srv_rate',\n 'dst_host_count',\n 'dst_host_srv_count',\n 'dst_host_same_srv_rate',\n 'dst_host_serror_rate',\n 'dst_host_srv_serror_rate',\n 'service_http',\n 'flag_S0',\n 'flag_SF']])\n", "_____no_output_____" ], [ "clf.predict(X_D)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a914d920fc08f004d906daa6791ad2110166dd
6,091
ipynb
Jupyter Notebook
notebooks/dodavky.ipynb
vitkolos/ockovani-covid
518cdd2151a61f0bb3730188ab930c8f9711681f
[ "Apache-2.0" ]
62
2021-01-16T18:02:57.000Z
2022-01-12T21:10:34.000Z
notebooks/dodavky.ipynb
vitkolos/ockovani-covid
518cdd2151a61f0bb3730188ab930c8f9711681f
[ "Apache-2.0" ]
224
2021-01-16T19:11:01.000Z
2022-03-15T19:42:37.000Z
notebooks/dodavky.ipynb
vitkolos/ockovani-covid
518cdd2151a61f0bb3730188ab930c8f9711681f
[ "Apache-2.0" ]
13
2021-01-16T18:38:11.000Z
2021-07-02T20:09:47.000Z
28.462617
128
0.378263
[ [ [ "import sys \nsys.path.insert(0, '..')\n\nfrom config import Config\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndb = create_engine(Config.SQLALCHEMY_DATABASE_URI)", "_____no_output_____" ], [ "odhad = pd.read_sql_query(\n \"\"\"\n select vekova_skupina, min(datum) datum, min(pocet) pocet from (\n select min(vekova_skupina) vekova_skupina, sum(pocet) * 0.7 pocet\n from populace_kategorie\n join populace on vek >= min_vek and orp_kod = 'CZ0'\n where min_vek >= 18\n group by vekova_skupina\n ) t\n join (\n select datum, sum(pocet) over (order by datum rows between unbounded preceding and current row) as celkem_lidi \n from (\n select datum, sum(pocet / davky) as pocet \n from dodavky_vakcin d \n join vakciny v on (d.vyrobce = v.vyrobce)\n group by datum\n ) t1\n ) t2\n on pocet < celkem_lidi\n group by vekova_skupina\n order by vekova_skupina, datum\n \"\"\",\n db\n)\nodhad", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
d0a91581e44dea9d10cea8a7cca74451392f8fbc
6,657
ipynb
Jupyter Notebook
Assignments/Final-Project.ipynb
joseph-hellerstein/advanced-controls-lectures
dc43f6c3517616da3b0ea7c93192d911414ee202
[ "MIT" ]
null
null
null
Assignments/Final-Project.ipynb
joseph-hellerstein/advanced-controls-lectures
dc43f6c3517616da3b0ea7c93192d911414ee202
[ "MIT" ]
null
null
null
Assignments/Final-Project.ipynb
joseph-hellerstein/advanced-controls-lectures
dc43f6c3517616da3b0ea7c93192d911414ee202
[ "MIT" ]
null
null
null
30.259091
186
0.592159
[ [ [ "# FINAL PROJECT", "_____no_output_____" ], [ "In the final project, you will create a closed loop system for an SBML model.\nStart by selecting a model from the [BioModels Curated branch](https://www.ebi.ac.uk/biomodels/search?query=*%3A*+AND+curationstatus%3A%22Manually+curated%22&domain=biomodels).)\nYou don't have to restrict yourself to thoses models, but you'll find that the curated BioModels are fairly easy to reproduce.) You'll find a paper associated with each model.\n\n1. **Specify the requirements for the control system.** (10 pt) After reading the paper for the model, propose a chemical species for the control output\nand an operating point for it. You should also indicate the desired\noperating characteristics such as settling time and overshoot.\nJustify this in terms of the potential value of controlling\nthe chemical species and why the particular operating point makes sense.\n1. **Determine the control input you will use**. (10 pt) This will require evaluating the DC gain of\nof candidate control inputs on chemical species that are the control outputs.\n1. **Design the control system**. (15 pt) You will specify a controller and possibly a filter.\nYou will select the parameters of the elements based on the poles and DC gains implied by\nthe operating characteristics in (1).\n1. **Evaluate your design.** (25 pt) You will construct a simulation testbed in which the\nSBML model is regulated by your control architecture.\nYou should evaluate the effects of disturbances and noise.\n1. **Discuss trade-offs.** (10 pt) Discuss trade-offs in the selection of parameter values in your design.\nThis should consider the impact of parameter values on the closed loop transfer functions.\n\nYou will receive 5 extra points if you use LaTex for your mathematical analyses.\n\n**Students will do a 15 minute presentation of their project during the last week of class.**\nThe presentation is ungraded. The purpose is to provide early feedback on the project.", "_____no_output_____" ], [ "# Preliminaries", "_____no_output_____" ] ], [ [ "!pip install -q controlSBML\nimport control\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tellurium as te", "_____no_output_____" ] ], [ [ "# Helpful Hints", "_____no_output_____" ], [ "## LaTex Basics", "_____no_output_____" ], [ "1. All LaTex appears between dollar signs (``$``)\n1. Subscripts: $x_2$\n1. Superscript: $x^2$\n1. Summation: $\\sum_{n=0}^{\\infty} e^{nt}$\n1. Derivatives: $\\dot{x}(t)$\n1. Bold: ${\\bf A}$\n1. Fractions: $\\frac{a}{b}$", "_____no_output_____" ], [ "## Closed Loop Transfer Functions", "_____no_output_____" ], [ "<img src=\"https://github.com/joseph-hellerstein/advanced-controls-lectures/blob/main/Lecture_13_14-Closed-Loop-Systems/Closed-Loop-System.png?raw=true\"\n alt=\"Markdown Monster icon\" width=600pt\n style=\"float: left; margin-right: 10px;\" />", "_____no_output_____" ], [ "**Transfer Functions**\n\\begin{eqnarray}\nH_{RY}(s) & = & \\frac{Y(s)}{R(s)} & = & \\frac{C(s) G(s)}{1 + C(s) G(s) F(s)} \\\\\nH_{RE}(s) & = & \\frac{E(s)}{R(s)} & = & \\frac{1}{1 + C(s) G(s) F(s)} \\\\\nH_{NY}(s) & = & \\frac{Y(s)}{N(s)} & = & -\\frac{ F(s)}{1 + C(s) G(s) F(s)} \\\\\nH_{DY}(s) & = & \\frac{Y(s)}{D(s)} & = & \\frac{ C(s)}{1 + C(s) G(s) F(s)} \\\\\n\\end{eqnarray}", "_____no_output_____" ], [ "# 1. Specify Requirements", "_____no_output_____" ], [ "# 2. Determine the Control Input", "_____no_output_____" ], [ "# 3. Design the Control System", "_____no_output_____" ], [ "# 4. 
Evaluate the Design", "_____no_output_____" ], [ "# 5. Discuss Trade-offs", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0a91ada79b01c2a27b4d0bf7e31f89130808889
647,084
ipynb
Jupyter Notebook
_notebooks/2021-07-02-movielens-eda-modeling.ipynb
recohut/notebook
610670666a1c3d8ef430d42f712ff72ecdbd8f86
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-07-02-movielens-eda-modeling.ipynb
recohut/notebook
610670666a1c3d8ef430d42f712ff72ecdbd8f86
[ "Apache-2.0" ]
1
2022-01-12T05:40:57.000Z
2022-01-12T05:40:57.000Z
_notebooks/2021-07-02-movielens-eda-modeling.ipynb
recohut/notebook
610670666a1c3d8ef430d42f712ff72ecdbd8f86
[ "Apache-2.0" ]
1
2021-08-13T19:00:26.000Z
2021-08-13T19:00:26.000Z
95.285525
91,966
0.737604
[ [ [ "# Movielens EDA and Modeling\n> EDA and modeling on movielens dataset\n\n- toc: true\n- badges: true\n- comments: true\n- categories: [EDA, Movie, Visualization]\n- image:", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "# download dataset\n!wget http://files.grouplens.org/datasets/movielens/ml-100k.zip && unzip ml-100k.zip", "_____no_output_____" ], [ "!wget http://files.grouplens.org/datasets/movielens/ml-latest.zip && unzip ml-latest.zip", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nfrom datetime import datetime\nfrom collections import OrderedDict\n\nimport gc\nimport sys\nfrom os.path import join\nfrom os.path import exists\nfrom functools import reduce\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom IPython.display import Markdown\n\nimport json\nimport requests\nfrom IPython.display import HTML", "_____no_output_____" ], [ "import sys\nsys.path.append(\"/content/drive/MyDrive\")\nimport mykeys\n\napi_key = mykeys.moviedb_key\nheaders = {'Accept': 'application/json'}\npayload = {'api_key': api_key} \ntry:\n response = requests.get(\n \"http://api.themoviedb.org/3/configuration\",\n params=payload,\n headers=headers,\n )\n response = json.loads(response.text)\n base_url = response['images']['base_url'] + 'w185'\nexcept:\n raise ValueError(\"Your API key might be invalid.\")", "_____no_output_____" ] ], [ [ "## Loading", "_____no_output_____" ] ], [ [ "# loading ratings data\nnames = ['user_id', 'item_id', 'rating', 'timestamp']\nratings_df = pd.read_csv('ml-100k/u.data', sep='\\t', names=names)\nprint('First 5:')\ndisplay(ratings_df.head())\nprint()\nprint('Last 5:')\ndisplay(ratings_df.tail())", "First 5:\n" ], [ "names = [\"user_id\", \"age\" , \"gender\",\"occupation\", \"zip_code\"]\nuser_df = pd.read_csv('ml-100k/u.user', sep='|', names=names)\nprint('First 5:')\ndisplay(user_df.head())\nprint()\nprint('Last 5:')\ndisplay(user_df.tail())", "First 5:\n" ], [ "# loading ratings data\nnames = [\"genre\", \"id\"]\ngenre_df = pd.read_csv('ml-100k/u.genre', sep='|', names=names)\nprint('First 5:')\ndisplay(genre_df.head())\nprint()\nprint('Last 5:')\ndisplay(genre_df.tail())", "First 5:\n" ], [ "# loading ratings data\nnames = [\"item_id\", \"movie_title\", \"release_date\", \"video_release_date\", \"IMDb_URL\"]\nitems_df = pd.read_csv('ml-100k/u.item', sep='|', encoding=\"iso-8859-1\", header=None)\nprint('First 5:')\ndisplay(items_df.head())\nprint()\nprint('Last 5:')\ndisplay(items_df.tail())\n\n# loading movies info\n# item_info = pd.read_csv('ml-100k/u.item', sep='|', encoding=\"iso-8859-1\", \n # header=None)\n# item_info.columns = ['title']\n# item_info.head()", "First 5:\n" ], [ "links_df = pd.read_csv('ml-latest/links.csv', dtype=str).set_index('movieId', drop=True)\nlinks_df.head()", "_____no_output_____" ] ], [ [ "Processed Data Loading", "_____no_output_____" ] ], [ [ "pratings = pd.read_csv(dpath+'ratings.csv', sep=',', encoding='latin-1')\nprint('First 5:')\ndisplay(pratings.head())\nprint()\nprint('Last 5:')\ndisplay(pratings.tail())", "First 5:\n" ], [ "pmovies = pd.read_csv(dpath+'items.csv', sep=',', encoding='latin-1')\nprint('First 5:')\ndisplay(pmovies.head())\nprint()\nprint('Last 5:')\ndisplay(pmovies.tail())", "First 5:\n" ] ], [ [ "## EDA", "_____no_output_____" ], [ "How are the ratings 
distributed?", "_____no_output_____" ] ], [ [ "norm_counts = (\n ratings_df['rating']\n .value_counts(normalize=True, sort=False)\n .multiply(100)\n .reset_index()\n .rename(columns={'rating': 'percent', 'index': 'rating'})\n)\nax = sns.barplot(x='rating', y='percent', data=norm_counts)\nax.set_title('Rating Frequencies')\nplt.show()", "_____no_output_____" ] ], [ [ "How many ratings were submitted per month?", "_____no_output_____" ] ], [ [ "def _process_ratings(ratings_df):\n ratings_df = ratings_df.copy()\n ratings_df['timestamp'] = ratings_df['timestamp'].apply(datetime.fromtimestamp)\n ratings_df['year'] = ratings_df['timestamp'].dt.year\n ratings_df['month'] = ratings_df['timestamp'].dt.month\n ratings_df = ratings_df.sort_values('timestamp')\n return ratings_df\n\n_ratings_df = _process_ratings(ratings_df)\nmonth_counts = _ratings_df[['year', 'month', 'rating']].groupby(['year', 'month']).count()\nmonth_counts = month_counts.rename(index=str, columns={'rating': '# of Ratings'})\nmonth_counts = month_counts.reset_index()\nmonth_counts['Date'] = month_counts[['year', 'month']].apply(\n lambda x: datetime(year=int(x[0]), month=int(x[1]), day=1), axis=1\n)\nmonth_counts = month_counts.set_index('Date', drop=True)\nmonth_counts['# of Ratings'].plot(style='o-')\nplt.ylabel('# of Ratings')\nplt.title('# of Ratings per Month')\nplt.ylim([0, 25000])\nplt.gca().grid(which='minor')\nplt.show()", "_____no_output_____" ] ], [ [ "How consistent are the average ratings over time?", "_____no_output_____" ] ], [ [ "month_counts = _ratings_df.groupby(['year', 'month'])['rating'].agg([np.mean, np.std])\nmonth_counts = month_counts.rename(index=str, columns={'mean': 'Rating'})\nmonth_counts = month_counts.reset_index()\nmonth_counts['Date'] = month_counts[['year', 'month']].apply(\n lambda x: datetime(year=int(x[0]), month=int(x[1]), day=1), axis=1\n)\nmonth_counts = month_counts.set_index('Date', drop=True)\nmonth_counts['Rating'].plot(style='o-')\nplt.fill_between(month_counts.index,\n month_counts['Rating'] - month_counts['std'],\n month_counts['Rating'] + month_counts['std'],\n alpha=0.3,\n )\nplt.ylim([0, 5])\nplt.ylabel('Rating')\nplt.gca().grid(which='minor')\nplt.title('Rating Consistency over Time')\nplt.show()", "_____no_output_____" ] ], [ [ "How quickly do the movie and user bases grow over time?\n\n*(assume that a user has joined on her first rating, and that she remains a user from then on.)*", "_____no_output_____" ] ], [ [ "_ratings_df['Date'] = _ratings_df[['year', 'month']].apply(\n lambda x: datetime(year=int(x[0]), month=int(x[1]), day=1), axis=1\n)\nn_users = []\nn_movies = []\ndates = np.unique(_ratings_df['Date'])\nfor date in dates:\n n_users.append(_ratings_df[_ratings_df['Date'] <= date]['user_id'].nunique())\n n_movies.append(_ratings_df[_ratings_df['Date'] <= date]['item_id'].nunique())\ndf_users = pd.DataFrame({'Date': dates, '# of Users': n_users}).set_index('Date')\ndf_movies = pd.DataFrame({'Date': dates, '# of Movies': n_movies}).set_index('Date')\nfig, ax = plt.subplots()\ndf_movies['# of Movies'].plot(style='o-', ax=ax)\ndf_users['# of Users'].plot(style='o-', ax=ax)\nplt.ylabel('Count')\nplt.ylim([0, 2000])\nax.grid(which='minor')\nplt.tight_layout()\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "How sparse is the user/movies matrix we'll be dealing with?", "_____no_output_____" ] ], [ [ "def get_rating_matrix(X):\n \"\"\"Function to generate a ratings matrix and mappings for\n the user and item ids to the row and column indices\n Parameters\n 
----------\n X : pandas.DataFrame, shape=(n_ratings,>=3)\n First 3 columns must be in order of user, item, rating.\n Returns\n -------\n rating_matrix : 2d numpy array, shape=(n_users, n_items)\n user_map : pandas Series, shape=(n_users,)\n Mapping from the original user id to an integer in the range [0,n_users)\n item_map : pandas Series, shape=(n_items,)\n Mapping from the original item id to an integer in the range [0,n_items)\n \"\"\"\n user_col, item_col, rating_col = X.columns[:3]\n rating = X[rating_col]\n user_map = pd.Series(\n index=np.unique(X[user_col]),\n data=np.arange(X[user_col].nunique()),\n name='user_map',\n )\n item_map = pd.Series(\n index=np.unique(X[item_col]),\n data=np.arange(X[item_col].nunique()),\n name='columns_map',\n )\n user_inds = X[user_col].map(user_map)\n item_inds = X[item_col].map(item_map)\n rating_matrix = (\n pd.pivot_table(\n data=X,\n values=rating_col,\n index=user_inds,\n columns=item_inds,\n )\n .fillna(0)\n .values\n )\n return rating_matrix, user_map, item_map", "_____no_output_____" ], [ "rating_matrix, user_map, item_map = get_rating_matrix(ratings_df)\nwith plt.style.context('seaborn-white'):\n rating_matrix_binary = rating_matrix > 0\n plt.imshow(rating_matrix_binary)\n plt.xlabel('Movie')\n plt.ylabel('User')\n plt.show()", "_____no_output_____" ], [ "rating_matrix", "_____no_output_____" ], [ "Markdown(\n r\"The matrix density is $n_{{ratings}}/(n_{{users}} \\times n_{{movies}}) = {:0.3f}$\"\n .format(np.sum(rating_matrix_binary) / np.prod(rating_matrix.shape))\n)", "_____no_output_____" ], [ "user_counts = ratings_df['user_id'].value_counts(ascending=True)\nuser_counts.index = np.arange(len(user_counts)) / len(user_counts)\nplt.plot(user_counts, user_counts.index, '.', label='Users')\nmovie_counts = ratings_df['item_id'].value_counts(ascending=True)\nmovie_counts.index = np.arange(len(movie_counts)) / len(movie_counts)\nplt.plot(movie_counts, movie_counts.index, '.', label='Movies')\nplt.xlabel('Number of Ratings')\nplt.ylabel('ECDF')\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "## Preprocessing", "_____no_output_____" ] ], [ [ "display(ratings_df.head())", "_____no_output_____" ] ], [ [ "## Baseline Models", "_____no_output_____" ], [ "### Baseline - Simple Average Model\nThe first model we'll test is about the simplest one possible. We'll just average all the training set ratings and use that average for the prediction for all test set examples.", "_____no_output_____" ] ], [ [ "class SimpleAverageModel():\n \"\"\"A very simple model that just uses the average of the ratings in the\n training set as the prediction for the test set.\n\n Attributes\n ----------\n mean : float\n Average of the training set ratings\n \"\"\"\n\n def __init__(self):\n pass\n\n def fit(self, X):\n \"\"\"Given a ratings dataframe X, compute the mean rating\n \n Parameters\n ----------\n X : pandas dataframe, shape = (n_ratings, >=3)\n User, item, rating dataframe. Only the 3rd column is used.\n \n Returns\n -------\n self\n \"\"\"\n self.mean = X.iloc[:, 2].mean()\n return self\n\n def predict(self, X):\n return np.ones(len(X)) * self.mean", "_____no_output_____" ] ], [ [ "### Baseline - Average by ID Model\nWe can probably do a little better by using the user or item (movie) average. Here we'll set up a baseline model class that allows you to pass either a list of userIds or movieIds as X. 
The prediction for a given ID will just be the average of ratings from that ID, or the overall average if that ID wasn't seen in the training set.", "_____no_output_____" ] ], [ [ "class AverageByIdModel():\n \"\"\"Simple model that predicts based on average ratings for a given Id\n (movieId or userId) from training data\n \n Parameters\n ----------\n id_column : string\n Name of id column (i.e. 'itemId', 'userId') to average by in\n dataframe that will be fitted to\n\n Attributes\n ----------\n averages_by_id : pandas Series, shape = [n_ids]\n Pandas series of rating averages by id\n overall_average : float\n Average rating over all training samples\n \"\"\"\n def __init__(self, id_column):\n self.id_column = id_column\n\n def fit(self, X):\n \"\"\"Fit training data.\n\n Parameters\n ----------\n X : pandas dataframe, shape = (n_ratings, >=3)\n User, item, rating dataframe. Columns beyond 3 are ignored\n\n Returns\n -------\n self : object\n \"\"\"\n rating_column = X.columns[2]\n X = X[[self.id_column, rating_column]].copy()\n X.columns = ['id', 'rating']\n self.averages_by_id = (\n X\n .groupby('id')['rating']\n .mean()\n .rename('average_rating')\n )\n self.overall_average = X['rating'].mean()\n return self\n\n def predict(self, X):\n \"\"\"Return rating predictions\n\n Parameters\n ----------\n X : pandas dataframe, shape = (n_ratings, >=3)\n Array of n_ratings movieIds or userIds\n\n Returns\n -------\n y_pred : numpy array, shape = (n_ratings,)\n Array of n_samples rating predictions\n \"\"\"\n rating_column = X.columns[2]\n X = X[[self.id_column, rating_column]].copy()\n X.columns = ['id', 'rating']\n X = X.join(self.averages_by_id, on='id')\n X['average_rating'].fillna(self.overall_average, inplace=True)\n return X['average_rating'].values", "_____no_output_____" ] ], [ [ "### Baseline - Damped Baseline with User + Movie Data\n\nThis baseline model takes into account the average ratings of both the user and the movie, as well as a damping factor that brings the baseline prediction closer to the overall mean. The damping factor has been shown empirically to improve the perfomance.\n\nThis model follows equation 2.1 from a [collaborative filtering paper](http://files.grouplens.org/papers/FnT%20CF%20Recsys%20Survey.pdf) from [GroupLens](https://grouplens.org/), the same group that published the MovieLens data. This equation defines rhe baseline rating for user $u$ and item $i$ as \n\n$$b_{u,i} = \\mu + b_u + b_i$$\n\nwhere\n\n$$b_u = \\frac{1}{|I_u| + \\beta_u}\\sum_{i \\in I_u} (r_{u,i} - \\mu)$$\n\nand\n\n$$b_i = \\frac{1}{|U_i| + \\beta_i}\\sum_{u \\in U_i} (r_{u,i} - b_u - \\mu).$$\n\n(See equations 2.4 and 2.5). Here, $\\beta_u$ and $\\beta_i$ are damping factors, for which the paper reported 25 is a good number for this dataset. For now we'll just leave these values equal ($\\beta=\\beta_u=\\beta_i$). 
Here's a summary of the meanings of all the variables here:\n\n| Variable | Meaning |\n| --------------- | ----------------------------------------------------- |\n| $b_{u,i}$ | Baseline rating for user $u$ on item (movie) $i$ |\n| $\\mu$ | The mean of all ratings |\n| $b_u$ | The deviation from $\\mu$ associated with user $u$ |\n| $b_i$ | The deviation from $\\mu+b_u$ associated with user $i$ |\n| $I_u$ | The set of all items rated by user $u$ |\n| $\\mid I_u \\mid$ | The number of items rated by user $u$ |\n| $\\beta_u$ | Damping factor for the users ($=\\beta$) |\n| $r_{u,i}$ | Observed rating for user $u$ on item $i$ |\n| $U_i$ | The set of all users who rated item $i$ |\n| $\\mid U_i \\mid$ | The number of users who rated item $i$ |\n| $\\beta_i$ | Damping factor for the items ($=\\beta$) |", "_____no_output_____" ] ], [ [ "class DampedUserMovieBaselineModel():\n \"\"\"Baseline model that of the form mu + b_u + b_i,\n where mu is the overall average, b_u is a damped user\n average rating residual, and b_i is a damped item (movie)\n average rating residual. See eqn 2.1 of\n http://files.grouplens.org/papers/FnT%20CF%20Recsys%20Survey.pdf\n\n Parameters\n ----------\n damping_factor : float, default=0\n Factor to bring residuals closer to 0. Must be positive.\n\n Attributes\n ----------\n mu : float\n Average rating over all training samples\n b_u : pandas Series, shape = [n_users]\n User residuals\n b_i : pandas Series, shape = [n_movies]\n Movie residuals\n damping_factor : float, default=0\n Factor to bring residuals closer to 0. Must be >= 0.\n \"\"\"\n def __init__(self, damping_factor=0):\n self.damping_factor = damping_factor\n\n def fit(self, X):\n \"\"\"Fit training data.\n\n Parameters\n ----------\n X : DataFrame, shape = [n_samples, >=3]\n User, movie, rating dataFrame. 
Columns beyond 3 are ignored\n\n Returns\n -------\n self : object\n \"\"\"\n X = X.iloc[:, :3].copy()\n X.columns = ['user', 'item', 'rating']\n self.mu = np.mean(X['rating'])\n user_counts = X['user'].value_counts()\n movie_counts = X['item'].value_counts()\n b_u = (\n X[['user', 'rating']]\n .groupby('user')['rating']\n .sum()\n .subtract(user_counts * self.mu)\n .divide(user_counts + self.damping_factor)\n .rename('b_u')\n )\n X = X.join(b_u, on='user')\n X['item_residual'] = X['rating'] - X['b_u'] - self.mu\n b_i = (\n X[['item', 'item_residual']]\n .groupby('item')['item_residual']\n .sum()\n .divide(movie_counts + self.damping_factor)\n .rename('b_i')\n )\n self.b_u = b_u\n self.b_i = b_i\n return self\n\n def predict(self, X):\n \"\"\"Return rating predictions\n\n Parameters\n ----------\n X : DataFrame, shape = (n_ratings, 2)\n User, item dataframe\n\n Returns\n -------\n y_pred : numpy array, shape = (n_ratings,)\n Array of n_samples rating predictions\n \"\"\"\n X = X.iloc[:, :2].copy()\n X.columns = ['user', 'item']\n X = X.join(self.b_u, on='user').fillna(0)\n X = X.join(self.b_i, on='item').fillna(0)\n return (self.mu + X['b_u'] + X['b_i']).values", "_____no_output_____" ], [ "def get_xval_errs_and_res(df, model, n_splits=5, random_state=0, rating_col='rating'):\n kf = KFold(n_splits=n_splits, random_state=random_state, shuffle=True)\n errs, stds = [], []\n residuals = np.zeros(len(df))\n for train_inds, test_inds in kf.split(df):\n train_df, test_df = df.iloc[train_inds], df.iloc[test_inds]\n pred = model.fit(train_df).predict(test_df)\n residuals[test_inds] = pred - test_df[rating_col]\n mae = mean_absolute_error(pred, test_df[rating_col])\n errs.append(mae)\n return errs, residuals", "_____no_output_____" ], [ "errs_1, res_1 = get_xval_errs_and_res(ratings_df, SimpleAverageModel())\nerrs_2, res_2 = get_xval_errs_and_res(ratings_df, AverageByIdModel('item_id'))\nerrs_3, res_3 = get_xval_errs_and_res(ratings_df, AverageByIdModel('user_id'))\nerrs_4, res_4 = get_xval_errs_and_res(ratings_df, DampedUserMovieBaselineModel(0))\nerrs_5, res_5 = get_xval_errs_and_res(ratings_df, DampedUserMovieBaselineModel(10))\nerrs_6, res_6 = get_xval_errs_and_res(ratings_df, DampedUserMovieBaselineModel(25))\nerrs_7, res_7 = get_xval_errs_and_res(ratings_df, DampedUserMovieBaselineModel(50))\ndf_errs = pd.DataFrame(\n OrderedDict(\n (\n ('Average', errs_1),\n ('Item Average', errs_2),\n ('User Average', errs_3),\n ('Combined 0', errs_4),\n ('Combined 10', errs_5),\n ('Combined 25', errs_6),\n ('Combined 50', errs_7),\n )\n )\n)\ndisplay(df_errs)\ndf_errs = (\n pd.melt(df_errs, value_vars=df_errs.columns)\n .rename({'variable': 'Baseline Model', 'value': 'MAE'}, axis=1)\n)\ndf_res = pd.DataFrame(\n OrderedDict(\n (\n ('Average', res_1),\n ('Item Average', res_2),\n ('User Average', res_3),\n ('Combined 0', res_4),\n ('Combined 10', res_5),\n ('Combined 25', res_6),\n ('Combined 50', res_7),\n )\n )\n)\ndisplay(df_res.tail())\ndf_res = (\n pd.melt(df_res, value_vars=df_res.columns)\n .rename({'variable': 'Baseline Model', 'value': 'Residual'}, axis=1)\n)", "_____no_output_____" ], [ "fig, (ax0, ax1) = plt.subplots(2, 1, figsize=(12,8))\nsns.swarmplot(data=df_errs, x='Baseline Model', y='MAE', ax=ax0)\nsns.violinplot(data=df_res, x='Baseline Model', y='Residual', ax=ax1)\nax0.xaxis.set_visible(False)\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "The MAE plots above show that the combined model with a damping factor of 0 or 10 performs the best, followed by the item average, 
then the user average. It makes sense that taking into account deviations from the mean due to both user and item would perform the best: there is simply more data being taken into account for each baseline prediction. The same idea explains why the item average performs better than the user average: there are more items than users in this dataset, so averaging over items takes into account more data per baseline prediction than averaging over users. The residual plots underneath the MAE plot illustrate that taking into account more data pulls the density of the residuals closer to 0.\n\n**Selecting the baseline**: Both the Combined 0 and Combined 10 models performed equally, but we'll choose the Combined 10 model, because a higher damping factor is effectively stronger regularization, which will prevent overfitting better than a damping factor of 0.", "_____no_output_____" ], [ "## KNN Collaborative Filtering", "_____no_output_____" ] ], [ [ "# <!-- collapse=True -->\nclass KNNRecommender():\n \"\"\"User-based or Item-based collaborative filtering model that operates on\n dataframes with at least a user-like, item-like, and a rating-like column\n \n Parameters\n ----------\n mode : str, ['item | 'user'], default='item'\n Tells model whether to use item-based or user-based collaborative filtering\n k : int, default=20\n Number of most similar items or users to average for prediction\n basline_algo : object, optional\n Algorithm used to predict baseline scores for each rating. If not provided,\n the mean of all training ratings is used as the baseline. If provided,\n the object must have a fit(X) method and a predict(X) method\n similarity_func : function, default=cosine_similarity\n Function must take a numpy array M of shape (m,n) and return a numpy array\n of shape (m,m) where each element i,j represents the similarity between row\n i and row j of M.\n loop_predict : boolean, default=True\n If True, the model will loop over all user-item pairs in test set and compute\n prediction individually. If False, the model will compute all ratings\n simultaneously. With sparse matrices, looping is typically faster.\n \n Attributes\n ----------\n train_mean : float\n Mean of the training data ratings. 
Used if baseline_algo is None.\n rating_matrix : 2d numpy array, shape=(n_users, n_items)\n Rating matrix minus baselines\n user_map : pandas Series, shape=(n_users,)\n Mapping from the original user id to an integer in the range [0,n_users)\n item_map : pandas Series, shape=(n_items,)\n Mapping from the original item id to an integer in the range [0,n_items)\n knn_indices : 2d numpy array, shape=([n_users|n_items], k)\n Element i,j represents the index of the jth closet [user|item] to i\n knn_similarities : 2d numpy array, shape=([n_users|n_items], k)\n Element i,j represents the similarity between the jth closest [user|item] to i\n \"\"\"\n def __init__(self, mode='item', k=20, baseline_algo=None,\n similarity_func=cosine_similarity, loop_predict=True):\n if not mode in ['user', 'item']:\n raise ValueError(\"'mode' must be either 'user' or 'item', not '{}'!\".format(mode))\n\n self.mode = mode\n self.k = k\n self.baseline_algo = baseline_algo\n self.similarity_func = similarity_func\n self.loop_predict = loop_predict\n \n self.train_mean = None\n self.rating_matrix = None\n self.user_map = None\n self.item_map = None\n self.knn_indices = None\n self.knn_similarities = None\n \n def _get_rating_matrix(self, X):\n \"\"\"Private function to generate a ratings matrx and mappings for\n the user and item ids to the row and column indices\n \n Parameters\n ----------\n X : pandas.DataFrame, shape=(n_ratings,>=3)\n First 3 columns must be in order of user, item, rating.\n \n Returns\n -------\n rating_matrix : 2d numpy array, shape=(n_users, n_items)\n user_map : pandas Series, shape=(n_users,)\n Mapping from the original user id to an integer in the range [0,n_users)\n item_map : pandas Series, shape=(n_items,)\n Mapping from the original item id to an integer in the range [0,n_items)\n \"\"\"\n user_col, item_col, rating_col = X.columns[:3]\n rating = X[rating_col]\n user_map = pd.Series(\n index=np.unique(X[user_col]),\n data=np.arange(X[user_col].nunique()),\n name='user_map',\n )\n item_map = pd.Series(\n index=np.unique(X[item_col]),\n data=np.arange(X[item_col].nunique()),\n name='columns_map',\n )\n user_inds = X[user_col].map(user_map)\n item_inds = X[item_col].map(item_map)\n rating_matrix = (\n pd.pivot_table(\n data=X,\n values=rating_col,\n index=user_inds,\n columns=item_inds,\n )\n .fillna(0)\n .values\n )\n return rating_matrix, user_map, item_map\n\n def _get_knn_indices_and_similarities(self, rating_matrix):\n \"\"\"Private function to find indices and similarities of k nearest\n neighbors for each user or item\n \n Parameters\n ----------\n rating_matrix : 2d numpy array, shape=(n_users, n_items)\n Matrix of ratings minus baselines\n \n Returns\n -------\n knn_indices : 2d numpy array, shape=([n_users|n_items], k)\n Element i,j represents the index of the jth closet [user|item] to i\n knn_similarities : 2d numpy array, shape=([n_users|n_items], k)\n Element i,j represents the similarity between the jth closest [user|item] to i\n \"\"\"\n if self.mode == 'item':\n n_users_or_items = rating_matrix.shape[1]\n else:\n n_users_or_items = rating_matrix.shape[0]\n if self.k > n_users_or_items:\n new_k = n_users_or_items - 1\n print(\n \"Warning: k = {} > # {}s = {}! 
Setting k to {}\"\n .format(self.k, n_users_or_items, self.mode, new_k)\n )\n self.k = new_k\n if self.mode == 'item':\n similarity_matrix = self.similarity_func(rating_matrix.T)\n else:\n similarity_matrix = self.similarity_func(rating_matrix)\n np.fill_diagonal(similarity_matrix, -1)\n knn_indices = np.argsort(similarity_matrix, axis=1)[:, ::-1][:, :self.k]\n # https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/neighbors/base.py#L373\n sample_range = np.arange(len(knn_indices))[:, None]\n knn_similarities = similarity_matrix[sample_range, knn_indices]\n return knn_indices, knn_similarities\n \n def fit(self, X):\n \"\"\"Fit model to training data X. Sets the knn_indices, knn_similarities, \n rating_matrix, user_map, and item map variables.\n \n Parameters\n ----------\n X : pandas DataFrame, shape=(n_ratings, >=3)\n First 3 columns must correspond to user, item, and rating in that order\n \n Returns\n -------\n self\n This allows chaining like `KNNRecommender().fit(X_train).predict(X_test)`\n \"\"\"\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X must be a DataFrame\")\n X = X.copy()\n user_col, item_col, rating_col = X.columns[:3]\n if self.baseline_algo is None:\n self.train_mean = X[rating_col].mean()\n X['rating_baseline'] = self.train_mean\n else:\n self.baseline_algo.fit(X.iloc[:, :3])\n X['rating_baseline'] = self.baseline_algo.predict(X[[user_col, item_col]])\n X['rating_diff'] = X[rating_col] - X['rating_baseline']\n nodiff_rating_matrix, _, _ = self._get_rating_matrix(X[[user_col, item_col, rating_col]])\n self.knn_indices, self.knn_similarities = self._get_knn_indices_and_similarities(\n nodiff_rating_matrix\n )\n gc.collect()\n self.rating_matrix, self.user_map, self.item_map = self._get_rating_matrix(\n X[[user_col, item_col, 'rating_diff']]\n )\n return self\n\n def _predict_1_ui_pair(self, user, item):\n \"\"\"Predict rating (minus baseline) for 1 user-item pair. Must add\n baseline to get the rating in the original rating scale.\n \n Parameters\n ----------\n user : int\n Must be in range [0, n_users)\n item : int\n Must be in range [0, n_items)\n \n Returns\n -------\n rating_pred : float\n Predicted ratings\n \"\"\"\n if self.mode == 'item':\n inds_i = self.knn_indices[item, :]\n sims_i = self.knn_similarities[item, :]\n # https://stackoverflow.com/a/35696047/2680824\n numerator = np.sum(self.rating_matrix[user, inds_i] * sims_i)\n denominator = np.sum(np.abs(sims_i))\n with np.errstate(divide='ignore', invalid='ignore'):\n rating_pred = numerator / denominator\n else:\n inds_u = self.knn_indices[user, :]\n sims_u = self.knn_similarities[user, :]\n # https://stackoverflow.com/a/35696047/2680824\n numerator = np.sum(self.rating_matrix[inds_u, item] * sims_u)\n denominator = np.sum(np.abs(sims_u))\n with np.errstate(divide='ignore', invalid='ignore'):\n rating_pred = numerator / denominator\n return rating_pred\n\n def predict(self, X):\n \"\"\"Predict ratings for each user-item pair in X\n \n Parameters\n ----------\n X : pandas DataFrame, shape=(n_ratings, >=2)\n First 2 columns of X must correspond to user and item.\n \n Returns\n -------\n pandas Series, shape=(n_ratings,)\n Ratings for each user-item pair in X. 
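Pairs whose user or item was not seen during training fall back to the baseline prediction.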
No restriction on the data type\n for the user and item ids, other than they must match the training indices.\n \"\"\"\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X must be a DataFrame\")\n X = X.copy()\n user_col, item_col = X.columns[:2]\n if self.baseline_algo is None:\n X['rating_baseline'] = self.train_mean\n else:\n X['rating_baseline'] = self.baseline_algo.predict(X)\n X['rating'] = 0\n known_user_and_item_mask = (\n X[user_col].isin(self.user_map.index) & X[item_col].isin(self.item_map.index)\n )\n X_known = X[known_user_and_item_mask]\n user_inds = X_known[user_col].map(self.user_map)\n item_inds = X_known[item_col].map(self.item_map)\n if self.loop_predict:\n rating_pred = np.array([\n self._predict_1_ui_pair(u_ind, i_ind)\n for u_ind, i_ind in zip(user_inds, item_inds)\n ])\n else:\n stacked_ratings = self.rating_matrix[\n self.knn_indices[:, :, None],\n np.arange(self.rating_matrix.shape[1])[None, None, :]\n ]\n numerator_matrix = np.sum(\n stacked_ratings * self.knn_similarities[:, :, None],\n axis=1\n )\n denominator_matrix = np.sum(\n (stacked_ratings != 0) * self.knn_similarities[:, :, None],\n axis=1\n )\n # https://stackoverflow.com/a/35696047/2680824\n with np.errstate(divide='ignore', invalid='ignore'):\n rating_pred_matrix = numerator_matrix / denominator_matrix\n rating_pred = rating_pred_matrix[user_inds, item_inds]\n rating_pred[np.isnan(rating_pred)] = 0\n X.loc[known_user_and_item_mask, 'rating'] = rating_pred\n return X['rating'] + X['rating_baseline']", "_____no_output_____" ] ], [ [ "Determine Optimal k Values", "_____no_output_____" ] ], [ [ "def cart_prod(df_1, df_2):\n df_1['_dummy_'], df_2['_dummy_'] = 1, 1\n return pd.merge(df_1, df_2, on='_dummy_').drop('_dummy_', axis=1)\n\nn_splits = 5\n\nk_list = [1, 2, 5, 10, 20, 50, 100, 200]\nmode_list = ['user', 'item']\ni_fold_list = np.arange(n_splits)\ndf_1 = pd.DataFrame({'k': k_list})\ndf_2 = pd.DataFrame({'mode': mode_list})\ndf_3 = pd.DataFrame({'i_fold': i_fold_list})\nresults_df = reduce(cart_prod, [df_1, df_2, df_3])\nresults_df.head(10)", "_____no_output_____" ], [ "kf = KFold(n_splits=n_splits, random_state=0, shuffle=True)\n\nfor (k, mode), group in results_df.groupby(['k', 'mode']):\n for (index, row), (train_inds, test_inds) in zip(group.iterrows(), kf.split(ratings_df)):\n print(\"k={}, mode={}: i_fold= \".format(row['k'], row['mode']), end='')\n print(\"{}, \".format(row['i_fold']), end='')\n train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\n baseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\n t1 = datetime.now()\n rec = KNNRecommender(mode=row['mode'], k=row['k'], baseline_algo=baseline_algo)\n rec.fit(train_df)\n preds = rec.predict(test_df[['user_id', 'item_id']])\n mae = mean_absolute_error(preds, test_df['rating'])\n results_df.loc[index, 'MAE'] = mae\n dt = (datetime.now() - t1).total_seconds()\n print(\"{:5.3f} dt={:.2f} seconds\".format(mae, dt))\n results_df.loc[index, 'time'] = dt", "k=1, mode=item: i_fold= 0, 0.813 dt=1.12 seconds\nk=1, mode=item: i_fold= 1, 0.810 dt=1.07 seconds\nk=1, mode=item: i_fold= 2, 0.803 dt=1.04 seconds\nk=1, mode=item: i_fold= 3, 0.810 dt=1.08 seconds\nk=1, mode=item: i_fold= 4, 0.807 dt=1.08 seconds\nk=1, mode=user: i_fold= 0, 0.828 dt=0.92 seconds\nk=1, mode=user: i_fold= 1, 0.829 dt=0.94 seconds\nk=1, mode=user: i_fold= 2, 0.829 dt=0.93 seconds\nk=1, mode=user: i_fold= 3, 0.833 dt=0.94 seconds\nk=1, mode=user: i_fold= 4, 0.833 dt=0.94 seconds\nk=2, mode=item: i_fold= 0, 0.765 dt=1.06 seconds\nk=2, 
mode=item: i_fold= 1, 0.764 dt=1.08 seconds\nk=2, mode=item: i_fold= 2, 0.757 dt=1.06 seconds\nk=2, mode=item: i_fold= 3, 0.760 dt=1.09 seconds\nk=2, mode=item: i_fold= 4, 0.765 dt=1.07 seconds\nk=2, mode=user: i_fold= 0, 0.785 dt=0.93 seconds\nk=2, mode=user: i_fold= 1, 0.785 dt=0.93 seconds\nk=2, mode=user: i_fold= 2, 0.781 dt=0.98 seconds\nk=2, mode=user: i_fold= 3, 0.786 dt=0.94 seconds\nk=2, mode=user: i_fold= 4, 0.799 dt=0.96 seconds\nk=5, mode=item: i_fold= 0, 0.734 dt=1.11 seconds\nk=5, mode=item: i_fold= 1, 0.731 dt=1.08 seconds\nk=5, mode=item: i_fold= 2, 0.731 dt=1.04 seconds\nk=5, mode=item: i_fold= 3, 0.730 dt=1.07 seconds\nk=5, mode=item: i_fold= 4, 0.739 dt=1.06 seconds\nk=5, mode=user: i_fold= 0, 0.755 dt=0.93 seconds\nk=5, mode=user: i_fold= 1, 0.754 dt=0.99 seconds\nk=5, mode=user: i_fold= 2, 0.750 dt=0.96 seconds\nk=5, mode=user: i_fold= 3, 0.755 dt=0.93 seconds\nk=5, mode=user: i_fold= 4, 0.766 dt=0.94 seconds\nk=10, mode=item: i_fold= 0, 0.728 dt=1.07 seconds\nk=10, mode=item: i_fold= 1, 0.724 dt=1.08 seconds\nk=10, mode=item: i_fold= 2, 0.725 dt=1.05 seconds\nk=10, mode=item: i_fold= 3, 0.724 dt=1.06 seconds\nk=10, mode=item: i_fold= 4, 0.734 dt=1.07 seconds\nk=10, mode=user: i_fold= 0, 0.746 dt=0.94 seconds\nk=10, mode=user: i_fold= 1, 0.744 dt=0.94 seconds\nk=10, mode=user: i_fold= 2, 0.741 dt=0.94 seconds\nk=10, mode=user: i_fold= 3, 0.744 dt=0.95 seconds\nk=10, mode=user: i_fold= 4, 0.756 dt=0.94 seconds\nk=20, mode=item: i_fold= 0, 0.730 dt=1.07 seconds\nk=20, mode=item: i_fold= 1, 0.724 dt=1.09 seconds\nk=20, mode=item: i_fold= 2, 0.725 dt=1.06 seconds\nk=20, mode=item: i_fold= 3, 0.725 dt=1.08 seconds\nk=20, mode=item: i_fold= 4, 0.736 dt=1.08 seconds\nk=20, mode=user: i_fold= 0, 0.744 dt=0.93 seconds\nk=20, mode=user: i_fold= 1, 0.742 dt=0.99 seconds\nk=20, mode=user: i_fold= 2, 0.738 dt=0.93 seconds\nk=20, mode=user: i_fold= 3, 0.741 dt=0.97 seconds\nk=20, mode=user: i_fold= 4, 0.752 dt=0.99 seconds\nk=50, mode=item: i_fold= 0, 0.734 dt=1.10 seconds\nk=50, mode=item: i_fold= 1, 0.730 dt=1.09 seconds\nk=50, mode=item: i_fold= 2, 0.729 dt=1.08 seconds\nk=50, mode=item: i_fold= 3, 0.730 dt=1.11 seconds\nk=50, mode=item: i_fold= 4, 0.741 dt=1.10 seconds\nk=50, mode=user: i_fold= 0, 0.744 dt=0.95 seconds\nk=50, mode=user: i_fold= 1, 0.742 dt=0.98 seconds\nk=50, mode=user: i_fold= 2, 0.739 dt=0.96 seconds\nk=50, mode=user: i_fold= 3, 0.740 dt=0.97 seconds\nk=50, mode=user: i_fold= 4, 0.753 dt=0.99 seconds\nk=100, mode=item: i_fold= 0, 0.739 dt=1.12 seconds\nk=100, mode=item: i_fold= 1, 0.736 dt=1.16 seconds\nk=100, mode=item: i_fold= 2, 0.734 dt=1.16 seconds\nk=100, mode=item: i_fold= 3, 0.735 dt=1.15 seconds\nk=100, mode=item: i_fold= 4, 0.746 dt=1.13 seconds\nk=100, mode=user: i_fold= 0, 0.747 dt=1.01 seconds\nk=100, mode=user: i_fold= 1, 0.745 dt=0.98 seconds\nk=100, mode=user: i_fold= 2, 0.742 dt=0.99 seconds\nk=100, mode=user: i_fold= 3, 0.743 dt=0.99 seconds\nk=100, mode=user: i_fold= 4, 0.755 dt=0.99 seconds\nk=200, mode=item: i_fold= 0, 0.745 dt=1.18 seconds\nk=200, mode=item: i_fold= 1, 0.742 dt=1.19 seconds\nk=200, mode=item: i_fold= 2, 0.739 dt=1.17 seconds\nk=200, mode=item: i_fold= 3, 0.740 dt=1.17 seconds\nk=200, mode=item: i_fold= 4, 0.752 dt=1.19 seconds\nk=200, mode=user: i_fold= 0, 0.751 dt=1.01 seconds\nk=200, mode=user: i_fold= 1, 0.748 dt=1.04 seconds\nk=200, mode=user: i_fold= 2, 0.746 dt=0.99 seconds\nk=200, mode=user: i_fold= 3, 0.746 dt=1.03 seconds\nk=200, mode=user: i_fold= 4, 0.759 dt=1.00 seconds\n" ], [ "baseline_df = 
pd.DataFrame({'i_fold': i_fold_list})\n\nfor (index, row), (train_inds, test_inds) in zip(baseline_df.iterrows(), kf.split(ratings_df)):\n print(\"i_fold={}: MAE=\".format(row['i_fold']), end='')\n train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\n baseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\n baseline_algo.fit(train_df)\n preds = baseline_algo.predict(test_df[['user_id', 'item_id']])\n mae = mean_absolute_error(preds, test_df['rating'])\n baseline_df.loc[index, 'MAE'] = mae\n print(\"{:5.3f}\".format(mae))", "i_fold=0: MAE=0.758\ni_fold=1: MAE=0.756\ni_fold=2: MAE=0.753\ni_fold=3: MAE=0.754\ni_fold=4: MAE=0.766\n" ], [ "base_avg = baseline_df['MAE'].mean()\nbase_std = baseline_df['MAE'].std()\nsns.pointplot(data=results_df, x='k', hue='mode', y='MAE')\nnk = results_df['k'].nunique()\nplt.plot([-1, nk], [base_avg, base_avg], label='baseline', color='C2')\nplt.fill_between([-1, nk], [base_avg - base_std]*2, [base_avg+base_std]*2, color='C2', alpha=0.2)\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "## ALS & SGD", "_____no_output_____" ] ], [ [ "class ALSRecommender():\n \"\"\"Recommender based on Alternating Least Squares algorithm.\n \n Parameters\n ----------\n k : int, default=5\n Number of latent features\n lmbda : float, default=0.1\n Regularization parameter\n max_epochs : int, default=15\n Max number of iterations to run\n baseline_algo : object\n Object with fit(X) and \n \"\"\"\n def __init__(self, k=5, lmbda=0.1, max_epochs=15, baseline_algo=None, error_metric='mae',\n verbose=True):\n # Force integer in case it comes in as float\n self.k = int(np.round(k))\n self.lmbda = lmbda\n self.max_epochs = max_epochs\n self.baseline_algo = baseline_algo\n self.error_metric = error_metric\n self.verbose = verbose\n\n self.U = None\n self.I = None\n self.initialized = False\n\n def _calc_train_error(self, U, I, R, R_selector=None, error_metric='mae'):\n if R_selector is None:\n R_selector = (R > 0)\n R_hat = np.dot(U.T, I)\n if error_metric == 'mae':\n error = np.sum(R_selector * np.abs(R_hat - R)) / np.sum(R_selector)\n else:\n raise ValueError(\"{} is an unsupported error metric\".format(metric))\n return error\n\n def _fit_init(self, X):\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X must be a DataFrame\")\n X = X.copy()\n user_col, item_col, rating_col = X.columns[:3]\n if self.baseline_algo is None:\n self.train_mean = X[rating_col].mean()\n else:\n self.baseline_algo.fit(X)\n self.R, self.user_map, self.item_map = get_rating_matrix(X)\n n_users, n_items = self.R.shape\n self.U = 3 * np.random.rand(self.k, n_users)\n self.I = 3 * np.random.rand(self.k, n_items)\n self.I[0, :] = self.R[self.R != 0].mean(axis=0) # Avg. rating for each movie\n self.E = np.eye(self.k) # (k x k)-dimensional idendity matrix\n self.epoch = 0\n self.train_errors = []\n self.initialized = True\n\n def fit(self, X, n_epochs=None):\n \"\"\"Fit model to training data X. If at least one iteration has already been run,\n then the model will continue from its most recent state.\n Parameters\n ----------\n X : pandas DataFrame, shape=(n_ratings, >=3)\n First 3 columns must correspond to user, item, and rating in that order\n n_epochs : int, optional\n Number of iterations to run. If not provided, will run for self.max_epochs\n Returns\n -------\n self\n This allows chaining like `ALSRecommender().fit(X_train).predict(X_test)`\n \"\"\"\n # Allow continuation from previous state if n_epochs is given. 
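(The per-epoch evaluation loops later in the notebook rely on this, calling fit(train_df, n_epochs=1) once per epoch.)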
Otherwise start from scratch.\n if n_epochs is None:\n self.initialized = False\n if not self.initialized:\n self._fit_init(X)\n\n epoch_0 = self.epoch\n if n_epochs is None:\n n_epochs = self.max_epochs - epoch_0\n\n n_users, n_items = self.R.shape\n\n # Run n_epochs iterations\n for i_epoch in range(n_epochs):\n if self.epoch >= self.max_epochs:\n print(\"max_epochs = {}\".format(self.max_epochs))\n break\n # Fix I and estimate U\n for i, Ri in enumerate(self.R):\n nui = np.count_nonzero(Ri) # Number of items user i has rated\n if (nui == 0): nui = 1 # Be aware of zero counts!\n # Get array of nonzero indices in row Ii\n Ri_nonzero_selector = np.nonzero(Ri)[0]\n # Select subset of I associated with movies reviewed by user i\n I_Ri = self.I[:, Ri_nonzero_selector]\n # Select subset of row R_i associated with movies reviewed by user i\n Ri_nonzero = self.R[i, Ri_nonzero_selector]\n Ai = np.dot(I_Ri, I_Ri.T) + self.lmbda * nui * self.E\n Vi = np.dot(I_Ri, Ri_nonzero.T)\n self.U[:, i] = np.linalg.solve(Ai, Vi)\n # Fix U and estimate I\n for j, Rj in enumerate(self.R.T):\n nmj = np.count_nonzero(Rj) # Number of users that rated item j\n if (nmj == 0): nmj = 1 # Be aware of zero counts!\n # Get array of nonzero indices in row Ij\n Rj_nonzero_selector = np.nonzero(Rj)[0]\n # Select subset of P associated with users who reviewed movie j\n U_Rj = self.U[:, Rj_nonzero_selector]\n # Select subset of column R_j associated with users who reviewed movie j\n Rj_nonzero = self.R[Rj_nonzero_selector, j]\n Aj = np.dot(U_Rj, U_Rj.T) + self.lmbda * nmj * self.E\n Vj = np.dot(U_Rj, Rj_nonzero)\n self.I[:, j] = np.linalg.solve(Aj, Vj)\n error = self._calc_train_error(self.U, self.I, self.R)\n self.train_errors.append(error)\n if self.verbose:\n print(\"[Epoch {}/{}] train error: {}\".format(self.epoch, self.max_epochs, error))\n self.epoch += 1\n return self\n\n def predict(self, X):\n \"\"\"Generate predictions for user/item pairs\n \n Parameters\n ----------\n X : pandas dataframe, shape = (n_pairs, 2)\n User, item dataframe\n \n Returns\n -------\n rating_pred : 1d numpy array, shape = (n_pairs,)\n Array of rating predictions for each user/item pair\n \"\"\"\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X must be a DataFrame\")\n X = X.copy()\n user_col, item_col = X.columns[:2]\n if self.baseline_algo is None:\n X['rating_baseline'] = self.train_mean\n else:\n X['rating_baseline'] = self.baseline_algo.predict(X)\n X['rating'] = 0\n known_user_and_item_mask = (\n X[user_col].isin(self.user_map.index) & X[item_col].isin(self.item_map.index)\n )\n X_known, X_unknown = X[known_user_and_item_mask], X[~known_user_and_item_mask]\n user_inds = X_known[user_col].map(self.user_map)\n item_inds = X_known[item_col].map(self.item_map)\n rating_pred = np.array([\n np.sum(self.U[:, u_ind] * self.I[:, i_ind])\n for u_ind, i_ind in zip(user_inds, item_inds)\n ])\n X.loc[known_user_and_item_mask, 'rating'] = rating_pred\n X.loc[~known_user_and_item_mask, 'rating'] = self.baseline_algo.predict(X_unknown)\n min_rating = np.min(self.R[np.nonzero(self.R)])\n max_rating = np.max(self.R)\n X.loc[X['rating'] < min_rating, 'rating'] = min_rating\n X.loc[X['rating'] > max_rating, 'rating'] = max_rating\n return X['rating'].values\n\n\nclass SGDRecommender():\n \"\"\"Stochastic Gradient Descent recommender.\n \n Parameters\n ----------\n k : int, default=5\n Number of latent features\n learning_rate : float, default=0.1\n Speed at which to descend down gradient\n max_epochs : int, default=15\n Max number of iterations 
to run\n error_metric : string, default='mae'\n Error metric to use\n user_reg : float, default=0.0\n Regularization parameter for the latent feature weights in U, >=0\n item_reg : float, default=0.0\n Regularization parameter for the latent feature weights in I, >=0\n user_bias_reg : float, default=0.0\n Regularization parameter for the b_u terms, >=0\n item_bias_reg : float, default=0.0\n Regularization parameter for the b_i terms, >=0\n damping_factor : float, default=25\n Damping factor to be used in the baseline algorithm\n minibatch_size : int, default=1\n Number of user/item pairs to evaluate at a time during training\n verbose : boolean, default=True\n If True, print progress.\n \"\"\"\n def __init__(self, k=5, learning_rate=0.1, max_epochs=15, error_metric='mae',\n user_reg=0.0, item_reg=0.0, user_bias_reg=0.0, item_bias_reg=0.0,\n damping_factor=25, minibatch_size=1, verbose=True):\n self.k = k\n self.learning_rate = learning_rate\n self.max_epochs = max_epochs\n self.error_metric = error_metric\n self.user_reg = user_reg\n self.item_reg = item_reg\n self.user_bias_reg = user_bias_reg\n self.item_bias_reg = item_bias_reg\n self.damping_factor = damping_factor\n self.minibatch_size = minibatch_size\n self.verbose = verbose\n\n self.U = None\n self.I = None\n self.initialized = False\n\n def _calc_train_error(self, U, I, mu, b_u, b_i, R, R_selector=None):\n if R_selector is None:\n R_selector = (R > 0)\n R_hat = np.dot(U, I.T) + mu + b_u[:, None] + b_i[None, :]\n if self.error_metric == 'mae':\n error = np.sum(R_selector * np.abs(R_hat - R)) / np.sum(R_selector)\n else:\n raise ValueError(\"{} is an unsupported error metric\".format(metric))\n return error\n\n def _fit_init(self, X):\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X must be a DataFrame\")\n user_col, item_col, rating_col = X.columns[:3]\n self.baseline_algo = DampedUserMovieBaselineModel(damping_factor=self.damping_factor)\n self.baseline_algo.fit(X)\n self.mu = X[rating_col].mean()\n self.b_u, self.b_i = self.baseline_algo.b_u.values, self.baseline_algo.b_i.values\n self.R, self.user_map, self.item_map = get_rating_matrix(X)\n n_users, n_items = self.R.shape\n self.U = np.random.normal(scale=1.0/self.k, size=(n_users, self.k))\n self.I = np.random.normal(scale=1.0/self.k, size=(n_items, self.k))\n self.epoch = 0\n self.train_errors = []\n self.initialized = True\n\n def fit(self, X, n_epochs=None):\n \"\"\"Fit model to training data X. If at least one iteration has already been run,\n then the model will continue from its most recent state.\n Parameters\n ----------\n X : pandas DataFrame, shape=(n_ratings, >=3)\n First 3 columns must correspond to user, item, and rating in that order\n n_epochs : int, optional\n Number of iterations to run. If not provided, will run for self.max_epochs\n Returns\n -------\n self\n This allows chaining like `SGDRecommender().fit(X_train).predict(X_test)`\n \"\"\"\n X = X.copy()\n # Allow continuation from previous state if n_epochs is given. 
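(Likewise used by the SGD epoch-by-epoch sweep, which calls fit(train_df, n_epochs=1) repeatedly.)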
Otherwise start from scratch.\n if n_epochs is None:\n self.initialized = False\n if not self.initialized:\n self._fit_init(X)\n X.iloc[:, 0] = X.iloc[:, 0].map(self.user_map)\n X.iloc[:, 1] = X.iloc[:, 1].map(self.item_map)\n\n epoch_0 = self.epoch\n if n_epochs is None:\n n_epochs = self.max_epochs - epoch_0\n\n n_users, n_items = self.R.shape\n\n # Repeat until convergence\n for i_epoch in range(n_epochs):\n if self.epoch >= self.max_epochs:\n print(\"max_epochs = {}\".format(self.max_epochs))\n break\n # Shuffle X\n X = X.sample(frac=1)\n if self.minibatch_size == 1:\n for row in X.itertuples():\n index, user, item, rating = row[:4]\n pred = self.predict_1_train(user, item)\n err = pred - self.R[user, item]\n self.b_u[user] -= self.learning_rate * (err + self.user_bias_reg * self.b_u[user])\n self.b_i[item] -= self.learning_rate * (err + self.item_bias_reg * self.b_i[item])\n self.U[user, :] -= self.learning_rate * (\n err * self.I[item, :] + self.user_reg * self.U[user, :]\n )\n self.I[item, :] -= self.learning_rate * (\n err * self.U[user, :] + self.item_reg * self.I[item, :]\n )\n else:\n raise ValueError(\"Minibatch size greater than 1 not supported yet.\")\n error = self._calc_train_error(self.U, self.I, self.mu, self.b_u, self.b_i, self.R)\n self.train_errors.append(error)\n if self.verbose:\n print(\"[Epoch {}/{}] train error: {}\".format(self.epoch, self.max_epochs, error))\n self.epoch += 1\n return self\n\n def predict_1_train(self, user, item):\n pred = self.mu + self.b_u[user] + self.b_i[item]\n pred += np.dot(self.U[user, :], self.I[item, :])\n return pred\n\n def predict(self, X):\n \"\"\"Generate predictions for user/item pairs\n \n Parameters\n ----------\n X : pandas dataframe, shape = (n_pairs, 2)\n User, item dataframe\n \n Returns\n -------\n rating_pred : 1d numpy array, shape = (n_pairs,)\n Array of rating predictions for each user/item pair\n \"\"\"\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X must be a DataFrame\")\n X = X.copy()\n user_col, item_col = X.columns[:2]\n known_user_and_item_mask = (\n X[user_col].isin(self.user_map.index) & X[item_col].isin(self.item_map.index)\n )\n X_known, X_unknown = X[known_user_and_item_mask], X[~known_user_and_item_mask]\n user_inds = X_known[user_col].map(self.user_map)\n item_inds = X_known[item_col].map(self.item_map)\n rating_pred = np.array([\n self.predict_1_train(u_ind, i_ind)\n for u_ind, i_ind in zip(user_inds, item_inds)\n ])\n X.loc[known_user_and_item_mask, 'rating'] = rating_pred\n X.loc[~known_user_and_item_mask, 'rating'] = self.baseline_algo.predict(X_unknown)\n return X['rating'].values", "_____no_output_____" ], [ "n_splits = 3\nskf = StratifiedKFold(n_splits=n_splits, random_state=0)\nsplits = [\n (train_inds, test_inds)\n for train_inds, test_inds in skf.split(ratings_df, ratings_df['user_id'])\n]\n\nfor i_fold, (train_inds, test_inds) in enumerate(splits):\n train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\n train_movie_counts = train_df.groupby('user_id').item_id.count()\n test_movie_counts = test_df.groupby('user_id').item_id.count()\n print(\"Fold {}:\".format(i_fold))\n train_min, train_max = train_movie_counts.min(), train_movie_counts.max()\n test_min, test_max = test_movie_counts.min(), test_movie_counts.max()\n print(\" Train: between {} and {} movies per user\".format(train_min, train_max))\n print(\" Test: between {} and {} movies per user\".format(test_min, test_max))", "Fold 0:\n Train: between 13 and 492 movies per user\n Test: between 6 and 245 
movies per user\nFold 1:\n Train: between 13 and 491 movies per user\n Test: between 6 and 246 movies per user\nFold 2:\n Train: between 13 and 491 movies per user\n Test: between 6 and 246 movies per user\n" ], [ "def cart_prod(df_1, df_2):\n df_1['_dummy_'], df_2['_dummy_'] = 1, 1\n return pd.merge(df_1, df_2, on='_dummy_').drop('_dummy_', axis=1)\n\ndef prep_results_df(lists_dict):\n df = pd.DataFrame({'_dummy_': [1]})\n for name, list in lists_dict.items():\n df = cart_prod(df, pd.DataFrame({name: list}))\n return df", "_____no_output_____" ] ], [ [ "Evaluation functions", "_____no_output_____" ] ], [ [ "def dcg(top_k_matrix):\n \"\"\"Compute discounted cumulative gain (DCG) for each row (user) in matrix.\n This measures how good the k recommendations for each user are, with\n decreasing weight placed on items farther down the list. DCG needs to be\n normalized before comparing between users (see normalized discounted\n cumulative gain, or NDCG).\n Links:\n https://link.springer.com/article/10.1007/s11704-015-4584-1\n https://gist.github.com/bwhite/3726239\n https://opensourceconnections.com/blog/2018/02/26/ndcg-scorer-in-quepid\n #cg-dcg-idcg-and-ndcg\n Parameters\n ----------\n top_k_matrix : 2d numpy array, shape = (n_users, k)\n Each row should have the top k ratings for each user from a rating\n matrix in descending order.\n Returns\n -------\n 1d numpy array, shape=(n_users,)\n Array of DCG values for each user\n \"\"\"\n return np.sum(\n top_k_matrix\n /\n np.log2(np.arange(2, top_k_matrix.shape[1]+2))[None, :],\n axis=1\n )\n\ndef ndcg(pred_k_matrix, actual_k_matrix):\n \"\"\"Calculate normalized discounted cumulative gain (NDCG) for each user\n (each row). This is simply the DCG divided by the maximum possible DCG for\n each user. NDCG ranges from 0 to 1, where 1 means movies were chosen that\n actually received the highest k ratings.\n Parameters\n ----------\n pred_k_matrix : 2d numpy array, shape = (n_users, k)\n A matrix of the *actual* ratings of the k movies chosen by the\n recommender system for each user\n actual_k_matrix : 2d numpy array, shape = (n_users, k)\n A matrix of the *actual* ratings of the k movies from the test set\n which the user gave the highest ratings to.\n Returns\n -------\n ndcg_array : 1d numpy array, shape = (n_users,)\n Array of NDCG values for each user\n \"\"\"\n max_dcg_array = dcg(actual_k_matrix)\n dcg_array = dcg(pred_k_matrix)\n return dcg_array / max_dcg_array\n\ndef ndcg_from_df(df, pred, k):\n \"\"\"Calculate NDCG for each user in the passed dataframe given predicted\n scores and a number of movies to recommend\n Parameters\n ----------\n df : pandas dataframe, shape = (n_ratings, >=3)\n User, item, rating dataframe. 
All columns after first 3 are ignored\n pred : 1d array-like, shape = (n_ratings,)\n List/array/series of predicted ratings for each user/item pair in df\n k : int\n Number of movies per user to recommend\n \n Returns\n -------\n user_map : pandas series, shape = (n_users,)\n Index = original user ids, value = mapped integer corresponding to\n position in ndcg_array for that user\n ndcg_array : 1d numpy array, shape = (n_users)\n Array of NDCG scores in range (0, 1]\n \"\"\"\n df = df.iloc[:, :3].copy()\n df.columns = ['user', 'item', 'rating']\n df['pred'] = pred\n pred_matrix, user_map, item_map = get_rating_matrix(df[['user', 'item', 'pred']])\n n_items = len(item_map)\n inds = pred_matrix.argsort(axis=1)[:, :n_items-1-k:-1]\n del pred_matrix\n gc.collect()\n actual_matrix, _, _ = get_rating_matrix(df[['user', 'item', 'rating']])\n pred_k_matrix = actual_matrix[np.arange(len(actual_matrix))[:, None], inds]\n inds = actual_matrix.argsort(axis=1)[:, :n_items-1-k:-1]\n actual_k_matrix = actual_matrix[np.arange(len(actual_matrix))[:, None], inds]\n ndcg_array = ndcg(pred_k_matrix, actual_k_matrix)\n return user_map, ndcg_array", "_____no_output_____" ] ], [ [ "### Choose the best user-based model\n\n*Let's use cross-validation to examine MAE and NDCG@3 on out-of-sample data and choose the \"best\" user-based model*", "_____no_output_____" ] ], [ [ "lists_dict = {\n 'i_fold': np.arange(n_splits),\n 'k': [1, 2, 5, 10, 20, 50, 100],\n}\nk_recs = 3\nbaseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\nuser_results_df = prep_results_df(lists_dict)\ncols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\nfor c in cols:\n user_results_df[c] = np.nan\nfor i_fold, (train_inds, test_inds) in enumerate(splits):\n train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\n user_results_i = user_results_df[user_results_df['i_fold']==i_fold]\n for index, row in user_results_i[['i_fold', 'k']].iterrows():\n t1 = datetime.now()\n model = KNNRecommender(mode='user', k=row['k'], baseline_algo=baseline_algo)\n preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']])\n dt = (datetime.now() - t1).total_seconds()\n test_err = mean_absolute_error(test_df['rating'], preds)\n user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs)\n ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array)\n print(\"k={}, i_fold={}: MAE={}, NDCG={}\".format(row['k'], row['i_fold'], test_err, ndcg_mean))\n cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\n user_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt", "k=1, i_fold=0: MAE=0.815118677174853, NDCG=0.8369655023253458\nk=2, i_fold=0: MAE=0.781600231166416, NDCG=0.8482273405775766\nk=5, i_fold=0: MAE=0.7571264942010565, NDCG=0.8630678403049701\nk=10, i_fold=0: MAE=0.7496153263636528, NDCG=0.8659193360477797\nk=20, i_fold=0: MAE=0.7469469536576984, NDCG=0.8717836126245141\nk=50, i_fold=0: MAE=0.7470360905582336, NDCG=0.8716972263659186\nk=100, i_fold=0: MAE=0.7488371816907775, NDCG=0.8695180372946614\nk=1, i_fold=1: MAE=0.820690147621912, NDCG=0.8359381527480367\nk=2, i_fold=1: MAE=0.784450538355682, NDCG=0.847674607736636\nk=5, i_fold=1: MAE=0.7593870993315751, NDCG=0.8606143875242239\nk=10, i_fold=1: MAE=0.7526014139035732, NDCG=0.8622149031659992\nk=20, i_fold=1: MAE=0.7500167049661343, NDCG=0.8658096963086768\nk=50, i_fold=1: MAE=0.7508972003037176, NDCG=0.8662743964516358\nk=100, i_fold=1: MAE=0.7528793793802401, NDCG=0.8645012299391964\nk=1, i_fold=2: MAE=0.8199492154157525, NDCG=0.8351597108738448\nk=2, 
i_fold=2: MAE=0.7834779688045745, NDCG=0.8506591379908867\nk=5, i_fold=2: MAE=0.7603686168232268, NDCG=0.8596734081733238\nk=10, i_fold=2: MAE=0.7499603079518496, NDCG=0.8685605335101336\nk=20, i_fold=2: MAE=0.7470651456010144, NDCG=0.8754058641314186\nk=50, i_fold=2: MAE=0.7475564925970107, NDCG=0.8765422449708559\nk=100, i_fold=2: MAE=0.7496003554351732, NDCG=0.8740726774708101\n" ], [ "fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True)\nsns.pointplot(data=user_results_df, x='k', y='test_err', ax=ax0)\nax0.set_ylabel('MAE')\nax0.set_xlabel('')\nsns.pointplot(data=user_results_df, x='k', y='ndcg_mean', ax=ax1)\nax1.set_ylabel('NDCG@{}'.format(k_recs))\nfig.subplots_adjust(hspace=0.1)\nplt.show()", "_____no_output_____" ] ], [ [ "NDCG@3 peaks at $k=50$, and MAE is pretty similar between $k=20$ to $100$, so $k=50$ is the winner.", "_____no_output_____" ] ], [ [ "baseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\nbest_user_model = KNNRecommender(mode='user', k=50, baseline_algo=baseline_algo)", "_____no_output_____" ] ], [ [ "### Choose the best item-based model\n*Let's use cross-validation to examine MAE and NDCG@3 on out-of-sample data and choose the \"best\" item-based model.*", "_____no_output_____" ] ], [ [ "lists_dict = {\n 'i_fold': np.arange(n_splits),\n 'k': [1, 2, 5, 10, 20, 50, 100],\n}\nk_recs = 3\nbaseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\nitem_results_df = prep_results_df(lists_dict)\ncols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\nfor c in cols:\n item_results_df[c] = np.nan\nfor i_fold, (train_inds, test_inds) in enumerate(splits):\n train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\n item_results_i = item_results_df[item_results_df['i_fold']==i_fold]\n print(\"i_fold={}: \".format(i_fold), end='')\n for index, row in item_results_i[['i_fold', 'k']].iterrows():\n t1 = datetime.now()\n model = KNNRecommender(mode='item', k=row['k'], baseline_algo=baseline_algo)\n preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']])\n dt = (datetime.now() - t1).total_seconds()\n test_err = mean_absolute_error(test_df['rating'], preds)\n user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs)\n ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array)\n print(\"k={}, \".format(row['k']), end='')\n cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\n item_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt\n print()", "i_fold=0: k=1, k=2, k=5, k=10, k=20, k=50, k=100, \ni_fold=1: k=1, k=2, k=5, k=10, k=20, k=50, k=100, \ni_fold=2: k=1, k=2, k=5, k=10, k=20, k=50, k=100, \n" ], [ "fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True)\nsns.pointplot(data=item_results_df, x='k', y='test_err', ax=ax0)\nax0.set_ylabel('MAE')\nax0.set_xlabel('')\nsns.pointplot(data=item_results_df, x='k', y='ndcg_mean', ax=ax1)\nax1.set_ylabel('NDCG@{}'.format(k_recs))\nfig.subplots_adjust(hspace=0.1)\nplt.show()", "_____no_output_____" ] ], [ [ "Here, $k=10$ and $k=20$ have similar MAE and NDCG@3, we'll favor higher $k$ in nearest neigbor methods because higher $k$ is less prone to overfitting. 
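As a quick sanity check (a minimal sketch using the `item_results_df` computed above), the candidates can be ranked by averaging the fold results:\n\n```python\n# Sketch: average the cross-validated metrics over folds and rank by NDCG@3\n(item_results_df\n .groupby('k')[['test_err', 'ndcg_mean']]\n .mean()\n .sort_values('ndcg_mean', ascending=False))\n```\n\n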
$k=20$ is the winner of the item-based models.", "_____no_output_____" ] ], [ [ "baseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\nbest_item_model = KNNRecommender(mode='item', k=20, baseline_algo=baseline_algo)", "_____no_output_____" ] ], [ [ "### Choose the best ALS model\n\nLet's use cross-validation to examine MAE and NDCG@3 on out-of-sample data and choose the \"best\" ALS model.", "_____no_output_____" ] ], [ [ "max_epochs = 15\nlists_dict = {\n 'i_fold': np.arange(n_splits),\n 'i_epoch': np.arange(max_epochs),\n 'k': [5, 10, 50],\n}\nk_recs = 3\nbaseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\nals_epoch_results_df = prep_results_df(lists_dict)\ncols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\nfor c in cols:\n als_epoch_results_df[c] = np.nan\nfor i_fold, (train_inds, test_inds) in enumerate(splits):\n train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\n als_epoch_results_i = als_epoch_results_df[als_epoch_results_df['i_fold']==i_fold]\n for k, group in als_epoch_results_i.groupby('k'):\n model = ALSRecommender(k=k, lmbda=0.1, max_epochs=max_epochs, baseline_algo=baseline_algo,\n verbose=False)\n print('i_fold={}, k={}: i_epoch='.format(i_fold, k), end='')\n for index, row in group[['i_fold', 'i_epoch']].iterrows():\n t1 = datetime.now()\n preds = model.fit(train_df, n_epochs=1).predict(test_df[['user_id', 'item_id']])\n dt = (datetime.now() - t1).total_seconds()\n test_err = mean_absolute_error(test_df['rating'], preds)\n user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs)\n ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array)\n print('{}, '.format(row['i_epoch']), end='')\n cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\n als_epoch_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt\n print()", "i_fold=0, k=5: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=0, k=10: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=0, k=50: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=1, k=5: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=1, k=10: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=1, k=50: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=2, k=5: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=2, k=10: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=2, k=50: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \n" ], [ "fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True)\nsns.pointplot(data=als_epoch_results_df, x='i_epoch', y='test_err', hue='k', ax=ax0)\nax0.set_ylabel('MAE')\nax0.set_xlabel('')\nax0.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0), title='k =')\nsns.pointplot(data=als_epoch_results_df, x='i_epoch', y='ndcg_mean', hue='k', ax=ax1)\nax1.set_ylabel('NDCG@{}'.format(k_recs))\nax1.set_xlabel('Epoch')\nax1.legend_.remove()\nfig.subplots_adjust(hspace=0.1)\nplt.show()", "_____no_output_____" ], [ "max_epochs = 15\nlists_dict = {\n 'i_fold': np.arange(n_splits),\n 'k': [20, 50, 100, 200],\n 'lmbda': [0.05, 0.1, 0.2]\n}\nk_recs = 3\nbaseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\nals_results_df = prep_results_df(lists_dict)\ncols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\nfor c in cols:\n als_results_df[c] = np.nan\nfor i_fold, (train_inds, test_inds) in enumerate(splits):\n train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\n als_results_i = 
als_results_df[als_results_df['i_fold']==i_fold]\n for index, row in als_results_i[['k', 'lmbda']].iterrows():\n model = ALSRecommender(k=row['k'], lmbda=row['lmbda'], max_epochs=max_epochs, baseline_algo=baseline_algo,\n verbose=False)\n print('i_fold={}, k={}: lmbda={}'.format(i_fold, row['k'], row['lmbda']))\n t1 = datetime.now()\n preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']])\n dt = (datetime.now() - t1).total_seconds()\n test_err = mean_absolute_error(test_df['rating'], preds)\n user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs)\n ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array)\n cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\n als_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt", "i_fold=0, k=20.0: lmbda=0.05\ni_fold=0, k=20.0: lmbda=0.1\ni_fold=0, k=20.0: lmbda=0.2\ni_fold=0, k=50.0: lmbda=0.05\ni_fold=0, k=50.0: lmbda=0.1\ni_fold=0, k=50.0: lmbda=0.2\ni_fold=0, k=100.0: lmbda=0.05\ni_fold=0, k=100.0: lmbda=0.1\ni_fold=0, k=100.0: lmbda=0.2\ni_fold=0, k=200.0: lmbda=0.05\ni_fold=0, k=200.0: lmbda=0.1\ni_fold=0, k=200.0: lmbda=0.2\ni_fold=1, k=20.0: lmbda=0.05\ni_fold=1, k=20.0: lmbda=0.1\ni_fold=1, k=20.0: lmbda=0.2\ni_fold=1, k=50.0: lmbda=0.05\ni_fold=1, k=50.0: lmbda=0.1\ni_fold=1, k=50.0: lmbda=0.2\ni_fold=1, k=100.0: lmbda=0.05\ni_fold=1, k=100.0: lmbda=0.1\ni_fold=1, k=100.0: lmbda=0.2\ni_fold=1, k=200.0: lmbda=0.05\ni_fold=1, k=200.0: lmbda=0.1\ni_fold=1, k=200.0: lmbda=0.2\ni_fold=2, k=20.0: lmbda=0.05\ni_fold=2, k=20.0: lmbda=0.1\ni_fold=2, k=20.0: lmbda=0.2\ni_fold=2, k=50.0: lmbda=0.05\ni_fold=2, k=50.0: lmbda=0.1\ni_fold=2, k=50.0: lmbda=0.2\ni_fold=2, k=100.0: lmbda=0.05\ni_fold=2, k=100.0: lmbda=0.1\ni_fold=2, k=100.0: lmbda=0.2\ni_fold=2, k=200.0: lmbda=0.05\ni_fold=2, k=200.0: lmbda=0.1\ni_fold=2, k=200.0: lmbda=0.2\n" ], [ "fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True)\nsns.pointplot(data=als_results_df, x='k', y='test_err', hue='lmbda', ax=ax0)\nax0.set_ylabel('MAE')\nax0.set_xlabel('')\nax0.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0), title=r'$\\lambda =$')\nsns.pointplot(data=als_results_df, x='k', y='ndcg_mean', hue='lmbda', ax=ax1)\nax1.set_ylabel('NDCG@{}'.format(k_recs))\nax1.legend_.remove()\nfig.subplots_adjust(hspace=0.1)\nplt.show()", "_____no_output_____" ] ], [ [ "Here, it looks like MAE is pretty flat with respect to the learning rate $\\lambda$, but NDCG@3 shows some interesting variations. The highest NDCG@3 comes from $\\lambda=0.1$ and $k>=50$. 
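(Strictly speaking, $\\lambda$ here is the ALS regularization strength `lmbda`, not a learning rate.) The same comparison can be summarized in table form, as a minimal sketch over the `als_results_df` computed above:\n\n```python\n# Sketch: average over folds and rank the (k, lambda) combinations by NDCG@3\n(als_results_df\n .groupby(['k', 'lmbda'])[['test_err', 'ndcg_mean']]\n .mean()\n .sort_values('ndcg_mean', ascending=False)\n .head())\n```\n\n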
With matrix factorization methods like ALS, we want to favor lower $k$ for better generalizability, so $\\lambda=0.1$ and $k=50$ is the winner of the ALS category.", "_____no_output_____" ] ], [ [ "baseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\nbest_als_model = ALSRecommender(k=50, lmbda=0.1, max_epochs=30, baseline_algo=baseline_algo)", "_____no_output_____" ] ], [ [ "### Choose the best SGD model\n*Let's use cross-validation to examine MAE and NDCG@3 on out-of-sample data and choose the \"best\" SGD model.*", "_____no_output_____" ] ], [ [ "max_epochs = 15\nlists_dict = {\n 'i_fold': np.arange(n_splits),\n 'i_epoch': np.arange(max_epochs),\n 'k': [5, 10, 50],\n}\nk_recs = 3\nbaseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\nsgd_epoch_results_df = prep_results_df(lists_dict)\ncols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\nfor c in cols:\n sgd_epoch_results_df[c] = np.nan\nfor i_fold, (train_inds, test_inds) in enumerate(splits):\n train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\n sgd_epoch_results_i = sgd_epoch_results_df[sgd_epoch_results_df['i_fold']==i_fold]\n for k, group in sgd_epoch_results_i.groupby('k'):\n model = SGDRecommender(k=k, learning_rate=0.01, max_epochs=max_epochs, damping_factor=10,\n verbose=False)\n print('i_fold={}, k={}: i_epoch='.format(i_fold, k), end='')\n for index, row in group[['i_fold', 'i_epoch']].iterrows():\n t1 = datetime.now()\n preds = model.fit(train_df, n_epochs=1).predict(test_df[['user_id', 'item_id']])\n dt = (datetime.now() - t1).total_seconds()\n test_err = mean_absolute_error(test_df['rating'], preds)\n user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs)\n ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array)\n print('{}, '.format(row['i_epoch']), end='')\n cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\n sgd_epoch_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt\n print()", "i_fold=0, k=5: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=0, k=10: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=0, k=50: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=1, k=5: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=1, k=10: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=1, k=50: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=2, k=5: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=2, k=10: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \ni_fold=2, k=50: i_epoch=0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, \n" ], [ "fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True)\nsns.pointplot(data=sgd_epoch_results_df, x='i_epoch', y='test_err', hue='k', ax=ax0)\nax0.set_ylabel('MAE')\nax0.set_xlabel('')\nax0.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0), title='k =')\nsns.pointplot(data=sgd_epoch_results_df, x='i_epoch', y='ndcg_mean', hue='k', ax=ax1)\nax1.set_ylabel('NDCG@{}'.format(k_recs))\nax1.set_xlabel('Epoch')\nax1.legend_.remove()\nfig.subplots_adjust(hspace=0.1)\nplt.show()", "_____no_output_____" ], [ "lists_dict = {\n 'i_fold': np.arange(n_splits),\n 'learning_rate': [0.001, 0.01],\n 'reg': [0.0, 0.001, 0.01],\n}\nk_recs = 3\nk = 50\nmax_epochs = 30\nsgd_results_df = prep_results_df(lists_dict)\ncols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\nfor c in cols:\n sgd_results_df[c] = np.nan\nfor i_fold, (train_inds, test_inds) in enumerate(splits):\n train_df, test_df = ratings_df.iloc[train_inds], 
ratings_df.iloc[test_inds]\n sgd_results_i = sgd_results_df[sgd_results_df['i_fold']==i_fold]\n for index, row in sgd_results_i[['learning_rate', 'reg']].iterrows():\n learning_rate, reg = row['learning_rate'], row['reg']\n model = SGDRecommender(k=k, learning_rate=learning_rate, max_epochs=max_epochs,\n damping_factor=10, verbose=False,\n user_reg=reg, item_reg=reg, user_bias_reg=reg, item_bias_reg=reg)\n print('i_fold={}, learning_rate={}, reg={}'.format(i_fold, learning_rate, reg))\n t1 = datetime.now()\n preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']])\n dt = (datetime.now() - t1).total_seconds()\n test_err = mean_absolute_error(test_df['rating'], preds)\n user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs)\n ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array)\n cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt']\n sgd_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt", "i_fold=0, learning_rate=0.001, reg=0.0\ni_fold=0, learning_rate=0.001, reg=0.001\ni_fold=0, learning_rate=0.001, reg=0.01\ni_fold=0, learning_rate=0.01, reg=0.0\ni_fold=0, learning_rate=0.01, reg=0.001\ni_fold=0, learning_rate=0.01, reg=0.01\ni_fold=1, learning_rate=0.001, reg=0.0\ni_fold=1, learning_rate=0.001, reg=0.001\ni_fold=1, learning_rate=0.001, reg=0.01\ni_fold=1, learning_rate=0.01, reg=0.0\ni_fold=1, learning_rate=0.01, reg=0.001\ni_fold=1, learning_rate=0.01, reg=0.01\ni_fold=2, learning_rate=0.001, reg=0.0\ni_fold=2, learning_rate=0.001, reg=0.001\ni_fold=2, learning_rate=0.001, reg=0.01\ni_fold=2, learning_rate=0.01, reg=0.0\ni_fold=2, learning_rate=0.01, reg=0.001\ni_fold=2, learning_rate=0.01, reg=0.01\n" ], [ "fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True)\nsns.pointplot(data=sgd_results_df, x='reg', y='test_err', hue='learning_rate', ax=ax0)\nax0.set_ylabel('MAE')\nax0.set_xlabel('')\nax0.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0), title='Learning Rate')\nsns.pointplot(data=sgd_results_df, x='reg', y='ndcg_mean', hue='learning_rate', ax=ax1)\nax1.set_ylabel('NDCG@{}'.format(k_recs))\nax1.set_xlabel('Regularization Parameter')\nax1.legend_.remove()\nfig.subplots_adjust(hspace=0.1)\nplt.show()", "_____no_output_____" ], [ "reg = 0.01\nbest_sgd_model = SGDRecommender(k=50, learning_rate=0.01, max_epochs=30, damping_factor=10,\n user_reg=reg, item_reg=reg, user_bias_reg=reg, item_bias_reg=reg)", "_____no_output_____" ] ], [ [ "### Compare the top methods of each category", "_____no_output_____" ] ], [ [ "final_models = [best_user_model, best_item_model, best_als_model, best_sgd_model]\nfinal_model_names = ['user', 'item', 'als', 'sgd']\nfinal_results = pd.DataFrame(columns=['model', 'i_fold', 'test_err', 'ndcg_mean', 'ndcg_std', 'dt'])\nfor model, name in zip(final_models, final_model_names):\n for i_fold, (train_inds, test_inds) in enumerate(splits):\n print(\"i_fold={}, model={}\".format(i_fold, name))\n train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\n if name in ['als', 'sgd']:\n model.verbose = False\n t1 = datetime.now()\n preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']])\n dt = (datetime.now() - t1).total_seconds()\n test_err = mean_absolute_error(test_df['rating'], preds)\n user_map, ndcg_array = ndcg_from_df(test_df, preds, k=3)\n ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array)\n final_results.loc[len(final_results), :] = name, i_fold, test_err, ndcg_mean, ndcg_std, dt", "i_fold=0, model=user\ni_fold=1, model=user\ni_fold=2, model=user\ni_fold=0, model=item\ni_fold=1, 
model=item\ni_fold=2, model=item\ni_fold=0, model=als\ni_fold=1, model=als\ni_fold=2, model=als\ni_fold=0, model=sgd\ni_fold=1, model=sgd\ni_fold=2, model=sgd\n" ], [ "fig, (ax0, ax1, ax2) = plt.subplots(3, 1, sharex=True)\nsns.stripplot(data=final_results, x='model', y='test_err', ax=ax0, jitter=True)\nsns.stripplot(data=final_results, x='model', y='ndcg_mean', ax=ax1, jitter=True)\nsns.stripplot(data=final_results, x='model', y='dt', ax=ax2, jitter=True)\nax0.set_ylabel('MAE')\nax0.set_xlabel('')\nax1.set_ylabel('NDCG@3')\nax1.set_xlabel('')\nax2.set_ylabel(r'time [$s$]')\nax2.set(yscale='log')\nax2.set_yticks([1, 10, 100])\nplt.setp(ax0.collections, sizes=[50])\nplt.setp(ax1.collections, sizes=[50])\nplt.setp(ax2.collections, sizes=[50])\nplt.show()", "_____no_output_____" ] ], [ [ "### Fetching posters", "_____no_output_____" ] ], [ [ "n_splits = 3\nskf = StratifiedKFold(n_splits=n_splits, random_state=0)\nsplits = [\n (train_inds, test_inds)\n for train_inds, test_inds in skf.split(ratings_df, ratings_df['user_id'])\n]", "/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_split.py:296: FutureWarning: Setting a random_state has no effect since shuffle is False. This will raise an error in 0.24. You should leave random_state to its default (None), or set shuffle=True.\n FutureWarning\n" ], [ "baseline_algo = DampedUserMovieBaselineModel(damping_factor=10)\nreg = 0.0\nmodels_dict = {\n 'user': KNNRecommender(mode='user', k=50, baseline_algo=baseline_algo),\n 'item': KNNRecommender(mode='item', k=20, baseline_algo=baseline_algo),\n 'als': ALSRecommender(k=50, lmbda=0.1, max_epochs=15, baseline_algo=baseline_algo, verbose=False),\n 'sgd': SGDRecommender(k=50, learning_rate=0.01, max_epochs=30, damping_factor=10,\n user_reg=reg, item_reg=reg, user_bias_reg=reg, item_bias_reg=reg,\n verbose=False)\n}", "_____no_output_____" ], [ "def get_poster_url(movieId, base_url, links_df, api_key):\n movieId = str(int(movieId))\n # Get IMDB movie ID\n tmdbId = links_df.loc[movieId, 'tmdbId']\n \n # Query themoviedb.org API for movie poster path.\n movie_url = 'http://api.themoviedb.org/3/movie/{:}/images'.format(tmdbId)\n headers = {'Accept': 'application/json'}\n payload = {'api_key': api_key} \n response = requests.get(movie_url, params=payload, headers=headers)\n file_path = json.loads(response.text)['posters'][0]['file_path']\n \n return base_url + file_path\n\ndef display_posters(movieIds, base_url, links_df, api_key):\n poster_urls = [get_poster_url(movieId, base_url, links_df, api_key) for movieId in movieIds]\n TABLE = \"<table style='width: 100%; align: center;'><tr>{}</tr></table>\"\n CELL = \"<td align='center'><img style='float: left; width: 120px' src={}></td>\"\n table = TABLE.format(''.join([CELL.format(url) for url in poster_urls]))\n display(HTML(table))\n\ndef recommend(model, train_df, user, pretrained=False, k=3):\n train_df = train_df.iloc[:, :3].copy()\n train_df.columns = ['user', 'item', 'rating']\n if not pretrained:\n model.fit(train_df)\n seen_movies = train_df[train_df['user'] == user]['item'].unique()\n unseen_movies = list(set(train_df['item'].unique()) - set(seen_movies))\n user_movie_df = pd.DataFrame({'user': [user]*len(unseen_movies), 'item': unseen_movies})\n user_movie_df = user_movie_df[['user', 'item']]\n user_movie_df['pred'] = model.predict(user_movie_df)\n user_movie_df = user_movie_df.sort_values('pred', ascending=False)\n movies, preds = user_movie_df[['item', 'pred']].values[:k, :].T\n return movies, preds", "_____no_output_____" ] ], [ [ "Movies 
this user likes", "_____no_output_____" ] ], [ [ "user = 10\ntrain_inds, test_inds = splits[0]\ntrain_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds]\nfavorite_movies = (\n train_df[train_df['user_id']==user]\n .sort_values('rating', ascending=False)\n .iloc[:5, 1]\n .values\n)\ndisplay_posters(favorite_movies, base_url, links_df, api_key)", "_____no_output_____" ] ], [ [ "Recommended movies", "_____no_output_____" ] ], [ [ "model = models_dict['als']\nmovies, preds = recommend(model, train_df, user, pretrained=True, k=5)\ndisplay_posters(movies, base_url, links_df, api_key)", "_____no_output_____" ] ], [ [ "## Non-Negative Matrix Factorization (NMF, scikit-learn package)\n\nFind two non-negative matrices (W, H) whose product approximates the non-negative matrix R.\nNOTE: since the values of matrix R MUST be all positive, we CAN'T do mean-centering normalization here (although this would improve the accuracy).", "_____no_output_____" ] ], [ [ "from sklearn.decomposition import NMF\n\nnmf_model = NMF(n_components=20) # starts with 20 latents factors\n\n# Matrix factorization # V ~ W.H (Find two non-negative matrices (W, H) whose product approximates the non- negative matrix X. )\nnmf_model.fit(rating_matrix) # R can be array-like or sparse, here it is array-like (dense)\nTheta = nmf_model.transform(rating_matrix) # user latent factors (= W, called the features matrix)\nM = nmf_model.components_.T # item latent factors (= H.T) (H is called the coefficient matrix)\n\n# Making the predictions\nR_pred = M.dot(Theta.T) # See http://stackoverflow.com/questions/24739121/nonnegative-matrix-factorization-in-sklearn\nR_pred = R_pred.T # same dimensions as R\n\nprint('Item features - M:', M.shape)\nprint('User features - Theta:', Theta.shape)\nprint()\nprint('R ~ M * Theta.T:')\nprint(R_pred.round(2))\nprint(R_pred.shape)", "Item features - M: (1682, 20)\nUser features - Theta: (943, 20)\n\nR ~ M * Theta.T:\n[[4.5 2.06 1.41 ... 0. 0.02 0.03]\n [2.34 0.03 0.17 ... 0.02 0. 0. ]\n [0.19 0.02 0.1 ... 0.01 0. 0. ]\n ...\n [1.98 0.05 0.3 ... 0. 0. 0. ]\n [1.39 0.1 0.03 ... 0.01 0.01 0. ]\n [1.6 2.24 1.38 ... 0. 0.03 0.02]]\n(943, 1682)\n" ] ], [ [ "Estimating the error (RMSE) before tuning the hyperparameters", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mean_squared_error\n\ndef get_rmse(pred, actual):\n pred = pred[actual.nonzero()].flatten() # Ignore nonzero terms\n actual = actual[actual.nonzero()].flatten() # Ignore nonzero terms\n return np.sqrt(mean_squared_error(pred, actual))\n\nget_rmse(R_pred, rating_matrix)", "_____no_output_____" ] ], [ [ "When the predictive model is satisfying, save it to a file", "_____no_output_____" ] ], [ [ "import pickle\n\nwith open('nnmf_sklearn.pickle', 'wb') as f:\n pickle.dump(nmf_model, f)", "_____no_output_____" ] ], [ [ "Item recommendation for an active user (given its rating history)", "_____no_output_____" ] ], [ [ "def make_recommendation_activeuser(R, prediction, user_idx, k=5):\n '''\n user_idx ...... select an active user\n k ............ 
number of movies to recommend\n '''\n rated_items_df_user = pd.DataFrame(R).iloc[user_idx, :] # get the list of actual ratings of user_idx (seen movies)\n user_prediction_df_user = pd.DataFrame(prediction).iloc[user_idx,:] # get the list of predicted ratings of user_idx (unseen movies)\n reco_df = pd.concat([rated_items_df_user, user_prediction_df_user, item_info], axis=1) # merge both lists with the movie's title\n reco_df.columns = ['rating','prediction','title']\n\n print('Preferred movies for user #', user_idx)\n print(reco_df.sort_values(by='rating', ascending=False)[:k]) # returns the 5 seen movies with the best actual ratings\n print('Recommended movies for user #', user_idx)\n reco_df = reco_df[ reco_df['rating'] == 0 ]\n print(reco_df.sort_values(by='prediction', ascending=False)[:k]) # returns the 5 unseen movies with the best predicted ratings\n print()\n print()\n\nmake_recommendation_activeuser(rating_matrix, R_pred, user_idx=50, k=5)\nmake_recommendation_activeuser(rating_matrix, R_pred, user_idx=130, k=5)", "Preferred movies for user # 50\n rating prediction title\n171 5.0 1.432029 NaN\n180 5.0 2.277069 NaN\n172 5.0 1.017789 NaN\n49 5.0 2.753084 NaN\n143 5.0 0.695904 NaN\nRecommended movies for user # 50\n rating prediction title\n55 0.0 1.527510 NaN\n173 0.0 1.494889 NaN\n256 0.0 1.303158 NaN\n97 0.0 1.295984 NaN\n11 0.0 1.275150 NaN\n\n\nPreferred movies for user # 130\n rating prediction title\n285 5.0 4.312692 NaN\n312 5.0 1.897457 NaN\n535 5.0 0.144333 NaN\n99 5.0 4.945202 NaN\n13 5.0 3.207845 NaN\nRecommended movies for user # 130\n rating prediction title\n474 0.0 2.188917 NaN\n12 0.0 2.137406 NaN\n115 0.0 2.114974 NaN\n282 0.0 2.080887 NaN\n236 0.0 1.831747 NaN\n\n\n" ] ], [ [ "Item recommendation for a new user (wih rating history)", "_____no_output_____" ] ], [ [ "# creating a new user profile:\nmy_ratings = np.zeros((1682,1), dtype=int)\nmy_ratings[0] = 4 \nmy_ratings[1] = 4 \nmy_ratings[10] = 1 \nmy_ratings[15] = 3\nmy_ratings[27] = 4\nmy_ratings[34] = 1\nmy_ratings[49] = 1\nmy_ratings[55] = 1\nmy_ratings[61] = 1\nmy_ratings[68] = 5\nmy_ratings[70] = 4\nmy_ratings[81] = 4\nmy_ratings[87] = 2\nmy_ratings[94] = 4\nmy_ratings[120] = 2\nmy_ratings[171] = 1\nmy_ratings[173] = 4\nmy_ratings[175] = 1\nmy_ratings[182] = 1\nmy_ratings[194] = 2\nmy_ratings[203] = 5\nmy_ratings[209] = 5\nmy_ratings[221] = 1\nmy_ratings[234] = 2\nmy_ratings[312] = 3\nmy_ratings[317] = 3\nmy_ratings[322] = 3\nmy_ratings[342] = 1\nmy_ratings[378] = 1\nmy_ratings[379] = 1\nmy_ratings[392] = 3\nmy_ratings[404] = 2\nmy_ratings[422] = 4\nmy_ratings[542] = 4\n\nfor i in range(len(my_ratings)):\n print(i, my_ratings[i], item_info.iloc[i]['title'])", "_____no_output_____" ], [ "# Adding a new user to the R matrix \nnewR = np.concatenate((rating_matrix, my_ratings.T))\n\n# Recompute the Matrix factorization\nnewTheta = estimator.fit_transform(newR) \nnewX = estimator.components_.T \n\n# Making the predictions\nnewR_pred = newX.dot(newTheta.T).T\n\n# clipping values \nnewR_pred[newR_pred > R.max()] = R.max() # clips ratings above 5 \nnewR_pred[newR_pred < R.min() + 1] = R.min() +1 # clips ratings below 1\n\n# Making the recommendation\nmake_recommendation_activeuser(newR, newR_pred, user_idx=newR.shape[0]-1, k=10)", "_____no_output_____" ] ], [ [ "Item recommendation for a new user (wihout rating history)", "_____no_output_____" ] ], [ [ "def make_recommendation_newuser(item_sim, item_idx, k=5):\n '''\n item_idx ...... select an item\n k ............ 
number of movies to recommend\n '''\n reco_item_df = pd.DataFrame(item_sim).iloc[item_idx, :] \n reco_item_df = pd.concat([reco_item_df, item_info], axis=1) # merge list with the movie's title\n reco_item_df.columns = ['similarity','title']\n reco_item_df = reco_item_df.sort_values(by='similarity',ascending=False)\n\n print('Recommended movies for a new user (without rating history), currently looking at movie:', reco_item_df.iloc[0]['title'])\n print(reco_item_df[1:k+1]) # returns the 5 movies the most similar to item_idx\n print()\n\nfrom sklearn.metrics.pairwise import cosine_similarity\nitem_sim = cosine_similarity(M) # Use item features matrix to compute movie-to-movie similarity matrices\nmake_recommendation_newuser(item_sim, item_idx=1, k=5)\nmake_recommendation_newuser(item_sim, item_idx=20, k=5)\nmake_recommendation_newuser(item_sim, item_idx=500, k=5)", "_____no_output_____" ] ], [ [ "## Hybrid (using processed dataset)\n1. Run Content based filtering and determine the movies which we want to recommend to the user.\n2. Filter and sort the recommendations of CF using SVD predicted ratings.", "_____no_output_____" ], [ "Setup", "_____no_output_____" ] ], [ [ "!git clone https://github.com/vivdalal/movie-recommender-system.git\n\nfrom math import sqrt\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\n\n# Reading ratings file\nhratings = pd.read_csv('movie-recommender-system/ratings.csv', sep=',', encoding='latin-1', usecols=['userId','movieId','rating','timestamp'])\ndisplay(ratings.head())\n\n# Reading movies file\nhmovies = pd.read_csv('movie-recommender-system/movies.csv', sep=',', encoding='latin-1', usecols=['movieId','title','genres'])\ndisplay(movies.head())", "fatal: destination path 'movie-recommender-system' already exists and is not an empty directory.\n" ] ], [ [ "Content-based model", "_____no_output_____" ] ], [ [ "tfihmovies_genres = TfidfVectorizer(token_pattern = '[a-zA-Z0-9\\-]+')\nhmovies['genres'] = hmovies['genres'].replace(to_replace=\"(no genres listed)\", value=\"\")\ntfihmovies_genres_matrix = tfihmovies_genres.fit_transform(hmovies['genres'])\ncosine_sim_movies = linear_kernel(tfihmovies_genres_matrix, tfihmovies_genres_matrix)\n\ndef get_recommendations_based_on_genres(movie_title, cosine_sim_movies=cosine_sim_movies):\n \"\"\"\n Calculates top 2 movies to recommend based on given movie titles genres. 
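Similarity is cosine similarity between TF-IDF vectors built from each movie's genre string.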
\n :param movie_title: title of movie to be taken for base of recommendation\n :param cosine_sim_movies: cosine similarity between movies \n :return: Titles of movies recommended to user\n \"\"\"\n # Get the index of the movie that matches the title\n idx_movie = hmovies.loc[hmovies['title'].isin([movie_title])]\n idx_movie = idx_movie.index\n \n # Get the pairwsie similarity scores of all movies with that movie\n sim_scores_movies = list(enumerate(cosine_sim_movies[idx_movie][0]))\n \n # Sort the movies based on the similarity scores\n sim_scores_movies = sorted(sim_scores_movies, key=lambda x: x[1], reverse=True)\n\n # Get the scores of the 10 most similar movies\n sim_scores_movies = sim_scores_movies[1:3]\n \n # Get the movie indices\n movie_indices = [i[0] for i in sim_scores_movies]\n \n # Return the top 2 most similar movies\n return hmovies['title'].iloc[movie_indices]\n\ndef get_recommendation_content_model(userId):\n \"\"\"\n Calculates top movies to be recommended to user based on movie user has watched. \n :param userId: userid of user\n :return: Titles of movies recommended to user\n \"\"\"\n recommended_movie_list = []\n movie_list = []\n df_rating_filtered = hratings[hratings[\"userId\"]== userId]\n for key, row in df_rating_filtered.iterrows():\n movie_list.append((hmovies[\"title\"][row[\"movieId\"]==hmovies[\"movieId\"]]).values) \n for index, movie in enumerate(movie_list):\n for key, movie_recommended in get_recommendations_based_on_genres(movie[0]).iteritems():\n recommended_movie_list.append(movie_recommended)\n\n # removing already watched movie from recommended list \n for movie_title in recommended_movie_list:\n if movie_title in movie_list:\n recommended_movie_list.remove(movie_title)\n \n return set(recommended_movie_list)\n\nlist(get_recommendation_content_model(1))[:10]", "_____no_output_____" ] ], [ [ "SVD Collaborative model", "_____no_output_____" ] ], [ [ "# !pip install -q surprise\n\nfrom surprise import Reader, Dataset, SVD\nfrom surprise.model_selection import cross_validate\n\nreader = Reader()\ndata = Dataset.load_from_df(ratings_df[['user_id', 'item_id', 'rating']], reader)\n\nsvd = SVD()\ncross_validate(svd, data, measures=['RMSE', 'MAE'], cv=5, verbose=True)\n\ntrainset = data.build_full_trainset()\nsvd.fit(trainset)", "Evaluating RMSE, MAE of algorithm SVD on 5 split(s).\n\n Fold 1 Fold 2 Fold 3 Fold 4 Fold 5 Mean Std \nRMSE (testset) 0.9328 0.9330 0.9455 0.9341 0.9322 0.9355 0.0050 \nMAE (testset) 0.7339 0.7376 0.7440 0.7374 0.7330 0.7372 0.0039 \nFit time 5.14 5.14 5.24 5.28 5.26 5.21 0.06 \nTest time 0.16 0.27 0.16 0.27 0.16 0.20 0.05 \n" ] ], [ [ "Hybrid model", "_____no_output_____" ] ], [ [ "def hybrid_content_svd_model(userId):\n recommended_movies_by_content_model = get_recommendation_content_model(userId)\n recommended_movies_by_content_model = hmovies[hmovies.apply(lambda movie: movie[\"title\"] in recommended_movies_by_content_model, axis=1)]\n for key, columns in recommended_movies_by_content_model.iterrows():\n predict = svd.predict(userId, columns[\"movieId\"])\n recommended_movies_by_content_model.loc[key, \"svd_rating\"] = predict.est\n# if(predict.est < 2):\n# recommended_movies_by_content_model = recommended_movies_by_content_model.drop([key])\n return recommended_movies_by_content_model.sort_values(\"svd_rating\", ascending=False).iloc[0:11]\n \nuser_id = 50\nhybrid_content_svd_model(user_id)", "/usr/local/lib/python3.6/dist-packages/pandas/core/indexing.py:1596: SettingWithCopyWarning: \nA value is trying to be set on a copy of 
a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self.obj[key] = _infer_fill_value(value)\n/usr/local/lib/python3.6/dist-packages/pandas/core/indexing.py:1763: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n isetter(loc, value)\n" ] ], [ [ "## LightFM - BPR & WARP", "_____no_output_____" ] ], [ [ "# !pip install -q lightfm\n\nimport numpy as np\nfrom lightfm.datasets import fetch_movielens\nfrom lightfm import LightFM\nfrom lightfm.evaluation import precision_at_k\nfrom lightfm.evaluation import auc_score\n\nmovielens = fetch_movielens()\n\nfor key, value in movielens.items():\n print(key, type(value), value.shape)", "train <class 'scipy.sparse.coo.coo_matrix'> (943, 1682)\ntest <class 'scipy.sparse.coo.coo_matrix'> (943, 1682)\nitem_features <class 'scipy.sparse.csr.csr_matrix'> (1682, 1682)\nitem_feature_labels <class 'numpy.ndarray'> (1682,)\nitem_labels <class 'numpy.ndarray'> (1682,)\n" ], [ "lfm_train = movielens['train']\nlfm_test = movielens['test']", "_____no_output_____" ], [ "model = LightFM(learning_rate=0.05, loss='bpr')\nmodel.fit(lfm_train, epochs=10)\n\ntrain_precision = precision_at_k(model, lfm_train, k=10).mean()\ntest_precision = precision_at_k(model, lfm_test, k=10, train_interactions=lfm_train).mean()\n\ntrain_auc = auc_score(model, lfm_train).mean()\ntest_auc = auc_score(model, lfm_test, train_interactions=lfm_train).mean()\n\nprint('Precision: train %.2f, test %.2f.' % (train_precision, test_precision))\nprint('AUC: train %.2f, test %.2f.' % (train_auc, test_auc))", "Precision: train 0.59, test 0.20.\nAUC: train 0.90, test 0.88.\n" ] ], [ [ "BPR optimises for ROC. The WARP model, on the other hand, optimises for precision@k---we should expect its performance to be better on precision.", "_____no_output_____" ] ], [ [ "model = LightFM(learning_rate=0.05, loss='warp')\nmodel.fit_partial(lfm_train, epochs=10)\n\ntrain_precision = precision_at_k(model, lfm_train, k=10).mean()\ntest_precision = precision_at_k(model, lfm_test, k=10, train_interactions=lfm_train).mean()\n\ntrain_auc = auc_score(model, lfm_train).mean()\ntest_auc = auc_score(model, lfm_test, train_interactions=lfm_train).mean()\n\nprint('Precision: train %.2f, test %.2f.' % (train_precision, test_precision))\nprint('AUC: train %.2f, test %.2f.' 
% (train_auc, test_auc))", "Precision: train 0.60, test 0.22.\nAUC: train 0.94, test 0.93.\n" ] ], [ [ "## Microsoft Library - FastAI-Collab", "_____no_output_____" ] ], [ [ "%cd /content/4CED0278/4CED0278", "/content/4CED0278/4CED0278\n" ], [ "import os\nimport sys\nimport time\nimport itertools\nimport numpy as np\nimport pandas as pd\n\nimport torch, fastai\nfrom fastai.collab import EmbeddingDotBias, collab_learner, CollabDataBunch, load_learner\n\nfrom reco_utils.dataset.python_splitters import python_stratified_split\nfrom reco_utils.recommender.fastai.fastai_utils import cartesian_product, score\nfrom reco_utils.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k\nfrom reco_utils.evaluation.python_evaluation import rmse, mae, rsquared, exp_var", "_____no_output_____" ], [ "USER, ITEM, RATING, TIMESTAMP, PREDICTION, TITLE = 'UserId', 'MovieId', 'Rating', 'Timestamp', 'Prediction', 'Title'\n\n# top k items to recommend\nTOP_K = 10\n\n# select movieLens data size: 100k, 1m, 10m, or 20m\nMOVIELENS_DATA_SIZE = '100k'\n\n# model parameters\nN_FACTORS = 40\nEPOCHS = 5\n\nratings = pd.read_csv('./data/ml-100k/ratings.csv')", "_____no_output_____" ], [ "ratings.head()", "_____no_output_____" ], [ "# split the dataset\ntrain_valid_df, test_df = python_stratified_split(ratings, \n ratio=0.75, \n min_rating=5, \n filter_by=\"item\", \n col_user=USER, \n col_item=ITEM )", "_____no_output_____" ], [ "data = CollabDataBunch.from_df(train_valid_df, \n user_name=USER, \n item_name=ITEM, \n rating_name=RATING, \n valid_pct=0)\ndata.show_batch()", "_____no_output_____" ] ], [ [ "Now we will create a `collab_learner` for the data, which by default uses \nthe `EmbeddingDotBias` model. We will be using 40 latent factors. This will \ncreate an embedding for the users and the items that will map each of these \nto 40 floats as can be seen below. Note that the embedding parameters are not \npredefined, but are learned by the model.\n\nAlthough ratings can only range from 1-5, we are setting the range of possible \nratings to a range from 0 to 5.5 -- that will allow the model to predict values \naround 1 and 5, which improves accuracy. Lastly, we set a value for weight-decay \nfor regularization.", "_____no_output_____" ] ], [ [ "learn = collab_learner(data, n_factors=N_FACTORS, y_range=[0,5.5], wd=1e-1)\nlearn.model", "_____no_output_____" ] ], [ [ "Now train the model for 5 epochs setting the maximal learning rate. 
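For intuition about what that schedule does, here is a minimal, self-contained sketch of a cosine-annealed decay from the maximal rate. This is an illustration under simplifying assumptions — it uses made-up step counts and ignores the warm-up half of fastai's one-cycle policy, so the curve the library actually produces will differ:

```python
import numpy as np

max_lr = 5e-3            # the maximal learning rate used in this notebook
total_steps = 5 * 100    # assume 5 epochs of ~100 batches each (illustrative numbers only)

def cosine_annealed_lr(step, max_lr, total_steps, min_lr=0.0):
    """Decay the learning rate from max_lr down to min_lr along a half cosine."""
    progress = step / max(total_steps - 1, 1)
    return min_lr + 0.5 * (max_lr - min_lr) * (1 + np.cos(np.pi * progress))

# print a few points along the schedule: starts at max_lr, ends near min_lr
for step in (0, 125, 250, 375, 499):
    print(f"step {step:3d}: lr = {cosine_annealed_lr(step, max_lr, total_steps):.5f}")
```
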
The learner will reduce the learning rate with each epoch using cosine annealing", "_____no_output_____" ] ], [ [ "learn.fit_one_cycle(EPOCHS, max_lr=5e-3)", "_____no_output_____" ], [ "# save the learner\nlearn.export('movielens_model.pkl')", "_____no_output_____" ] ], [ [ "Evaluation", "_____no_output_____" ] ], [ [ "# load the learner\nlearner = load_learner(path=\".\", file='movielens_model.pkl')\n\n# get all users and items that the model knows\ntotal_users, total_items = learner.data.train_ds.x.classes.values()\ntotal_items = total_items[1:]\ntotal_users = total_users[1:]\n\n# get all users from the test set and remove any users that were now in the training set\ntest_users = test_df[USER].unique()\ntest_users = np.intersect1d(test_users, total_users)\n\n# build the cartesian product of test set users and all items known to the model\nusers_items = cartesian_product(np.array(test_users),np.array(total_items))\nusers_items = pd.DataFrame(users_items, columns=[USER,ITEM])\n\n# remove the user/items combinations that are in the training set\n# we don't want to propose a movie that the user has already watched\ntraining_removed = pd.merge(users_items, train_valid_df.astype(str), on=[USER, ITEM], how='left')\ntraining_removed = training_removed[training_removed[RATING].isna()][[USER, ITEM]]", "_____no_output_____" ], [ "# score the model to find the top K recommendation\ntop_k_scores = score(learner, \n test_df=training_removed,\n user_col=USER, \n item_col=ITEM, \n prediction_col=PREDICTION)\n\n# MAP\neval_map = map_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM, \n col_rating=RATING, col_prediction=PREDICTION, \n relevancy_method=\"top_k\", k=TOP_K)\n\n# NDCG\neval_ndcg = ndcg_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM, \n col_rating=RATING, col_prediction=PREDICTION, \n relevancy_method=\"top_k\", k=TOP_K)\n\n# Precision\neval_precision = precision_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM, \n col_rating=RATING, col_prediction=PREDICTION, \n relevancy_method=\"top_k\", k=TOP_K)\n\n# Recall\neval_recall = recall_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM, \n col_rating=RATING, col_prediction=PREDICTION, \n relevancy_method=\"top_k\", k=TOP_K)\n\nprint(\"Model:\\t\" + learn.__class__.__name__,\n \"Top K:\\t%d\" % TOP_K,\n \"MAP:\\t%f\" % eval_map,\n \"NDCG:\\t%f\" % eval_ndcg,\n \"Precision@K:\\t%f\" % eval_precision,\n \"Recall@K:\\t%f\" % eval_recall, sep='\\n')", "_____no_output_____" ], [ "# calculate scores for test user-item pairs\nscores = score(learner, \n test_df=test_df.copy(), \n user_col=USER, \n item_col=ITEM, \n prediction_col=PREDICTION)\n\n# calculate some regression metrics\neval_r2 = rsquared(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION)\neval_rmse = rmse(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION)\neval_mae = mae(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION)\neval_exp_var = exp_var(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION)\n\nprint(\"Model:\\t\" + learn.__class__.__name__,\n \"RMSE:\\t%f\" % eval_rmse,\n \"MAE:\\t%f\" % eval_mae,\n \"Explained variance:\\t%f\" % eval_exp_var,\n \"R squared:\\t%f\" % eval_r2, sep='\\n')", "_____no_output_____" ] ] ]
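For a concrete sense of what the precision@k and recall@k values reported above measure, they can be computed by hand for a single user. The snippet below is a hypothetical, self-contained sketch (made-up item IDs, not the reco_utils implementation):

```python
# Hypothetical single-user example: ground-truth relevant items from a test set
# versus a ranked top-k recommendation list (k = 5). IDs are invented for illustration.
relevant = {10, 42, 7, 99}
recommended = [42, 3, 99, 57, 10]

k = 5
hits = [item for item in recommended[:k] if item in relevant]

precision_at_k = len(hits) / k            # 3 hits out of 5 recommended -> 0.60
recall_at_k = len(hits) / len(relevant)   # 3 hits out of 4 relevant    -> 0.75

print(f"precision@{k} = {precision_at_k:.2f}")
print(f"recall@{k} = {recall_at_k:.2f}")
```
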
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0a91babcdac2ba06b246d065418ddef9a99a8ce
30,593
ipynb
Jupyter Notebook
_notebooks/2020-03-16-Covid.ipynb
alephthoughts/alephthoughts
fa4079df67fc261d950d3d1ed232a73739982288
[ "Apache-2.0" ]
null
null
null
_notebooks/2020-03-16-Covid.ipynb
alephthoughts/alephthoughts
fa4079df67fc261d950d3d1ed232a73739982288
[ "Apache-2.0" ]
1
2021-09-28T01:26:40.000Z
2021-09-28T01:26:40.000Z
_notebooks/2020-03-16-Covid.ipynb
alephthoughts/alephthoughts
fa4079df67fc261d950d3d1ed232a73739982288
[ "Apache-2.0" ]
null
null
null
123.358871
1,546
0.496813
[ [ [ "# \"Covid-19, आपका समुदाय और आप - एक डेटा विज्ञान परिप्रेक्ष्य\"\n> \"लिखित: 09 मार्च 2020 जेरेमी हावर्ड और रेचल थॉमस द्वारा\"\n\n- toc: false \n- badges: false\n- comments: true\n- categories: [ai-in-society]\n- image: images/coronavirus.jpg", "_____no_output_____" ], [ "> हम डेटा वैज्ञानिक हैं - अर्थात, हमारा काम यह समझना है कि हम डेटा का विश्लेषण और व्याख्या कैसे करें। जब हम Covid-19 के डेटा का विश्लेषण करते हैं, तो हम बहुत चिंतित होते हैं। समाज के सबसे कमजोर हिस्सों, बुजुर्गों और गरीबों को सबसे अधिक खतरा है, लेकिन बीमारी के प्रसार और प्रभाव को नियंत्रित करने के लिए हम सभी को अपने व्यवहार को बदलने की आवश्यकता है। अपने हाथों को अच्छी तरह से और नियमित रूप से धोएं, समूहों और भीड़ से बचें, कार्यक्रमों को रद्द करें, और अपना चेहरा न छुएं। इस लेख में, हम बताते हैं कि हम क्यों चिंतित हैं, और आपको भी क्यों चिंतित होना चाहिए। आपको जो महत्वपूर्ण जानकारी जानने की आवश्यकता है, उसके एक उत्कृष्ट सारांश के लिए, ईथन एले (एक गैर-लाभकारी संस्था के अध्यक्ष जो महामारियों से जोखिम को कम करने के लिए प्रौद्योगिकियों का विकास करते है) द्वारा लिखित [Corona in Brief](https://docs.google.com/document/u/1/d/1vumYWoiV7NlVoc27rMQvmVkVu5cOAbnaW_RKkq2RMaQ/mobilebasic) पढ़ें।", "_____no_output_____" ], [ "### विषय-सूची\n* [हमें एक सुचारू चिकित्सा प्रणाली की आवश्यकता है](#हमें-एक-सुचारू-चिकित्सा-प्रणाली-की-आवश्यकता-है)\n* [यह फ्लू जैसा नहीं है](#यह-फ्लू-जैसा-नहीं-है)\n* [“घबराएँ नहीं। शान्ति बनाये रखें।\" यह मददगार नहीं है](#घबराएँ-नहीं-शान्ति-बनाये-रखें-यह-मददगार-नहीं-है)\n* [यह केवल आपके बारे में नहीं है](#यह-केवल-आपके-बारे-में-नहीं-है)\n* [हमें वक्र को समतल करने की आवश्यकता है](#हमें-वक्र-को-समतल-करने-की-आवश्यकता-है)\n* [समाज की जवाबी कार्यवाही से ही सारा अंतर पड़ता है](#समाज-की-जवाबी-कार्यवाही-से-ही-सारा-अंतर-पड़ता-है)\n* [अमेरिका में हमारे पास पर्याप्त जानकारी नहीं है](#अमेरिका-में-हमारे-पास-पर्याप्त-जानकारी-नहीं-है)\n* [निष्कर्ष](#निष्कर्ष)", "_____no_output_____" ], [ "### हमें एक सुचारू चिकित्सा प्रणाली की आवश्यकता है\nअभी 2 वर्ष पहले हममें से एक (रेचल) को मस्तिष्क में संक्रमण हुआ था, जिसके एक चौथाई मरीज़ो की मृत्यु हो जाती है, और एक तिहाई मरीज़ो को स्थायी संज्ञानात्मक क्षति पहुँचती है। कई अन्य लोगों की दृष्टि और श्रवण-शक्ति स्थायी रूप से नष्ट हो जाती है। अस्पताल की पार्किंग में पहुँचने तक ही रेचल बेसुध सी हो जाती। रेचल भाग्यशाली रही की उसे तुरंत देखभाल, निदान और उपचार प्राप्त हुआ। इस घटना से कुछ समय पहले तक रेचल काफी स्वस्थ थी। आपातकालीन कक्ष तुरंत मिल पाने के कारण ही शायद उसकी जान बच पायी।\n\nअब ज़रा Covid-19 के बारे में बात करते हैं, और आने वाले हफ्तों और महीनों में रेचल जैसी स्थिति में लोगों के साथ क्या हो सकता है। Covid-19 से संक्रमित पाए जाने वाले लोगों की संख्या हर 3 से 6 दिनों में दोगुनी हो जाती है। तीन दिनों में दोगुनी दर, इसका अर्थ यह है कि संक्रमित पाए जाने वाले लोगों की संख्या तीन सप्ताह में 100 गुना तक बढ़ सकती है (यह वास्तव में इतना सरल नहीं है, लेकिन तकनीकी विवरणों से भटकने से बचते हैं)। [10 संक्रमित लोगों में से एक](https://www.who.int/docs/default-source/coronaviruse/who-china-joint-mission-on-covid-19-final-report.pdf) को कई हफ्तों तक अस्पताल में भर्ती रहने की आवश्यकता होती है, और इनमें से अधिकांश को ऑक्सीजन की भी आवश्यकता होती है। हालांकि इस वायरस के अभी बहुत शुरुआती दिन हैं, अभी से ही ऐसे क्षेत्र हैं जहां अस्पताल पूरी तरह से खत्म हो चुके हैं, और लोग अब उस उपचार को प्राप्त करने में सक्षम नहीं हैं जिनकी उन्हें आवश्यकता है (न केवल Covid-19 के लिए, बल्कि किसी और चीज के लिए भी, जैसे कि रेचल की जरूरत की जीवन रक्षक देखभाल)। उदाहरण के लिए, इटली में, जहां सिर्फ एक हफ्ते पहले अधिकारी कह रहे थे कि सब कुछ ठीक है, अब सोलह मिलियन लोगों को लॉक-डाउन पर रखा गया है (अपडेट: इसे पोस्ट करने के 6 घंटे बाद, 
इटली ने पूरे देश को लॉक-डाउन में डाल दिया), और इस तरह के तम्बू मरीजों की आमद को संभालने में मदद के लिए लगाए जा रहे हैं:", "_____no_output_____" ], [ "![](my_icons/italymedicaltent.jpg \"इटली में प्रयोग किया जा रहा एक चिकित्सा तम्बू\")", "_____no_output_____" ], [ "डॉ एंटोनियो पेसेन्ती, इटली के एक अत्यंत-प्रभावित क्षेत्र में क्षेत्रीय संकट पजवाबी कार्यवाही इकाई के प्रमुख ने [कहा](https://www.reuters.com/article/us-health-coronavirus-italy/alarmed-italy-locks-down-north-to-prevent-spread-of-coronavirus-idUSKBN20V06R) है, \"अब हम कॉरिडोर में गहन देखभाल उपचार स्थापित करने के लिए मजबूर हो रहे हैं, ऑपरेटिंग थिएटरों में, रिकवरी रूम में ... दुनिया की एक सबसे अच्छी स्वास्थ्य प्रणाली, लोम्बार्डी में पतन से एक कदम दूर है।”\n### यह फ्लू जैसा नहीं है\nफ्लू से संक्रमित लोगों की मृत्यु दर लगभग 0.1% है। हार्वर्ड में सेंटर फॉर कम्युनिकेबल डिजीज डायनेमिक्स के निदेशक मार्क लिप्सिच का [अनुमान है](https://www.washingtonpost.com/opinions/2020/03/06/why-its-so-hard-pin-down-risk-dying-coronavirus/) कि Covid-19 के लिए यह दर 1-2% है। [नवीनतम महामारी विज्ञान प्रतिरूपण](https://www.medrxiv.org/content/10.1101/2020.03.04.20031104v1.full.pdf) ने फरवरी में चीन में इस दर को 1.6% पाया, जो कि फ्लू की तुलना में सोलह गुना अधिक है (हालांकि यह काफी अनुदार संख्या हो सकती है, क्योंकि जब चिकिस्ता प्रणाली हालात का सामना नहीं कर पाती तब यह दर तेज़ी से बढ़ती है)। वर्तमान के सबसे अच्छे अनुमानों की उम्मीद है कि Covid-19 इस साल फ्लू की तुलना में इस साल 10 गुना अधिक लोगों की मृत्यु का कारण बनेगा (और एयरबीएनबी में डेटा विज्ञान के पूर्व निदेशक, [एलेना ग्रेवाल का प्रतिरूपण](https://docs.google.com/spreadsheets/d/1ktSfdDrX_uJsdM08azBflVOm4Z5ZVE75nA0lGygNgaA/edit#gid=0) दर्शाता है कि यह सबसे खराब स्थिति में 100 गुना अधिक हो सकता है)। यह चिकित्सा प्रणाली पर भारी प्रभाव को नज़रअंदांज़ करते हुए है, जैसे कि ऊपर वर्णित है। ज़ाहिर है कि कुछ लोग खुद को समझाने की कोशिश कर रहे हैं कि यह कोई नई बात नहीं है, फ्लू जैसी बीमारी है, क्योंकि वास्तविकता को स्वीकार करना अत्यंत कठिन है और ये हालात काफी असामान्य हैं।\n\nहमारे मस्तिष्क की संरचना इस तरह की नहीं है कि हम घातांकीय रूप से बढ़ने वाली इस संक्रमित लोगों की संख्या को सहज रूप से समझ सकें। इसलिए हमें वैज्ञानिकों के रूप में इसका विश्लेषण करना होगा, न कि अपने सहज-ज्ञान का उपयोग करना होगा।", "_____no_output_____" ], [ "![](my_icons/coronachart.png \"यह दो सप्ताह में कहाँ होगा? दो महीनों में?\")", "_____no_output_____" ], [ "प्रत्येक व्यक्ति जिसे फ्लू है, औसतन, वह 1.3 अन्य लोगों को संक्रमित करता है। जिसे फ्लू के लिए \"R0\" कहा जाता है। यदि R0 1.0 से कम है, तो संक्रमण फैलना बंद हो जाता है और मर जाता है। यदि यह 1.0 से अधिक है, तो यह फैलता है। वर्तमान में चीन के बाहर Covid-19 के लिए R0 2-3 है। अंतर सुनने में छोटा लग सकता है, लेकिन संक्रमित लोगों की 20 \"पीढ़ियां\" जब संक्रमण आगे बढ़ाती हैं, 1.3 के R0 का परिणाम 146 संक्रमण होगा, लेकिन 2.5 के R0 का परिणाम होगा 3.6 करोड़! (यह निश्चित रूप से अधूरा है और कई वास्तविक दुनिया के प्रभावों को नजरअंदाज करता है, लेकिन यह Covid-19 और फ़्लू के बीच सापेक्ष अंतर का एक उचित चित्रण है, अन्य सभी तथ्यों को समान मानते हुए)।\n\nध्यान दें कि R0 किसी बीमारी का एक मौलिक लक्षण नहीं है। यह जवाबी कार्यवाही पर बहुत निर्भर करता है, और यह समय अनुसार बदल सकता है। अतिविशेष रूप से, चीन में Covid-19 के लिए R0 में काफी कमी आई है, और अब यह 1.0 आ रहा है! आप पूछेंगे, कैसे? 
ऐसे बड़े पैमानों पर कदम उठाना जिनकी अमेरिका जैसे देश में कल्पना करना भी मुश्किल है- उदाहरण के लिए, कई विशाल शहरों को पूरी तरह से बंद कर देना, और एक परीक्षण प्रक्रिया विकसित करना जो एक सप्ताह में दस लाख से अधिक लोगों का परीक्षण मुमकिन बनाता है।\n\nएक चीज जो सोशल मीडिया पर बहुत अधिक उभरती है (एलोन मस्क जैसे अत्यधिक फ़ॉलो किए गए खातों से भी) कि लोगों में लॉजिस्टिक और घातीय वृद्धि के बीच अंतर की गलतफहमी है। \"लॉजिस्टिक\" वृद्धि महामारी के \"s-आकार\" विकास के स्वरुप को संदर्भित करता है। यकीनन घातीय वृद्धि हमेशा के लिए नहीं हो सकती, क्योंकि अन्यथा दुनिया में लोगों की संख्या से अधिक संख्या संक्रमित लोगों की हो जाएगी! इसलिए, अंत में, संक्रमण दर हमेशा कम होनी चाहिए, जिसके परिणामस्वरूप समय के साथ एक s-आकार (जो सिग्मॉइड के रूप में जाना जाता है) की वृद्धि दर बनती है। हालाँकि, घटती वृद्धि दर केवल एक कारण से होती है-यह कोई जादू नहीं है। मुख्य कारण हैं:\n\n* बड़े पैमाने पर और प्रभावी सामुदायिक जवाबी कार्यवाही, या\n* इतने बड़े प्रतिशत में लोग संक्रमित हैं कि फैलने के लिए कम असंक्रमित लोग बचे हैं।\n\nइसलिए, महामारी के \"नियंत्रण\" के लिए लॉजिस्टिक वृद्धि के स्वरुप पर निर्भर होने में कोई तार्किक समझदारी नहीं है।\n\nएक और बात जो आपके स्थानीय समुदाय में Covid-19 के प्रभाव को सहज रूप से समझना कठिन बना देती है, वह यह है कि संक्रमण और अस्पताल में भर्ती होने के बीच बहुत महत्वपूर्ण देरी होती है - आम तौर पर लगभग 11 दिन। यह शायद एक लम्बा समय न प्रतीत हो, लेकिन जब आप इसकी तुलना तब तक संक्रमित लोगों की संख्या से करते हैं, तो इसका मतलब है कि जब तक आप ध्यान देंगे कि अस्पताल में बिस्तर भर चुके हैं, तब तक सामुदायिक संक्रमण इस स्तर पर पहुंच जायेगा कि निपटने के लिए 5-10 गुना अधिक लोग होंगे।\n\nध्यान दें कि कुछ शुरुआती संकेत ऐसे भी हैं कि आपके स्थानीय क्षेत्र में प्रभाव कम से कम कुछ हद तक जलवायु पर निर्भर हो सकता है। [Covid-19 के संभावित प्रसार और मौसम-तत्व के पूर्वानुमान के लिए तापमान और अक्षांश विश्लेषण](https://poseidon01.ssrn.com/delivery.php?ID=091071099092098096101097074089104068104013035023062021010031112088025099126064001093097030102106046016114116082016095089113023126034089078012119081090111118122007110026000085123071022022127025026080005029001020025126022000066075021086079031101116126112&EXT=pdf) बताता है कि यह बीमारी अब तक सुहावने जलवायु में फैल रही है (दुर्भाग्य से हमारे लिए, सैन फ्रांसिस्को में तापमान सीमा, जहां हम रहते हैं, उस सीमा के लिए सही है; यह लंदन सहित यूरोप के मुख्य जनसंख्या केंद्रों को भी शामिल करता है।)\n\n### “घबराएँ नहीं। शान्ति बनाये रखें।\" यह मददगार नहीं है\n\nएक आम प्रतिक्रिया जो हमने सोशल मीडिया पर उन लोगों के लिए देखी है जो चिंतित होने के कारणों की ओर इशारा कर रहे हैं, \"घबराओ मत\" या \"शांत रहो\"। कम से कम कहने में तो यह मददगार नहीं है। कोई यह नहीं कह रहा है कि आतंकित होना एक उपयुक्त प्रतिक्रिया है। किसी कारणवष, हालांकि, \"शांत रहें\" कुछ मंडलियों में बहुत लोकप्रिय प्रतिक्रिया है (लेकिन महामारी विज्ञानियों के बीच नहीं, जिनका काम इन चीजों पर नज़र रखना करना है)। शायद \"शांत रहें\" कुछ लोगों को अपनी निष्क्रियता के बारे में बेहतर महसूस करने में मदद करता है, या उन्हें ऐसे लोगों से बेहतर महसूस कराता है जिनकी वे कल्पना करते हैं कि वे बिना सिर की मुर्गी की तरह इधर-उधर भाग रहे हैं।\n\nलेकिन \"शांत रहें\" काफी आसानी से आपकी तैयारी और जवाबी कार्यवाही की विफलता का कारण बन सकता है। चीन में, दसियों लाख को लॉक-डाउन पर रखा गया था और दो नए अस्पतालों का निर्माण उस समय तक कर लिया गया था जब उनकी स्तिथि वैसी थी जैसी अमेरिका की अब है। इटली ने बहुत लंबा इंतजार किया, और आज (रविवार 8 मार्च को) उन्होंने 1692 लोगों को बंद करने के बावजूद 1492 नए मामले और 133 नई मौतें दर्ज कीं। इस समय सबसे अच्छी जानकारी के आधार पर हम यह पता लगा सकते हैं कि 2-3 सप्ताह पहले इटली भी उसी स्थिति में था,जो अमेरिका और ब्रिटेन में आज (संक्रमण के 
आंकड़ों के संदर्भ में) है।\n\nध्यान दें कि इस समय पर Covid-19 के बारे में लगभग सब कुछ हवा में है। हम वास्तव में इसकी संक्रमण की गति या मृत्यु दर नहीं जानते हैं, हम यह नहीं जानते हैं कि यह सतहों पर कितनी देर तक सक्रिय रहता है, हम नहीं जानते कि यह गर्म परिस्थितियों में जीवित रह सकता है, फैल सकता है या नहीं। हमारे पास जो कुछ भी है वो इस समय तक लोगों द्वारा जुटाई गयी जानकारियों के आधार पर हमारे सर्वोत्तम अनुमान हैं। और याद रखें, इस जानकारी का अधिकांश हिस्सा चीन में, चीनी में है। वर्तमान में, चीन के अनुभव को समझने का सबसे अच्छा तरीका है कि चीन, जर्मनी, जापान, कोरिया, नाइजीरिया, रूस, सिंगापुर, अमेरिका और विश्व स्वास्थ्य संगठन (WHO) के 25 राष्ट्रीय और अंतर्राष्ट्रीय विशेषज्ञों के संयुक्त मिशन के आधार पर, [WHO-चीन संयुक्त मिशन की उत्कृष्ट रिपोर्ट को कोरोनावायरस रोग 2019 पर](https://www.who.int/docs/default-source/coronaviruse/who-china-joint-mission-on-covid-19-final-report.pdf) पढ़ें।\n\nजब यह अनिश्चित है कि, यह वैश्विक महामारी बनेगा या नहीं, और शायद सब कुछ *बस* ठीक से गुज़र जाये बिना अस्पताल तंत्र के ढहे हुए, तब इसका अर्थ ये बिलकुल नहीं है कि कुछ न करना ही उचित जवाबी कार्यवाही है। यह जूए के समान होगा और किसी भी खतरे के प्रतिरूप के परिदृश्य में यह हमारी ईष्टतम प्रतिक्रिया नहीं होगी। यह भी काफी असंभव सा है कि इटली और चीन जैसे देश बिना किसी अच्छे कारण के अपनी अर्थव्यवस्था के बड़े हिस्से को प्रभावी ढंग से बंद कर देंगे। यह उन वास्तविक प्रभावों के साथ भी तर्कसंगत नहीं है जो हम संक्रमित क्षेत्रों में जमीन पर देख रहे हैं, जहां चिकित्सा प्रणाली सामना करने में असमर्थ है (उदाहरण के लिए, इटली \"प्री-ट्राइएज\" के लिए 462 टेंट का उपयोग कर रहा है, और अभी भी [आईसीयू रोगियों को स्थानांतरित करना है संक्रमित क्षेत्रों से](https://www.repubblica.it/cronaca/2020/03/08/news/coronavirus_situazione_italia-250665818/?ref=RHPPTP-BH-I250661466-C12-P5-S1.12-T1))।\n\nइसके बजाय, विचारशील, उचित प्रतिक्रिया उन चरणों का पालन करना है जो विशेषज्ञों द्वारा संक्रमण फैलाने से बचने के लिए अनुशंसित हैं:\n\n* बड़े समूहों और भीड़ से बचें\n* कार्यक्रमों को रद्द करें \n* यदि संभव हो तो घर से काम करें\n* घर से आते और जाते समय और बाहर आने पर बार-बार हाथ धोएं\n* अपने चेहरे को छूने से बचें, खासकर जब आपके घर के बाहर (आसान नहीं!)\n* सतहों और सामान को कीटाणु रहित करें (यह संभव है कि वायरस सतहों पर 9 दिनों तक सक्रिय रह सकता है, हालांकि यह अभी भी निश्चित रूप से ज्ञात नहीं है)।\n\n### यह केवल आपके बारे में नहीं है\n\nयदि आप 50 वर्ष से कम उम्र के हैं, और जोखिम कारक नहीं हैं, जैसे कि कमज़ोर प्रतिरक्षा प्रणाली, हृदय रोग, धूम्रपान का इतिहास, या अन्य पुरानी बीमारियां, तो आपको कुछ आराम हो सकता है कि Covid-19 की आपको मारने की संभावना बहुत कम है। लेकिन इसके प्रति आपकी प्रतिकिरिया अभी भी बहुत मायने रखती है। आपके पास अभी भी संक्रमित होने की अधिक संभावना है, और यदि होते हैं, तो बस दूसरों को संक्रमित करने की भी उतनी ही सम्भावना है। औसतन, प्रत्येक संक्रमित व्यक्ति दो से अधिक लोगों को संक्रमित कर रहा है, और वे लक्षण दिखाने से पहले संक्रामक हो जाते हैं। यदि आपके पास ऐसे माता-पिता हैं जिनकी आप परवाह करते हैं, या दादा-दादी, और उनके साथ समय बिताने की योजना बनाते हैं, और बाद में पता चलता है कि आप उन्हें Covid-19 के साथ संक्रमित करने के लिए जिम्मेदार हैं, तो आपको इस बोझ के साथ जीना पड़ेगा।\n\nयहां तक कि अगर आप 50 से अधिक की उम्र के लोगों के संपर्क में नहीं हैं, तो यह संभावना है कि आपके पास ऐसे सहकर्मी और परिचित हैं जिनकी गंभीर बीमारियों का आपको अनुमान भी नहीं है। [अनुसंधान से पता चलता है](https://www.talentinnovation.org/_private/assets/DisabilitiesInclusion_KeyFindings-CTI.pdf) कि कुछ लोग कार्यस्थल में अपने स्वास्थ्य की स्थिति का खुलासा करने से बचते हैं, यदि वो बच सकते हैं, क्योंकि उन्हें [भेदभाव का 
डर](https://medium.com/@racheltho/the-tech-industry-is-failing-people-with-disabilities-and-chronic-illnesses-8e8aa17937f3) होता है। हम दोनों ही उच्च जोखिम की श्रेणियों में हैं, लेकिन कई लोग जिनसे हम नियमित रूप से बातचीत करते हैं, वे शायद यह नहीं जानते होंगे।\n\nऔर हाँ, यह केवल आपके आस पास के लोगो की बात नहीं है। यह एक अत्यधिक महत्वपूर्ण नैतिक मुद्दा है। प्रत्येक व्यक्ति जो वायरस के प्रसार को नियंत्रित करने में योगदान करने की पूरी कोशिश करता है, वह संक्रमण की दर को धीमा करने में अपने पूरे समुदाय की मदद कर रहा है। जैसा कि ज़ीनेप तुफैकी ने [साइंटिफिक अमेरिकन में लिखा है](https://blogs.scientificamerican.com/observations/preparing-for-coronavirus-to-strike-the-u-s/): \"इस वायरस के लगभग अवश्यंभावी वैश्विक प्रसार के लिए तैयारी ... समाज के लिए सबसे अधिक परोपकारी चीजों में से एक है जो आप कर सकते हैं\"। वह लिखती हैं:\n\n> हमें तैयार होना चाहिए, इसलिए नहीं कि हम व्यक्तिगत रूप से जोखिम महसूस कर सकते हैं, बल्कि इसलिए ताकि हम सभी के लिए जोखिम कम करने में मदद कर सकें। हमें तैयार नहीं होना चाहिए क्योंकि हम अपने नियंत्रण से बाहर एक कयामत के दिन का सामना कर रहे हैं, लेकिन क्योंकि हम इस जोखिम के हर पहलू को बदल सकते हैं जो हम एक समाज के रूप में सामना करते हैं। यह सही है, आपको तैयार रहना चाहिए क्योंकि आपके पड़ोसियों को आपको तैयार करने की आवश्यकता है - विशेष रूप से आपके बुजुर्ग पड़ोसी, आपके पड़ोसी जो अस्पतालों में काम करते हैं, आपके पड़ोसी पुरानी बीमारियों के साथ, और आपके पड़ोसी जिनके पास साधन या समय नहीं हैं तैयारी का जिसका कारण संसाधनों की कमी हो या समय की।\n\nइसने हमें व्यक्तिगत रूप से प्रभावित किया है। हमारा सबसे बड़ा और सबसे महत्वपूर्ण कोर्स फ़ास्ट.एआई, जो हमारे लिए वर्षों के काम की परिणति का प्रतिनिधित्व करता है, एक सप्ताह में सैन फ्रांसिस्को विश्वविद्यालय में शुरू होने वाला था। पिछले बुधवार (4 मार्च) को, हमने पूरी बात ऑनलाइन स्थानांतरित करने का निर्णय लिया। हम ऑनलाइन स्थानांतरित करने वाले पहले बड़े पाठ्यक्रमों में से एक थे। हमने ऐसा क्यों किया? क्योंकि हमें पिछले हफ्ते की शुरुआत में एहसास हुआ कि अगर हम इस कोर्स को चलाते हैं, तो हम अनुमानित रूप से सैकड़ों लोगों को एक संलग्न स्थान पर एक साथ आने के लिए प्रोत्साहित कर रहे थे, कई बार एक बहु-सप्ताह की अवधि में। संलग्न स्थानों में समूहों को एक साथ लाना सबसे खराब काम है जो इस समय किया जा सकता है। हमने यह सुनिश्चित करने के लिए नैतिक रूप से बाध्य महसूस किया कि कम से कम इस मामले में, यह नहीं हुआ। यह दिल तोड़ने वाला फैसला था। हमारे छात्रों के साथ सीधे काम करने में हमारा समय हर साल महान सुखों और सबसे अधिक उत्पादक अवधियों में से एक रहा है। और हमारे पास छात्रों को दुनिया भर से उड़ान भरने की योजना थी, जिन्हें हम वास्तव में निराश नहीं करना चाहते थे।\n\nलेकिन हमें पता था कि यह करना सही है, क्योंकि अन्यथा हम अपने समुदाय में बीमारी के प्रसार को बढ़ा सकते हैं।\n\n### हमें वक्र को समतल करने की आवश्यकता है\n\nयह अत्यंत महत्वपूर्ण है, क्योंकि अगर हम किसी समुदाय में संक्रमण की दर को धीमा कर सकते हैं, तो हम उस समुदाय के अस्पतालों को दोनों, संक्रमित रोगियों से निपटने में, और नियमित रोगी भार में साथ देते हैं जिसे उन्हें संभालने की आवश्यकता है। इसे \"वक्र को समतल करना\" के रूप में वर्णित किया गया है, और इस चित्रमय मानचित्र में स्पष्ट रूप से दिखाया गया है:", "_____no_output_____" ], [ "![](my_icons/chart.jpeg \"बिंदु-रेखा के नीचे रहना ही सब-कुछ है\")", "_____no_output_____" ], [ "हेल्थ आईटी के पूर्व नेशनल कोऑर्डिनेटर फ़रज़ाद मोस्तश्री ने स्पष्ट किया: “हर दिन नए मामलों की पहचान की जा रही है, जिनका यात्रा इतिहास या किसी ज्ञात मामले से कोई संबंध नहीं है, और हम जानते हैं कि ये सिर्फ बड़ी समस्या की एक छोटी सी झलक है जिसका कारण है परिक्षण में हो रही देरी। इसका मतलब है कि अगले दो हफ्तों में निदान किए गए मामलों की संख्या में विस्फोट होगा ... 
जब समुदाय में घातांकीय रूप से फैलाव हो रहा हो तब उसकी रोकथाम की कोशिश करना इस प्रकार है जैसे जलते हुए घर में चिंगारियों पर ध्यान लगाना। जब ऐसा होता है, तो हमें शमन करने के लिए रणनीतियों को बदलने की आवश्यकता होती है - स्वास्थ्य देखभाल पर चरम प्रभाव को कम करना और कम करने के लिए सुरक्षात्मक उपाय करना।\" यदि हम बीमारी के फैलाव को कम रख सकते हैं कि हमारे अस्पताल भार को संभाल सकें, तो लोग उपचार तक पहुंच सकते हैं। लेकिन अगर मामले बहुत जल्दी आते हैं, तो ज़रूरतमंदो को अस्पताल में भर्ती होने नहीं मिलेगा।\n\n[लिज़ स्पैट के अनुसार](https://twitter.com/LizSpecht/status/1236095186737852416) यहां गणित कुछ ऐसा दिख सकता है:\n\n>अमेरिका में प्रति 1000 लोगों पर लगभग 2.8 अस्पताल बेड हैं। 330M की आबादी के साथ, यह ~ 1M बेड है। किसी भी समय, उन बिस्तरों में से 65% पर पहले से ही कब्जा होता है। यह देश भर में उपलब्ध 330k बिस्तरों को छोड़ता है (शायद नियमित फ्लू के मौसम के कारण ये आँकड़ा इस समय थोड़ा कम होगा)। आइए इटली की संख्या पर निर्भर हो कर मान लें कि लगभग 10% मामले अस्पताल में भर्ती होने के लिए गंभीर हैं। (ध्यान रखें कि कई रोगियों को अस्पताल में हफ्तों तक रहना पड़ता है - दूसरे शब्दों में, यह प्रक्रिया बहुत धीमी होगी क्योंकि बिस्तर COVID19 रोगियों से भर जायेंगे)। इस अनुमान के अनुसार, 8 मई तक, अमेरिका में सभी खुले अस्पताल बेड भर जाएंगे। (ये यह बिलकुल नहीं बताता है कि यह बिस्तर अत्यधिक संक्रामक रोगो के अलगाव के लिए उपयुक्त होंगे या नहीं।) अगर हम गंभीर मामलों के अंश के बारे में दो घटक से ग़लत हैं, तो यह अस्पताल में बिस्तरों के ख़त्म होने की समयरेखा को मात्र 6 दिन से बदलता है जो किसी भी दिशा में हो सकता है। यदि 20% मामलों में अस्पताल में भर्ती होने की आवश्यकता होती है, तो बिस्तरों की ख़त्म होने की तारीख होगी ~ 2 मई, यदि केवल 5% मामलों में ही इसकी आवश्यकता होती है, तो हम इसे ~ 14 मई तक चला पाएंगे। 2.5% हमें 20 मई तक ले जायेगा। यह गणना बिलकुल ये मान के की गयी है कि गैर-Covid19 मामलो से बिस्तरों की मांग में कोई उतार चढ़ाव नहीं होगा, जो कि काफी संशयात्मक प्रतीत होता है। जैसे-जैसे स्वास्थ्य सेवा प्रणाली तेजी से बोझिल होती जा रही है, Rx की कमी इत्यादि बढ़ती जा रही है, गंभीर रोगों से जूझते लोगों की चिकित्सा जो सामान्य रूप से अच्छी तरह से प्रबंधित होती हैं, वह चिकित्सा संकट की स्तिथि में बदल सकती है जिसमें ऐसे लोगों को गहन देखभाल और अस्पताल भर्ती की ज़रूरत होगी।\n\n### समाज की जवाबी कार्यवाही से ही सारा अंतर पड़ता है \n\nजैसा कि हमने चर्चा की है, यह गणित निश्चित नहीं है - चीन पहले ही दिखा चुका है कि ठोस कदम उठाकर प्रसार को कम करना संभव है। एक सफल जवाबी कार्यवाही का एक और शानदार उदाहरण वियतनाम है, जहां अन्य बातों के अलावा, एक राष्ट्रव्यापी विज्ञापन अभियान (एक आकर्षक गीत सहित) ने तेजी से समाज को उचित जवाबी कार्यवाही के लिए एकजुट किया और यह सुनिश्चित किया कि लोग अपने व्यवहार को उचित रूप से समायोजित करें।\n\nयह केवल एक काल्पनिक स्थिति नहीं है - यह 1918 फ्लू महामारी में स्पष्ट रूप से प्रदर्शित किया गया था। अमेरिका में दो शहरों ने महामारी के लिए बहुत अलग जवाबी कार्यवाही प्रदर्शित कीं: फिलाडेल्फिया में 200,000 लोगों की विशाल परेड का आयोजन किया गया ताकि युद्ध के लिए पैसे जुटा सकें। लेकिन सेंट लुइस ने बड़े संपर्कों को रद्द करने के साथ-साथ वायरस के प्रसार को कम करने के लिए सामाजिक संपर्कों को कम करने के लिए सावधानीपूर्वक तैयार की। यहाँ प्रत्येक शहर में मौतों की संख्या कितनी है, यह [नेशनल एकेडमी ऑफ साइंसेज की कार्यवाही](https://www.pnas.org/content/104/18/7582) में दिखाया गया है:", "_____no_output_____" ], [ "![](my_icons/fluchart.jpeg \"1918 फ्लू महामारी में प्रतिकिर्याओं में अंतर का असर\")", "_____no_output_____" ], [ "फिलाडेल्फिया में स्थिति बेहद गंभीर हो गई, यहां तक कि एक समय पर ऐसे स्थान भी थे जहां फ्लू से मृतकों की भारी संख्या को संभालने के लिए [पर्याप्त ताबूत या मुर्दाघर](https://www.history.com/news/spanish-flu-pandemic-dead) 
नहीं थे।\n\nरिचर्ड बेस्सर, जो कि रोग नियंत्रण और रोकथाम केंद्रों के कार्यवाहक निदेशक थे 2019 H1N1 महामारी के समय, ने [कहा है](https://www.washingtonpost.com/opinions/as-coronavirus-spreads-the-bill-for-our-public-health-failures-is-due/2020/03/05/9da09ed6-5f10-11ea-b29b-9db42f7803a7_story.html?utm_campaign=wp_week_in_ideas&utm_medium=email&utm_source=newsletter&wpisrc=nl_ideas) कि अमेरिका में \"रोग के जोखिम से बचने की और अपने परिवार को बचाने की क्षमता निर्भर करती है, आय पर, स्वस्थ्य देखभाल तक पहुँच पर, और आव्रजन स्तिथि पर, और भी अन्य कारणों के साथ\", वे बताते हैं:\n\n> बुजुर्गों और विकलांगों को विशेष जोखिम होता है जब उनके दैनिक जीवन और समर्थन प्रणाली बाधित होती है। ग्रामीण और मूल निवासी समुदायों सहित स्वास्थ्य देखभाल की आसान पहुंच के बिना, जरूरत के समय कठिन दूरी का सामना करना पड़ सकता है। पास के क्वार्टर में रहने वाले लोग - चाहे सार्वजनिक आवास, नर्सिंग होम, जेल, आश्रय या यहां तक कि सड़कों पर बेघर - लहरों में पीड़ित हो सकते हैं, जैसा कि हम पहले ही वाशिंगटन राज्य में देख चुके हैं। और कम वेतन वाले गिग इकॉनमी की कमजोरियां, गैर-वेतनभोगी श्रमिकों और अनिश्चित काम की सारणी, इस संकट के दौरान सभी के सामने उजागर हो जाएगी। 60 प्रतिशत अमेरिकी श्रम शक्ति से पूछें जो प्रति घंटा वेतन उन्हें मिलता है उससे ज़रूरत के समय छुट्टी लेना कितना आसान है।\n\nयूएस ब्यूरो ऑफ लेबर स्टैटिस्टिक्स से पता चलता है कि सबसे कम आय वाले बैंड में [एक तिहाई से भी कम](https://www.bls.gov/opub/ted/2018/higher-wage-workers-more-likely-than-lower-wage-workers-to-have-paid-leave-benefits-in-2018.htm) लोगों को भुगतान के साथ बीमारी के लिए अवकाश मिलता है:", "_____no_output_____" ], [ "![](my_icons/uspaidleave.png \"ज़्यादातर गरीब अमेरिकियों को भुगतान सहित रोग-अवकाश नहीं मिलता, तो उन्हें काम पर जाना पड़ता है\")", "_____no_output_____" ], [ "### अमेरिका में हमारे पास पर्याप्त जानकारी नहीं है\n\nयूएस में एक बड़ा मुद्दा यह है कि बहुत कम परीक्षण किया जा रहा है, और परीक्षण के परिणाम ठीक से साझा नहीं किए जा रहे हैं, जिसका अर्थ है कि हम नहीं जानते कि वास्तव में क्या हो रहा है। पिछले FDA कमिश्नर स्कॉट गोटलिब ने बताया कि सिएटल में बेहतर परीक्षण किया गया है, और हम वहाँ संक्रमण देख रहे हैं: “स्वतंत्र वैज्ञानिको के प्रहरी निगरानी कार्य के कारण हम सीएटल में के प्रकोप के बारे में जल्दी जान पाए हैं। अन्य शहरों में ऐसी निगरानी पूरी तरह से कभी नहीं हुई। अन्य अमेरिकी अतिप्रभावित क्षेत्रों का अभी तक पूरी तरह से पता नहीं लगाया जा सकता है। [द अटलांटिक](https://www.theatlantic.com/health/archive/2020/03/how-many-americans-have-been-tested-coronavirus/607597/) के अनुसार, उपराष्ट्रपति माइक पेंस ने वादा किया कि इस सप्ताह \"लगभग 1.5 मिलियन परीक्षण\" उपलब्ध होंगे, लेकिन इस समय तक पूरे अमेरिका में 2,000 से कम लोगों का परीक्षण किया गया है। [COVID ट्रैकिंग प्रोजेक्ट](https://docs.google.com/spreadsheets/u/1/d/e/2PACX-1vRwAqp96T9sYYq2-i7Tj0pvTf6XVHjDSMIKBdZHXiCGGdNC0ypEU9NbngS8mxea55JuCFuua1MUeOj5/pubhtml) पर आधारित, द अटलांटिक के रॉबिन्सन मेयर और एलेक्सिस मेड्रिगल, ने कहा:\n\n> हमने जो आंकड़े इकट्ठे किए हैं, उनका सुझाव है कि Covid-19 और इससे होने वाली बीमारी, COVID-19, के प्रति अमेरिकी जवाबी कार्यवाही विशेष रूप से अन्य विकसित देशों के साथ तुलनात्मक रूप से सुस्त रही है। सीडीसी ने आठ दिन पहले पुष्टि की थी कि वायरस संयुक्त राज्य में सामुदायिक संचरण में था - यह उन अमेरिकियों को संक्रमित कर रहा था, जो न तो विदेश यात्रा पर थे और न ही उन लोगों के संपर्क में थे। दक्षिण कोरिया में, सामुदायिक प्रसारण के पहले मामले के एक सप्ताह के भीतर 66,650 से अधिक लोगों का परीक्षण किया गया था, और वह जल्दी से एक दिन में 10,000 लोगों का परीक्षण करने में सक्षम हो गया।\n\nसमस्या का हिस्सा यह है कि यह एक राजनीतिक मुद्दा बन गया है। विशेष रूप से, राष्ट्रपति डोनाल्ड ट्रम्प ने यह स्पष्ट कर दिया है कि 
वह \"संख्या\" (जो कि अमेरिका में संक्रमित लोगों की संख्या है) को कम रखना चाहते हैं। यह एक उदाहरण है, जहां अनुकूलन मेट्रिक्स असल में अच्छे परिणाम प्राप्त करने में हस्तक्षेप करता है। (इस मुद्दे पर अधिक जानकारी के लिए, डेटा विज्ञान की नैतिकता पेपर देखें [मैट्रिक्स की समस्या](https://arxiv.org/abs/2002.08512) एआई के लिए एक मौलिक समस्या है)। Google के AI के प्रमुख [जेफ डीन ने ट्वीट कर](https://twitter.com/JeffDean/status/1236489084870119427) राजनीतिक विघटन की समस्याओं के बारे में अपनी चिंता व्यक्त की:\n\n> जब मैंने WHO में काम किया, तो मैं AIDS (अब UNAIDS) पर वैश्विक कार्यक्रम का हिस्सा था, जो दुनिया को HIV/AIDS महामारी से निपटने में मदद करने के लिए बनाया गया था। वहां के कर्मचारी समर्पित डॉक्टर थे और वैज्ञानिकों ने उस संकट के समाधान में मदद करने के लिए तीव्रता से ध्यान केंद्रित किया। संकट के समय में, स्पष्ट और सटीक जानकारी सभी को उचित और सूचित निर्णय लेने में मदद करने के लिए महत्वपूर्ण है (देश, राज्य और स्थानीय सरकारें, कंपनियां, गैर सरकारी संगठन, स्कूल, परिवार और व्यक्ति)। सर्वोत्तम चिकित्सा और वैज्ञानिक विशेषज्ञों को सुनने के लिए सही जानकारी और नीतियों के साथ, हम सभी HIV/AIDS या Covid-19 द्वारा प्रस्तुत की गई चुनौतियों से पार पाएंगे। राजनीतिक हितों से प्रेरित दुष्प्रचार का वास्तविक जोखिम यह है की हम जल्दी काय्रवाही न कर के हालातों को बद से बत्तर बनाएंगे और यह सुनिश्चि करेंगे के महामारी से झूझते हुए हम ऐसे व्यहवार को बढ़ावा दे बैठे जिससे कि रोग और फैले। ऐसी स्तिथि को बनते हुए देखना बेहद दर्दनाक है।\n\nऐसा लगता तो नहीं है के पारदर्शिता के सन्दर्भ में हालत को बदलने की राजनीतिक इच्छाशक्ति यहां है। [वायर्ड के अनुसार](https://www.wired.com/story/trumps-coronavirus-press-event-was-even-worse-than-it-looked/), स्वास्थ्य और मानव सेवा सचिव एलेक्स अजार ने कहा, \"उन परीक्षणों के बारे में बात करना शुरू कर दिया जो स्वास्थ्य देखभाल कार्यकर्ता यह निर्धारित करने के लिए उपयोग करते हैं कि कोई नए कोरोनोवायरस से संक्रमित है या नहीं। उन किटों की कमी का मतलब अमेरिका में बीमारी के प्रसार और गंभीरता के बारे में महामारी विज्ञान संबंधी जानकारी का एक खतरनाक अभाव है, जो सरकार की ओर से अस्पष्टता से बढ़ा है। अज़ार ने यह कहने की कोशिश की कि अधिक परीक्षण किये जायेंगे पर गुणवत्ता नियंत्रण लंबित है।” लेकिन, उन्होंने जारी रखा:\n> तब ट्रम्प ने अजार की बात काटते हुए कहा \"लेकिन मुझे लगता है, महत्वपूर्ण बात यह है, किसी को भी, अभी और कल, यदि परीक्षण की ज़रूरत पड़ी है तो उसका परीक्षण हुआ है। वह हैं, उनके परीक्षण हुए हैं, और परीक्षण बहुत अच्छे हैं। ट्रम्प ने कहा कि जिसे परीक्षण की ज़रूरत है, उसे परीक्षण मिलेगा। यह असत्य है। उपराष्ट्रपति पेंस ने गुरुवार को संवाददाताओं से कहा कि अमेरिका के पास मांग को पूरा करने के लिए पर्याप्त परीक्षण किट नहीं हैं।\n\nअन्य देश अमेरिका की तुलना में बहुत अधिक तेजी से और महत्वपूर्ण रूप से जवाबी कार्यवाही कर रहे हैं। दक्षिण-पूर्वी एशिया के कई देश ताइवान सहित उत्तम परिणाम दिखा रहे हैं, जहां R0 अभी 0.3 से नीचे है, और सिंगापुर, जिसे द मॉडल फॉर Covid-19 रिस्पांस के रूप में प्रस्तावित किया जा रहा है। यह सिर्फ एशिया में ही नहीं है; उदाहरण के लिए, फ्रांस में, 1000 लोगों से अधिक का इकट्ठा होना मना है, और स्कूल अब तीन जिलों में बंद हैं।\n\n### निष्कर्ष\n\nCovid-19 एक महत्वपूर्ण सामाजिक मुद्दा है, और हमें बीमारी के प्रसार को कम करने के लिए काम करना चाहिए। इसका मतलब है की:\n\n* बड़े समूहों और भीड़ से बचना\n* कार्यक्रमों को रद्द करना\n* घर से काम करना, यदि संभव हो तो\n* घर से आते और जाते समय और बाहर आने पर बार-बार हाथ धोएं\n* अपने चेहरे को छूने से बचें, खासकर जब आपके घर के बाहर \n\nध्यान दें: इसे छापने की जल्दबाज़ी में, इसमें उपयोग की गयी जानकारियों का हवाला और श्रेय देने में हमने उतनी सावधानी नहीं बरती है जितना हम आम तौर पर बरतते हैं । अगर हमसे कुछ भूल हुई हो तो कृपया हमे बतायें।\n\nप्रतिपुष्टि और टिप्पणियों के लिए 
सिल्वेन गुग्गर और एलेक्सिस गैलाघर को धन्यवाद।\n\n*यह एक महत्वपूर्ण अंग्रेजी लेख का अनुवाद है, असली लेख अंग्रेजी में पढ़ने के लिए [यहां क्लिक करें](https://www.fast.ai/2020/03/09/coronavirus/)।* ", "_____no_output_____" ], [ "<a href=\"https://www.buymeacoffee.com/alephthoughts\" target=\"_blank\"><img src=\"https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png\" alt=\"Buy Me A Coffee\" style=\"height: 60px !important;width: 217px !important;\" ></a>", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0a927ef18170a2b2e85b49e9e59a2bb06773bb2
43,205
ipynb
Jupyter Notebook
Visuals.ipynb
JustinBonus/Borja-Amies
5560879df9bc0cc8da2f6273b161c30f82702e1e
[ "MIT" ]
null
null
null
Visuals.ipynb
JustinBonus/Borja-Amies
5560879df9bc0cc8da2f6273b161c30f82702e1e
[ "MIT" ]
null
null
null
Visuals.ipynb
JustinBonus/Borja-Amies
5560879df9bc0cc8da2f6273b161c30f82702e1e
[ "MIT" ]
null
null
null
49.320776
239
0.477722
[ [ [ "# Visuals\n\nThis notebook contains visual output functions for Constitutive Models, more specifically for a bounding surface model by Borjas & Amies, 1994. The functions are developed and maintained by Justin Bonus (University of Washington).\n\nUse ``%run YOURPATH/'Bounding Surface'/Visuals.ipynb`` at the start of your notebook to import these functions.", "_____no_output_____" ] ], [ [ "def visBorjas3D(R_B, R_F, Stress0):\n #========================================================================\n # Visualizing the Borjas constitutive model in 3D\n #========================================================================\n #Base code for generic cylinder plotting:\n #Created on Sun Oct 2 18:33:10 2016\n #Modified from https://stackoverflow.com/questions/38076682/how-to-add-\n #colors-to-each-individual-face-of-a-cylinder-using-matplotlib\n #Author: astrokeat\n #\n #Edited to produce multiple Von Mises cylinders, axis, kappa contours,\n #and interactivity\n #Jul 15 2019\n #Author: Justin Bonus\n #========================================================================\n import numpy as np\n from collections import namedtuple\n from scipy.linalg import norm\n\n # Inputs\n Su = 0.061\n RR = R_B\n #Stress0 = np.array([0,0,0,.1,.07,.03]) #Last unloading point AKA \\alpha\n proj_Stress0 = hydroProjector(Stress0,2) #Projected onto plane normal to the hydrostatic axis, centered on origin\n norm_proj_Stress0 = float(normS(proj_Stress0))\n \n # Broken, currently scaling down dev_Stress0 outside of function\n if norm_proj_Stress0 >= float(R_B):\n reduction = float(0.99 * (float(R_B) / norm_projStress0))\n adj_proj_Stress0 = reduction * proj_Stress0\n adj_norm_proj_Stress0 = float(normS(adj_proj_Stress0))\n else:\n adj_norm_proj_Stress0 = norm_proj_Stress0\n adj_proj_Stress0 = proj_Stress0\n \n \n #R_B = Su*(8/3)**0.5 #Radius of bounding surface, (8/3)^{1/2}Su\n p0 = np.array([-R_B, -R_B, -R_B]) #Point at one end, [\\sigma_1, \\sigma_2, \\sigma_3]\n p1 = np.array([R_B, R_B, R_B]) #Point at other end, [\\sigma_1, \\sigma_2, \\sigma_3]\n\n #R_F = (2/5)*R_B #Radius of yield surface AKA \\zeta', MPa\n p0_F = np.array([p0[0] + proj_Stress0[2], p0[1] + proj_Stress0[1], p0[2] + proj_Stress0[0]])\n p1_F = np.array([p1[0] + proj_Stress0[2], p1[1] + proj_Stress0[1], p1[2] + proj_Stress0[0]])\n\n\n \n def kappaCon(kappa, proj_Stress0): \n norm_center = (kappa * norm_proj_Stress0)/(kappa + 1)\n small = 1e-10\n if norm_proj_Stress0 < small:\n rat = 0\n else:\n rat = norm_center / adj_norm_proj_Stress0\n center = rat * adj_proj_Stress0\n north = ((RR - kappa*adj_norm_proj_Stress0)/(kappa + 1))\n south = -((RR + kappa*adj_norm_proj_Stress0)/(kappa + 1))\n rad = np.abs((north - south)/2)\n return center, rad\n \n kappa_BKOne = 5\n kappa_BKTwo = 2\n kappa_BKThree = 1\n kappa_BKFour = 0.3\n center_BKOne, R_BKOne = kappaCon(kappa_BKOne, adj_proj_Stress0)\n center_BKTwo, R_BKTwo = kappaCon(kappa_BKTwo, adj_proj_Stress0)\n center_BKThree, R_BKThree = kappaCon(kappa_BKThree, adj_proj_Stress0)\n center_BKFour, R_BKFour = kappaCon(kappa_BKFour, adj_proj_Stress0)\n \n p0_BKOne = np.array([p0[0] + center_BKOne[2], p0[1] + center_BKOne[1], p0[2] + center_BKOne[0]])\n p1_BKOne = np.array([p1[0] + center_BKOne[2], p1[1] + center_BKOne[1], p1[2] + center_BKOne[0]])\n p0_BKTwo = np.array([p0[0] + center_BKTwo[2], p0[1] + center_BKTwo[1], p0[2] + center_BKTwo[0]])\n p1_BKTwo = np.array([p1[0] + center_BKTwo[2], p1[1] + center_BKTwo[1], p1[2] + center_BKTwo[0]])\n p0_BKThree = np.array([p0[0] + center_BKThree[2], p0[1] + 
center_BKThree[1], p0[2] + center_BKThree[0]])\n p1_BKThree = np.array([p1[0] + center_BKThree[2], p1[1] + center_BKThree[1], p1[2] + center_BKThree[0]])\n p0_BKFour = np.array([p0[0] + center_BKFour[2], p0[1] + center_BKFour[1], p0[2] + center_BKFour[0]])\n p1_BKFour = np.array([p1[0] + center_BKFour[2], p1[1] + center_BKFour[1], p1[2] + center_BKFour[0]])\n \n #Vector in direction of axis\n v = p1 - p0\n v_F = p1_F - p0_F\n \n v_BKOne = p1_BKOne - p0_BKOne\n v_BKTwo = p1_BKTwo - p0_BKTwo\n v_BKThree = p1_BKThree - p0_BKThree\n v_BKFour = p1_BKFour - p0_BKFour\n \n #Unit vector in direction of axis\n mag = norm(v)\n v = v / mag\n mag_F = norm(v_F)\n v_F = v_F / mag_F\n \n mag_BKOne = norm(v_BKOne)\n v_BKOne = v_BKOne / mag_BKOne\n mag_BKTwo = norm(v_BKTwo)\n v_BKTwo = v_BKTwo / mag_BKTwo\n mag_BKThree = norm(v_BKThree)\n v_BKThree = v_BKThree / mag_BKThree\n mag_BKFour = norm(v_BKFour)\n v_BKFour = v_BKFour / mag_BKFour\n \n #Make some of the vectors not in the same direction as v\n not_v = np.array([1, 0, 0])\n if (v == not_v).all():\n not_v = np.array([0, 1, 0])\n not_v_F = np.array([1, 0, 0])\n if (v_F == not_v_F).all():\n not_v_F = np.array([0, 1, 0]) \n \n not_v_BKOne = np.array([1,0,0])\n if (v_BKOne == not_v_BKOne).all():\n not_v_BKOne = np.array([0,1,0])\n not_v_BKTwo = np.array([1,0,0])\n if (v_BKTwo == not_v_BKTwo).all():\n not_v_BKTwo = np.array([0,1,0])\n not_v_BKThree = np.array([1,0,0])\n if (v_BKThree == not_v_BKThree).all():\n not_v_BKThree = np.array([0,1,0])\n not_v_BKFour = np.array([1,0,0])\n if (v_BKFour == not_v_BKFour).all():\n not_v_BKFour = np.array([0,1,0])\n \n #Make vector perpendicular to v, normalize n1\n n1 = np.cross(v, not_v)\n n1 /= norm(n1)\n n1_F = np.cross(v_F, not_v_F)\n n1_F /= norm(n1_F)\n\n n1_BKOne = np.cross(v_BKOne, not_v_BKOne)\n n1_BKOne /= norm(n1_BKOne)\n n1_BKTwo = np.cross(v_BKTwo, not_v_BKTwo)\n n1_BKTwo /= norm(n1_BKTwo)\n n1_BKThree = np.cross(v_BKThree, not_v_BKThree)\n n1_BKThree /= norm(n1_BKThree)\n n1_BKFour = np.cross(v_BKFour, not_v_BKFour)\n n1_BKFour /= norm(n1_BKFour)\n \n #Make unit vector perpendicular to v and n1\n n2 = np.cross(v, n1)\n n2_F = np.cross(v_F, n1_F)\n \n n2_BKOne = np.cross(v_BKOne, n1_BKOne)\n n2_BKTwo = np.cross(v_BKTwo, n1_BKTwo)\n n2_BKThree = np.cross(v_BKThree, n1_BKThree)\n n2_BKFour = np.cross(v_BKFour, n1_BKFour)\n \n #Surface ranges over t from 0 to length of axis and 0 to 2*pi\n t = np.linspace(0, mag, 2)\n theta = np.linspace(0, 2 * np.pi, 100)\n rsample = np.linspace(0, R_B, 2)\n t_F = np.linspace(0, mag_F, 2)\n theta_F = np.linspace(0, 2 * np.pi, 100)\n rsample_F = np.linspace(0, R_F, 2)\n \n t_BKOne = np.linspace(0, mag_BKOne, 2)\n theta_BKOne = np.linspace(0, 2 * np.pi, 100)\n rsample_BKOne = np.linspace(0, R_BKOne, 2)\n t_BKTwo = np.linspace(0, mag_BKTwo, 2)\n theta_BKTwo = np.linspace(0, 2 * np.pi, 100)\n rsample_BKTwo = np.linspace(0, R_BKTwo, 2)\n t_BKThree = np.linspace(0, mag_BKThree, 2)\n theta_BKThree = np.linspace(0, 2 * np.pi, 100)\n rsample_BKThree = np.linspace(0, R_BKThree, 2)\n t_BKFour = np.linspace(0, mag_BKFour, 2)\n theta_BKFour = np.linspace(0, 2 * np.pi, 100)\n rsample_BKFour = np.linspace(0, R_BKFour, 2)\n \n #Use meshgrid to make 2d arrays\n t, theta2 = np.meshgrid(t, theta)\n rsample,theta = np.meshgrid(rsample, theta)\n t_F, theta2_F = np.meshgrid(t_F, theta_F)\n rsample_F,theta_F = np.meshgrid(rsample_F, theta_F)\n\n t_BKOne, theta2_BKOne = np.meshgrid(t_BKOne, theta_BKOne)\n rsample_BKOne, theta_BKOne = np.meshgrid(rsample_BKOne, theta_BKOne)\n t_BKTwo, theta2_BKTwo = 
np.meshgrid(t_BKTwo, theta_BKTwo)\n rsample_BKTwo, theta_BKTwo = np.meshgrid(rsample_BKTwo, theta_BKTwo)\n t_BKThree, theta2_BKThree = np.meshgrid(t_BKThree, theta_BKThree)\n rsample_BKThree, theta_BKThree = np.meshgrid(rsample_BKThree, theta_BKThree)\n t_BKFour, theta2_BKFour = np.meshgrid(t_BKFour, theta_BKFour)\n rsample_BKFour, theta_BKFour = np.meshgrid(rsample_BKFour, theta_BKFour)\n \n #Generate coordinates for surface\n # Bounding and Yield Surfaces\n X, Y, Z = [p0[i] + v[i] * t + R_B * np.sin(theta2) * n1[i] + R_B * np.cos(theta2) * n2[i] for i in [0, 1, 2]]\n X_F, Y_F, Z_F = [p0_F[i] + v_F[i] * t_F + R_F * np.sin(theta2_F) * n1_F[i] + R_F * np.cos(theta2_F) * n2_F[i] for i in [0, 1, 2]]\n \n # Lines\n X_hydro, Y_hydro, Z_hydro = [p0[i] + v[i]*t for i in [0, 1, 2]]\n X_F0, Y_F0, Z_F0 = [p0_F[i] + v[i]*t for i in [0, 1, 2]] \n X_alpha = np.array([p1[0], p1_F[0]]); Y_alpha = np.array([p1[1], p1_F[1]]); Z_alpha = np.array([p1[2], p1_F[2]]).reshape(1,2)\n \n # \\kappa Contour Surfaces\n X_BKOne, Y_BKOne, Z_BKOne = [p0_BKOne[i] + v_BKOne[i] * t_BKOne + R_BKOne * np.sin(theta2_BKOne) * n1_BKOne[i] + R_BKOne * np.cos(theta2_BKOne) * n2_BKOne[i] for i in [0, 1, 2]]\n X_BKTwo, Y_BKTwo, Z_BKTwo = [p0_BKTwo[i] + v_BKTwo[i] * t_BKTwo + R_BKTwo * np.sin(theta2_BKTwo) * n1_BKTwo[i] + R_BKTwo * np.cos(theta2_BKTwo) * n2_BKTwo[i] for i in [0, 1, 2]]\n X_BKThree, Y_BKThree, Z_BKThree = [p0_BKThree[i] + v_BKThree[i] * t_BKThree + R_BKThree * np.sin(theta2_BKThree) * n1_BKThree[i] + R_BKThree * np.cos(theta2_BKThree) * n2_BKThree[i] for i in [0, 1, 2]]\n X_BKFour, Y_BKFour, Z_BKFour = [p0_BKFour[i] + v_BKFour[i] * t_BKFour + R_BKFour * np.sin(theta2_BKFour) * n1_BKFour[i] + R_BKFour * np.cos(theta2_BKFour) * n2_BKFour[i] for i in [0, 1, 2]]\n \n # \"Bottom\"\n #X2, Y2, Z2 = [p0[i] + rsample[i] * np.sin(theta) * n1[i] + rsample[i] * np.cos(theta) * n2[i] for i in [0, 1, 2]]\n # \"Top\"\n #X3, Y3, Z3 = [p0[i] + v[i]*mag + rsample[i] * np.sin(theta) * n1[i] + rsample[i] * np.cos(theta) * n2[i] for i in [0, 1, 2]]\n\n #Factor fixes projection issue when viewing in 3D\n Mc = np.sqrt(3/2)*R_B\n refLine1x = np.array([0, Mc]); refLine1y = np.array([0, 0]); refLine1z = np.array([0, 0]).reshape(1,2);\n refLine2x = np.array([0, 0]); refLine2y = np.array([0, Mc]); refLine2z = np.array([0, 0]).reshape(1,2);\n refLine3x = np.array([0, 0]); refLine3y = np.array([0, 0]); refLine3z = np.array([0, Mc]).reshape(1,2);\n\n \n out = namedtuple('out',['refLine1x','refLine1y','refLine1z',\n 'refLine2x','refLine2y','refLine2z',\n 'refLine3x','refLine3y','refLine3z',\n 'X_hydro','Y_hydro','Z_hydro',\n 'X','Y','Z',\n 'X_F','Y_F','Z_F',\n 'X_F0','Y_F0','Z_F0',\n 'X_alpha','Y_alpha','Z_alpha',\n 'X_BKOne','Y_BKOne','Z_BKOne',\n 'X_BKTwo','Y_BKTwo','Z_BKTwo',\n 'X_BKThree','Y_BKThree','Z_BKThree',\n 'X_BKFour','Y_BKFour','Z_BKFour'\n ])\n result = out(refLine1x,refLine1y,refLine1z,\n refLine2x,refLine2y,refLine2z,\n refLine3x,refLine3y,refLine3z,\n X_hydro,Y_hydro,Z_hydro,\n X,Y,Z,\n X_F,Y_F,Z_F,\n X_F0,Y_F0,Z_F0,\n X_alpha,Y_alpha,Z_alpha,\n X_BKOne,Y_BKOne,Z_BKOne,\n X_BKTwo,Y_BKTwo,Z_BKTwo,\n X_BKThree,Y_BKThree,Z_BKThree,\n X_BKFour,Y_BKFour,Z_BKFour)\n return result", "_____no_output_____" ], [ "def borjasInteractive():\n #===============================\n # PLOTTING INTERACTIVE 3D MODEL\n # -----\n %matplotlib notebook \n import matplotlib\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib.widgets import Slider, Button, RadioButtons\n\n # Basic Setup\n font = {'size': 14}\n 
matplotlib.rc('font', **font)\n fig_bound = plt.figure()\n fig_bound.set_figheight(6)\n fig_bound.set_figwidth(6)\n ax = fig_bound.gca(projection='3d', proj_type = 'persp')\n plt.ion()\n #----------------------------\n\n # Initial Stress Settings\n Su = 0.061\n R_B = Su*(8/3)**0.5 #Radius of bounding surface, (8/3)^{1/2}Su\n R_F = 0 #Radius of yield surface AKA \\zeta', MPa\n Stress0 = np.array([.05,.05,.05]) #Last unloading point AKA \\alpha\n #----------------------------\n\n # Slider Settings\n B0 = R_B\n axB = plt.axes([0.3, 0.1, 0.4, 0.015])\n sB = Slider(axB, '$R$', 0, 10*B0, valinit=B0)\n\n F0 = R_F\n axF = plt.axes([0.3, 0.125, 0.4, 0.015])\n sF = Slider(axF, '$r$', 0, 10*B0, valinit=F0)\n\n S1 = Stress0[0]; S2 = Stress0[1]; S3 = Stress0[2]\n\n axS1 = plt.axes([0.3, 0.15, 0.4, 0.015])\n axS2 = plt.axes([0.3, 0.175, 0.4, 0.015])\n axS3 = plt.axes([0.3, 0.2, 0.4, 0.015])\n sS1 = Slider(axS1, '$\\sigma_1$', -10*Stress0[0], 10*Stress0[0], valinit=S1)\n sS2 = Slider(axS2, '$\\sigma_2$', -10*Stress0[1], 10*Stress0[1], valinit=S2)\n sS3 = Slider(axS3, '$\\sigma_3$', -10*Stress0[2], 10*Stress0[2], valinit=S3)\n #----------------------------\n\n\n def draw(R_B,R_F,Stress0):\n # Prevent Stress0 from leaving bounding surface\n dev_Stress0 = dev(Stress0.reshape(3,1))\n norm_dev_Stress0 = normS(dev_Stress0)\n if R_B <= 0:\n R_B = 0.001\n if normS(dev_Stress0) >= R_B:\n reduction = 0.99 * (R_B / normS(dev_Stress0))\n dev_Stress0 = reduction * dev_Stress0\n Stress0[0] = dev_Stress0[0]\n Stress0[1] = dev_Stress0[1]\n Stress0[2] = dev_Stress0[2]\n\n # Retrieve cylinders and axis for stress state\n vis = visBorjas3D(R_B, R_F, Stress0)\n\n # Plot reference lines\n ax.plot_wireframe(vis.refLine1x, vis.refLine1y, vis.refLine1z, color = 'black', label='$\\sigma$ Axis') \n ax.plot_wireframe(vis.refLine2x, vis.refLine2y, vis.refLine2z, color = 'black') \n ax.plot_wireframe(vis.refLine3x, vis.refLine3y, vis.refLine3z, color = 'black')\n\n # Plot axis and lines\n ax.plot_wireframe(vis.X_hydro, vis.Y_hydro, vis.Z_hydro, color='red', label = 'Hydrostatic Axis') #Plots hydrostatic axis\n ax.plot_wireframe(vis.X_F0, vis.Y_F0, vis.Z_F0, color='orange', label = 'Unloading Axis') #Plots axes that goes through last unloading point, F0\n ax.plot_wireframe(vis.X_alpha, vis.Y_alpha, vis.Z_alpha, color='grey', label = 'alpha')\n\n # Plot cylinders\n ax.plot_surface(vis.X_F, vis.Y_F, vis.Z_F, color='blue') #Plots yield surface, F\n ax.plot_surface(vis.X_BKOne, vis.Y_BKOne, vis.Z_BKOne, color='yellow')\n ax.plot_surface(vis.X_BKTwo, vis.Y_BKTwo, vis.Z_BKTwo, color='orange')\n ax.plot_surface(vis.X_BKThree, vis.Y_BKThree, vis.Z_BKThree, color='red')\n ax.plot_surface(vis.X_BKFour, vis.Y_BKFour, vis.Z_BKFour, color='purple')\n ax.plot_wireframe(vis.X, vis.Y, vis.Z, color='black', label = 'Bounding Surface') #Plots bounding surface, B\n #ax.plot_surface(X2, Y2, Z2, color='blue')\n #ax.plot_surface(X3, Y3, Z3, color='blue')\n\n #plt.xlabel('$\\sigma_1$')\n #plt.ylabel('$\\sigma_2$')\n #plt.zlabel('\\sigma_3')\n #plt.legend(bbox_to_anchor=(.75,1), loc=\"lower right\") # Legend outside plot\n #plt.legend()\n #plt.tight_layout()\n\n #Make the panes transparent\n ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n\n #Make the grid lines transparent\n ax.set_axis_off()\n ax.xaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n ax.yaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n ax.zaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n\n ax.set_title('Model 
in $\\sigma$ Space', fontdict=None, loc='center', pad=None)\n plt.tight_layout()\n plt.show()\n #----------------------------\n\n # Initialize plot \n draw(R_B,R_F,Stress0)\n\n def update(val): \n R_B = sB.val \n R_F = sF.val \n Stress0 = np.array([sS1.val, sS2.val, sS3.val])\n ax.clear()\n\n draw(R_B, R_F, Stress0)\n fig.canvas.draw_idle() \n return R_B, R_F, Stress0\n\n def viewDev(self):\n #Set view to deviatoric space\n ax.clear()\n R_B = sB.val \n R_F = sF.val \n Stress0 = np.array([sS1.val, sS2.val, sS3.val])\n draw(R_B, R_F, Stress0)\n ax.view_init(azim=45., elev=38)\n plt.show()\n\n def viewX(self):\n #Set view to deviatoric space\n ax.clear()\n R_B = sB.val \n R_F = sF.val \n Stress0 = np.array([sS1.val, sS2.val, sS3.val])\n draw(R_B, R_F, Stress0)\n ax.view_init(azim=0, elev=0)\n plt.show()\n\n\n sB.on_changed(update)\n sF.on_changed(update)\n sS1.on_changed(update)\n sS2.on_changed(update)\n sS3.on_changed(update)\n\n #axprev = plt.axes([0.7, 0.05, 0.1, 0.075])\n axDev = plt.axes([0.1, 0.1, 0.1, 0.05])\n bDev = Button(axDev, '$\\Pi$-plane')\n bDev.label.set_fontsize(10)\n bDev.on_clicked(viewDev)\n\n axX = plt.axes([0.1, 0.15, 0.1, 0.05])\n bX = Button(axX, 'X-View')\n bX.label.set_fontsize(10)\n bX.on_clicked(viewX)\n\n #bprev = Button(axprev, 'Previous')\n #bprev.on_clicked(callback.prev)\n\n plt.show()", "_____no_output_____" ], [ "def drawDeviatoricSurface_function(Stress0, CurStress, Su):\n import numpy as np\n from scipy.linalg import norm\n\n def GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, SType):\n # Mc = max(q/p) in compresion\n # Me = max(q/p) in extension\n # SType = type of surface \n # aRatio = MMe/MMc\n # theta = Lode angle\n \n lode = np.linspace(0, 2*np.pi, 2*np.pi/0.02)\n c = Me / Mc\n aSurf = np.zeros((len(lode),3))\n for i in range(0,len(lode)):\n lodeMap = GetLodeMapping(lode[i])\n if SType == 'MC':\n q = MohrCoulomb(Mc, c, lodeMap) \n elif SType == 'MN':\n q = MatsuokaNakai(Mc, c, lodeMap)\n elif SType == 'WB':\n q = Mc * g(lodeMap, c)\n elif SType == 'VM':\n q = VonMises(Mc, c, lodeMap)\n elif SType == 'B':\n q = Borjas(Mc, c, lodeMap)\n elif SType == 'BK':\n q = BorjasKappa(Mc, c, VMSurf, kappa)\n else:\n q = 1.0\n \n if SType != 'BK':\n # For MC, MN, WB, VM\n aSurf[i,0] = 2/3 * q * np.cos(lode[i])\n aSurf[i,1] = 2/3 * q * np.cos(lode[i] - 2*np.pi/3)\n aSurf[i,2] = 2/3 * q * np.cos(lode[i] - 4*np.pi/3)\n\n if SType == 'B':\n # For B\n aSurf[i,0] = np.sqrt(3/2)*(aSurf[i,0]) + (princ_dev_Stress0[0])\n aSurf[i,1] = np.sqrt(3/2)*(aSurf[i,1]) + (princ_dev_Stress0[1]) \n aSurf[i,2] = np.sqrt(3/2)*(aSurf[i,2]) + (princ_dev_Stress0[2]) \n \n elif SType == 'BK':\n # For BK\n small = 1e-10\n norm_center = (kappa * norm_dev_Stress0)/(kappa + 1)\n if norm_dev_Stress0 < small:\n rat = 0\n else:\n rat = norm_center / norm_dev_Stress0\n center = rat * princ_dev_Stress0\n\n aSurf[i,0] = 2/3 * q * np.cos(lode[i])\n aSurf[i,1] = 2/3 * q * np.cos(lode[i] - 2*np.pi/3)\n aSurf[i,2] = 2/3 * q * np.cos(lode[i] - 4*np.pi/3)\n aSurf[i,0] = np.sqrt(3/2)*(aSurf[i,0]) + (center[0])\n aSurf[i,1] = np.sqrt(3/2)*(aSurf[i,1]) + (center[1]) \n aSurf[i,2] = np.sqrt(3/2)*(aSurf[i,2]) + (center[2])\n \n return aSurf\n #********************************************************************************************************\n def GetLodeMapping(theta):\n if theta >=0 and theta < np.pi/3:\n theRegion = 1\n elif theta >= np.pi/3 and theta < 2*np.pi/3:\n theRegion = 2\n elif theta >=2*np.pi/3 and theta < np.pi:\n theRegion = 3\n elif theta >= np.pi and theta < 4*np.pi/3:\n 
theRegion = 4\n elif theta >=4*np.pi/3 and theta < 5*np.pi/3:\n theRegion = 5\n elif theta >=5*np.pi/3 and theta <= 2*np.pi:\n theRegion = 6\n else:\n theRegion = 7\n \n #Pseudo-switch \n switch_region = {\n 1: theta,\n 2: (2*np.pi/3 - theta),\n 3: (theta - 2*np.pi/3),\n 4: (4*np.pi/3 - theta),\n 5: (theta - 4*np.pi/3),\n 6: (6*np.pi/3 -theta)\n } \n lodemap = switch_region.get(theRegion, 'Check GetLodeMapping function')\n return lodemap \n #********************************************************************************************************\n def g(theta, c):\n term1 = 4.0 * (1.0 - c*c) * np.cos(theta - np.pi/3.0) * np.cos(theta - np.pi/3.0) + 5.0 * c * c - 4.0 * c\n aNumer = 2.0 * (1.0 - c*c) * np.cos(theta - np.pi/3.0) + (2.0*c - 1.0) * np.sqrt(term1)\n aDeno = 4.0 * (1.0 - c*c) * np.cos(theta - np.pi/3.0) * np.cos(theta - np.pi/3.0) + (1.0 - 2.0*c) * (1.0 - 2.0*c)\n result = aNumer / aDeno\n return result\n #********************************************************************************************************\n def MohrCoulomb(MM, aRatio, theta):\n # MM = q/p slope in compresion\n # aRatio = MMe/MMc\n # theta = Lode angle\n c=0\n p=1.0\n # phi = asin(3.0*(1.0-aRatio)/(1+aRatio))*180.0/pi\n #phi = ((np.arcsin(( np.divide(np.multiply( np.sqrt(3)*MM, np.sin(0+np.pi/3) ), (np.multiply( 3*1+MM, (np.cos(0+np.pi/3) )) )) )))).max()*180/np.pi\n phi = ((np.arcsin((np.sqrt(3)*MM*np.sin(0+np.pi/3))/(3*1+MM*(np.cos(0+np.pi/3)))))).max()*180/np.pi\n # term1 = 0.5*(sqrt(3)*(1+sin(phi*pi/180.0))*sin(theta)+(3-sin(phi*pi/180.0))*cos(theta))\n # result = 3.0*(c * cos(phi*pi/180.0)+p*sin(phi*pi/180.0))/term1*sqrt(3.0/2.0)\n result = 3*(p*np.sin(phi*np.pi/180)/(np.sqrt(3)*np.sin(theta+np.pi/3)-np.cos(theta+np.pi/3)*np.sin(phi*np.pi/180)))\n return result\n #********************************************************************************************************\n def MatsuokaNakai(MM, aRatio, theta):\n # MM = q/p slope in compresion\n # aRatio = MMe/MMc\n # theta = Lode angle\n c=0\n p=1.0\n # phi = asin(3.0*(1.0-aRatio)/(1+aRatio))*180.0/pi;\n phi = ((np.arcsin((np.sqrt(3)*MM*np.sin(0+np.pi/3))/(3*1+MM*(np.cos(0+np.pi/3)))))).max()*180/np.pi\n eta=2*np.sin(phi*np.pi/180)/np.sqrt(4 - np.cos(phi*np.pi/180)**2)\n xi=np.sin(phi*np.pi/180)*(np.cos(phi*np.pi/180)**2 + 8)/np.sqrt((4 - np.cos(phi*np.pi/180)**2)**3)\n g=2*np.sqrt(3)*np.cos(np.arccos(xi*(-np.cos(3*theta)))/3)\n result = 3*np.sqrt(3)*eta*p/g\n return result\n #********************************************************************************************************\n def M_MatsuokaNakai(MM, aRatio, lode):\n # MM = q/p slope in compresion\n # aRatio = MMe/MMc\n # theta = Lode angle\n c=0\n p=1.0\n # phi = asin(3.0*(1.0-aRatio)/(1+aRatio))*180.0/pi;\n phi = max((np.arcsin((np.sqrt(3)*MM*np.sin(0+np.pi/3))/(3*1+MM*(np.cos(0+np.pi/3))))))*180/np.pi\n eta=2*np.sin(phi*np.pi/180)/np.sqrt(4-np.cos(phi*np.pi/180)**2)\n xi=np.sin(phi*np.pi/180)*(np.cos(phi*np.pi/180)**2+8)/np.sqrt((4-np.cos(phi*np.pi/180)**2)**3)\n for i in range(0,len(lode)):\n lodeMap = GetLodeMapping(lode[i])\n g=2*np.sqrt(3)*np.cos(np.arccos(xi*(-np.cos(3*lode[i])))/3)\n result = 3*np.sqrt(3)*eta/g\n return result\n #********************************************************************************************************\n def VonMises(MM, aRatio, theta):\n #Creates circle of radius MM around the origin\n R = MM\n result = R\n return result\n #********************************************************************************************************\n def Borjas(MM, aRatio, theta):\n 
#Determines the yield function radius and constructs a circle around point of last unloading\n result = eucli\n return result\n #********************************************************************************************************\n # Output kappa contour\n def BorjasKappa(MM, aRatio, theta, frac):\n # kappa defined outside of function\n small = 1e-10\n RR = MM\n \n norm_dev_Stress0 = float(normS(princ_dev_Stress0))\n norm_center = (kappa * norm_dev_Stress0)/(kappa + 1)\n if norm_dev_Stress0 < small:\n rat = 0\n else:\n rat = norm_center / norm_dev_Stress0\n center = rat * princ_dev_Stress0\n north = ((RR - kappa*norm_dev_Stress0)/(kappa + 1))\n south = -((RR + kappa*norm_dev_Stress0)/(kappa + 1))\n rad = np.abs((north - south)/2)\n return rad\n #********************************************************************************************************\n #Mc being used as a proxy for R\n Mc = ((8/3)**0.5)*Su\n Me = Mc * 0.65\n aRatio=Me/Mc\n theta0=0\n\n phi = np.arcsin(3.0*(1.0-aRatio)/(1+aRatio))*180.0/np.pi\n phi2 = ((np.arcsin((np.sqrt(3)*Mc*np.sin(theta0+np.pi/3))/(3*1+Mc*(np.cos(theta0+np.pi/3))))))*180/np.pi\n\n #======================================\n # Project vectors (6x1, 3x1) onto flattened deviatoric plane\n # U_{proj-plane} = U - ((dot(U,n))/||n||^2)n\n # U is arbitrary vector, n is vector normal to plane to project on\n # dev() will automatically perform this projection\n #======================================\n \n if CurStress.shape == (6,1) or CurStress.shape == (6,) or CurStress.shape == (1,6):\n Stress0 = Stress0.reshape(6,1)\n CurStress = CurStress.reshape(6,1)\n dev_Stress0 = dev(Stress0)\n dev_CurStress = dev(CurStress)\n norm_dev_Stress0 = normS(dev_Stress0)\n norm_dev_CurStress = normS(dev_CurStress)\n\n #Solve eigenvalues for normal principal stress\n p1,p2,p3 = princVal(dev_Stress0,0)\n princ_dev_Stress0 = np.array([p1,p2,p3])\n p1,p2,p3 = princVal(dev_CurStress,0)\n princ_dev_CurStress = np.array([p1,p2,p3])\n \n elif CurStress.shape == (3,1) or CurStress.shape == (3,) or CurStress.shape == (1,3):\n Stress0 = Stress0.reshape(3,1)\n CurStress = CurStress.reshape(3,1)\n dev_Stress0 = dev(Stress0).reshape(3,1)\n dev_CurStress = dev(CurStress).reshape(3,1)\n norm_dev_Stress0 = normS(dev_Stress0)\n norm_dev_CurStress = normS(dev_CurStress)\n \n princ_dev_Stress0 = dev_Stress0\n princ_dev_CurStress = dev_CurStress\n\n \n #Euclidian distance\n eucli = ((princ_dev_CurStress[2]-princ_dev_Stress0[2])**2 +\n (princ_dev_CurStress[1]-princ_dev_Stress0[1])**2 +\n (princ_dev_CurStress[0]-princ_dev_Stress0[0])**2)**0.5\n \n #Get projected constitutive shapes (MC, MN , WB, VM, B)\n #MCSurf = GetSurfaceInfo(Mc, Me, proj_dev_Stress0, proj_dev_CurStress, 'MC')\n #MNSurf = GetSurfaceInfo(Mc, Me, proj_dev_Stress0, proj_dev_CurStress, 'MN')\n #K1=MNSurf[:,0]*3/2\n #WBSurf = GetSurfaceInfo(Mc, Me, proj_dev_Stress0, proj_dev_CurStress, 'WB')\n #MCSurf5 = GetSurfaceInfo(Mc, Me, proj_Stress0, proj_CurStress, 'B')\n \n # Create bounding and yield surface\n BSurf = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'B')\n proj_R = np.sqrt(3/2)*Mc\n VMSurf = GetSurfaceInfo(proj_R, Me, princ_dev_Stress0, princ_dev_CurStress, 'VM')\n\n \n # Create contours of equal \\kappa value\n kappa = 5\n BKSurfOne = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'BK')\n kappa = 2\n BKSurfTwo = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'BK')\n kappa = 1\n BKSurfThree = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'BK')\n kappa = 0.2\n 
BKSurfFour = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'BK')\n \n # Lines for the hydrostatic axis and reference axis\n hydroLinex = np.array([0,proj_R]); hydroLiney = np.array([0,proj_R]); hydroLinez = np.array([0,proj_R]).reshape(1,2) \n refLine1x = np.array([0, proj_R]); refLine1y = np.array([0, 0]); refLine1z = np.array([0, 0]).reshape(1,2);\n refLine2x = np.array([0, 0]); refLine2y = np.array([0, proj_R]); refLine2z = np.array([0, 0]).reshape(1,2);\n refLine3x = np.array([0, 0]); refLine3y = np.array([0, 0]); refLine3z = np.array([0, proj_R]).reshape(1,2);\n\n # Projected lines for last unloading point (alpha) and current stress point (active), and connector (zeta)\n dev_alphaLinex = np.array([0,float(princ_dev_Stress0[2])]); \n dev_alphaLiney = np.array([0,float(princ_dev_Stress0[1])]); \n dev_alphaLinez = np.array([0,float(princ_dev_Stress0[0])]).reshape(1,2)\n dev_stressLinex = np.array([0,float(princ_dev_CurStress[2])]); \n dev_stressLiney = np.array([0,float(princ_dev_CurStress[1])]); \n dev_stressLinez = np.array([0,float(princ_dev_CurStress[0])]).reshape(1,2) \n ## AKA \\zeta' = \\sigma' - alpha \n dev_zetaLinex = np.array([float(princ_dev_Stress0[2]),float(princ_dev_CurStress[2])]); \n dev_zetaLiney = np.array([float(princ_dev_Stress0[1]),float(princ_dev_CurStress[1])]); \n dev_zetaLinez = np.array([float(princ_dev_Stress0[0]),float(princ_dev_CurStress[0])]).reshape(1,2) \n \n # Draw from stress point to projection plane\n dev_alphaAxisx = np.array([princ_dev_Stress0[2],princ_dev_Stress0[2]+Mc/4]); \n dev_alphaAxisy = np.array([princ_dev_Stress0[1],princ_dev_Stress0[1]+Mc/4]); \n dev_alphaAxisz = np.array([princ_dev_Stress0[0],princ_dev_Stress0[0]+Mc/4]).reshape(1,2) \n dev_stressAxisx = np.array([princ_dev_CurStress[2],princ_dev_CurStress[2]+Mc/4]); \n dev_stressAxisy = np.array([princ_dev_CurStress[1],princ_dev_CurStress[1]+Mc/4]); \n dev_stressAxisz = np.array([princ_dev_CurStress[0],princ_dev_CurStress[0]+Mc/4]).reshape(1,2) \n\n \n # Store all wireframes in a namedtuple, similar to class structure\n # Makes it convenient to access when calling this function\n result = namedtuple('result', ['VMSurf','BSurf', 'BKSurfOne', 'BKSurfTwo', 'BKSurfThree', 'BKSurfFour',\n 'hydroLinex', 'hydroLiney', 'hydroLinez', \n 'refLine1x', 'refLine1y', 'refLine1z',\n 'refLine2x', 'refLine2y', 'refLine2z',\n 'refLine3x', 'refLine3y', 'refLine3z',\n 'dev_alphaLinex', 'dev_alphaLiney', 'dev_alphaLinez',\n 'dev_stressAxisx', 'dev_stressAxisy', 'dev_stressAxisz',\n 'dev_alphaAxisx', 'dev_alphaAxisy', 'dev_alphaAxisz',\n 'dev_stressLinex', 'dev_stressLiney', 'dev_stressLinez',\n 'dev_zetaLinex', 'dev_zetaLiney', 'dev_zetaLinez'])\n \n result = result(VMSurf,BSurf,BKSurfOne,BKSurfTwo, BKSurfThree, BKSurfFour,\n hydroLinex, hydroLiney, hydroLinez, \n refLine1x, refLine1y, refLine1z,\n refLine2x, refLine2y, refLine2z,\n refLine3x, refLine3y, refLine3z,\n dev_alphaLinex, dev_alphaLiney, dev_alphaLinez,\n dev_stressAxisx, dev_stressAxisy, dev_stressAxisz,\n dev_alphaAxisx, dev_alphaAxisy, dev_alphaAxisz,\n dev_stressLinex, dev_stressLiney, dev_stressLinez,\n dev_zetaLinex, dev_zetaLiney, dev_zetaLinez)\n\n return result", "_____no_output_____" ], [ "def visHardening():\n #import numpy as np\n %matplotlib notebook\n %matplotlib notebook\n import matplotlib.pyplot as plt\n from matplotlib.widgets import Slider, Button, RadioButtons\n\n R = 1\n G = 7\n hh = G\n mm = 1.1\n A = 0.9\n theta = 0.5\n nPoints = 501\n x = np.linspace(-R,R,nPoints)\n\n # -----Initial Setting-----\n R0 = 1\n 
S0 = R0/2\n kappa = []\n for xx in x:\n if xx >= S0:\n result = float(np.abs((R0 - xx)/(xx - S0)))\n kappa.append(result)\n else:\n result = float(np.abs((R0 + xx)/(xx - S0)))\n kappa.append(result)\n #theta = []\n expH, hypH, davH = [], [], []\n for k in kappa:\n expH.append( hh * k ** mm )\n hypH.append( 3*G * (k**2)/(1 + 2*k) )\n #theta.append(1/k)\n davH.append( 3*G * ((1 + theta)/(1 + theta + 1)*(1 + 1/theta)**A) - 1 )\n\n # -------------------------\n\n fig, ax = plt.subplots()\n plt.subplots_adjust(left=0.25, bottom=0.25)\n #t = np.arange(0.0, 1.0, 0.001)\n #a0 = 5\n\n l, = plt.plot(x, kappa, lw=2, label='$\\kappa')\n e, = plt.plot(x, expH, lw=2, label='$Exponential H\\'$')\n h, = plt.plot(x, hypH, lw=2, label='$Hyperbolic H\\'$')\n d, = plt.plot(x, davH, lw=2, label='$Davidenkov H\\'$')\n\n ax.margins(x=0)\n ax.set_yscale('log')\n ax.set_xticks((-R,-R/2,0,R/2,R),('-R','-R/2','0','R/2','R'))\n #ax.set_xticklabels(('-R','-R/2','0','R/2','R'))\n axcolor = 'lightgoldenrodyellow'\n axS = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\n axR = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)\n\n\n sStress0 = Slider(axS, '$\\sigma_0$', -R, R, valinit=S0)\n sR = Slider(axR, 'R', 0.01, 10.0, valinit=R0)\n\n\n def update(val):\n Stress0 = sStress0.val\n R = sR.val\n #ax.set_xticks((-R,-R/2,0,R/2,R),('-R','-R/2','0','R/2','R'))\n #ax.set_xticklabels(('-R','-R/2','0','R/2','R'))\n x = np.linspace(-R,R,nPoints)\n kappa = []\n for xx in x:\n if xx >= Stress0:\n result = float(np.abs((R - xx)/(xx - Stress0)))\n kappa.append(result)\n else:\n result = float(np.abs((R + xx)/(xx - Stress0)))\n kappa.append(result)\n #theta = []\n expH, hypH, davH = [], [], []\n for k in kappa:\n expH.append( hh * k ** mm )\n hypH.append( 3*G * (k**2)/(1 + 2*k) )\n #theta.append(1/k)\n davH.append( 3*G * ((1 + theta)/(1 + theta + 1)*(1 + 1/theta)**A) - 1 )\n\n l.set_ydata(kappa); l.set_xdata(x)\n e.set_ydata(expH); e.set_xdata(x)\n h.set_ydata(hypH); h.set_xdata(x)\n d.set_ydata(davH); d.set_xdata(x)\n ax.set_xlim((-R,R))\n fig.canvas.draw_idle()\n\n sStress0.on_changed(update)\n sR.on_changed(update)\n\n resetax = plt.axes([0.8, 0.025, 0.1, 0.04])\n button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')\n\n\n def reset(event):\n sStress0.reset()\n sR.reset()\n button.on_clicked(reset)\n\n #rax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)\n #radio = RadioButtons(rax, ('sand', 'clay', active=0)\n\n\n #def colorfunc(label):\n # l.set_color(label)\n # fig.canvas.draw_idle()\n #radio.on_clicked(colorfunc)\n\n plt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
d0a92cea1eb184fe9a9859c9082f28a711847662
26,569
ipynb
Jupyter Notebook
baxter/notebooks/Lab1Part3.ipynb
woudie/undergrad-labs
acee18f09190f45c6a6433ecf9de8e3eb167ffd7
[ "Apache-2.0" ]
null
null
null
baxter/notebooks/Lab1Part3.ipynb
woudie/undergrad-labs
acee18f09190f45c6a6433ecf9de8e3eb167ffd7
[ "Apache-2.0" ]
null
null
null
baxter/notebooks/Lab1Part3.ipynb
woudie/undergrad-labs
acee18f09190f45c6a6433ecf9de8e3eb167ffd7
[ "Apache-2.0" ]
null
null
null
35.14418
742
0.453197
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0a9316c1ed49d0dd592661e03b2886d2279fbce
284,362
ipynb
Jupyter Notebook
notebooks/Hematopoiesis.ipynb
ManchesterBioinference/BranchedGP
dd2166b9093dd34940f35d1a96484bbe0edb629f
[ "Apache-2.0" ]
23
2017-07-22T19:59:28.000Z
2021-08-03T01:44:55.000Z
notebooks/Hematopoiesis.ipynb
ManchesterBioinference/BranchedGP
dd2166b9093dd34940f35d1a96484bbe0edb629f
[ "Apache-2.0" ]
36
2017-08-01T14:32:53.000Z
2022-02-10T02:20:07.000Z
notebooks/Hematopoiesis.ipynb
ManchesterBioinference/BranchedGP
dd2166b9093dd34940f35d1a96484bbe0edb629f
[ "Apache-2.0" ]
5
2017-10-16T02:40:14.000Z
2020-08-17T21:00:19.000Z
516.083485
110,092
0.933346
[ [ [ "Branching GP Regression on hematopoietic data\n--\n\n*Alexis Boukouvalas, 2017*\n\n**Note:** this notebook is automatically generated by [Jupytext](https://jupytext.readthedocs.io/en/latest/index.html), see the README for instructions on working with it.\n\ntest change\n\nBranching GP regression with Gaussian noise on the hematopoiesis data described in the paper \"BGP: Gaussian processes for identifying branching dynamics in single cell data\".\n\nThis notebook shows how to build a BGP model and plot the posterior model fit and posterior branching times.", "_____no_output_____" ] ], [ [ "import time\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nimport BranchedGP\n\nplt.style.use(\"ggplot\")\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Read the hematopoiesis data. This has been simplified to a small subset of 23 genes found to be branching.\nWe have also performed Monocle2 (version 2.1) - DDRTree on this data. The results loaded include the Monocle estimated pseudotime, branching assignment (state) and the DDRTree latent dimensions.", "_____no_output_____" ] ], [ [ "Y = pd.read_csv(\"singlecelldata/hematoData.csv\", index_col=[0])\nmonocle = pd.read_csv(\"singlecelldata/hematoMonocle.csv\", index_col=[0])", "_____no_output_____" ], [ "Y.head()", "_____no_output_____" ], [ "monocle.head()", "_____no_output_____" ], [ "# Plot Monocle DDRTree space\ngenelist = [\"FLT3\", \"KLF1\", \"MPO\"]\nf, ax = plt.subplots(1, len(genelist), figsize=(10, 5), sharex=True, sharey=True)\nfor ig, g in enumerate(genelist):\n y = Y[g].values\n yt = np.log(1 + y / y.max())\n yt = yt / yt.max()\n h = ax[ig].scatter(\n monocle[\"DDRTreeDim1\"],\n monocle[\"DDRTreeDim2\"],\n c=yt,\n s=50,\n alpha=1.0,\n vmin=0,\n vmax=1,\n )\n ax[ig].set_title(g)", "_____no_output_____" ], [ "def PlotGene(label, X, Y, s=3, alpha=1.0, ax=None):\n fig = None\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n for li in np.unique(label):\n idxN = (label == li).flatten()\n ax.scatter(X[idxN], Y[idxN], s=s, alpha=alpha, label=int(np.round(li)))\n return fig, ax", "_____no_output_____" ] ], [ [ "### Fit BGP model\nNotice the cell assignment uncertainty is higher for cells close to the branching point.\n", "_____no_output_____" ] ], [ [ "def FitGene(g, ns=20): # for quick results subsample data\n t = time.time()\n Bsearch = list(np.linspace(0.05, 0.95, 5)) + [\n 1.1\n ] # set of candidate branching points\n GPy = (Y[g].iloc[::ns].values - Y[g].iloc[::ns].values.mean())[\n :, None\n ] # remove mean from gene expression data\n GPt = monocle[\"StretchedPseudotime\"].values[::ns]\n globalBranching = monocle[\"State\"].values[::ns].astype(int)\n d = BranchedGP.FitBranchingModel.FitModel(Bsearch, GPt, GPy, globalBranching)\n print(g, \"BGP inference completed in %.1f seconds.\" % (time.time() - t))\n # plot BGP\n fig, ax = BranchedGP.VBHelperFunctions.PlotBGPFit(\n GPy, GPt, Bsearch, d, figsize=(10, 10)\n )\n # overplot data\n f, a = PlotGene(\n monocle[\"State\"].values,\n monocle[\"StretchedPseudotime\"].values,\n Y[g].values - Y[g].iloc[::ns].values.mean(),\n ax=ax[0],\n s=10,\n alpha=0.5,\n )\n # Calculate Bayes factor of branching vs non-branching\n bf = BranchedGP.VBHelperFunctions.CalculateBranchingEvidence(d)[\"logBayesFactor\"]\n\n fig.suptitle(\"%s log Bayes factor of branching %.1f\" % (g, bf))\n return d, fig, ax\n\n\nd, fig, ax = FitGene(\"MPO\")", "MPO BGP inference completed in 58.0 seconds.\n" ], [ "d_c, fig_c, ax_c = FitGene(\"CTSG\")", "CTSG BGP inference 
completed in 64.9 seconds.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0a93598268199b809729edcd64d4964beb1f75f
67,308
ipynb
Jupyter Notebook
scipy-stats.ipynb
killfoley/ML-and-Stats
f872e4cab71e97a2fb94b1346d66b64d28cacd65
[ "MIT" ]
null
null
null
scipy-stats.ipynb
killfoley/ML-and-Stats
f872e4cab71e97a2fb94b1346d66b64d28cacd65
[ "MIT" ]
null
null
null
scipy-stats.ipynb
killfoley/ML-and-Stats
f872e4cab71e97a2fb94b1346d66b64d28cacd65
[ "MIT" ]
null
null
null
78.265116
37,344
0.80977
[ [ [ "### A Jupyter Notebook exploring the Scipy.Stats module for Python. [scipy.stats offfical](https://docs.scipy.org/doc/scipy/reference/stats.html)\nThe Scipy.Stats module for Python offers a wide array of probability distributions, summary and frequency statistics, correlation functions and statistical tests, masked statistics, kernel density estimation, quasi-Monte Carlo functionality, and more. Since statistics is such a large discipline and covers many areas, there are other Python modules for areas such as machine learning, classification, regression, model selection and so on.<br> \nOne particular area of interest for the purpose of this demonstration is statistical testing.\n#### ANOVA Testing\nOne-way analysis of variance (ANOVA) testing is performed on 2 or more independent groups to determine if there are any statistically significant differences between the means of the groups. The test is generally performed on three or more groups with a t-test being performed when there are two groups, there are three independent variables in this example so a one-way ANOVA will be performed. [Laerd Statistics](https://statistics.laerd.com/spss-tutorials/one-way-anova-using-spss-statistics.php)<br>\n#### Assumptions\nAs part of the one-way ANOVA process, the data must be checked against 6 assumptions to ensure that the data can actually be analysed using a one-way ANOVA. Each of the 6 assumptions will be explored further in this notebook.\n***\nImport Python modules", "_____no_output_____" ] ], [ [ "# import modules\n# numerical operations\nimport numpy as np\n\n# general plotting\nimport matplotlib.pyplot as plt\n\n# data frames\nimport pandas as pd\n\n# statistical operations\nimport scipy.stats as ss\n\n# statistical plots\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "#### Example One-Way ANOVA: Golf Ball driving distance dataset\n***\nPremium golf ball manufacturers are constantly looking at ways to develop and improve their golf balls for both professional and amateur players. One attribute of a premium golf ball that is extremely important to professional and amateur golfers alike is the distance the ball travels particular with a driver. 
In this example one specific golf ball manufacturer is testing to see that there is no significant change in driving distance between the current golf ball design and the new golf ball design, a prototype model is also being tested for a future release.\n* **Null Hypothesis** (desired outcome) - The change in golf ball design has no effect on driving distance (mean of the current and new ball (and prototype) are almost the same)\n<br><br>\n* **Alternative Hypothesis** - The change in golf ball design has a significant effect on driving distance (mean of the current and new (and prototype) ball are significantly different).", "_____no_output_____" ] ], [ [ "# read in the dataset\ndf = pd.read_csv('https://raw.githubusercontent.com/killfoley/ML-and-Stats/main/data/golf_ball.csv')\ndf", "_____no_output_____" ] ], [ [ "<br>\n\n#### Assumption 1 - The dependent variable should be measured at the interval or ratio level \n(in this case metres probably actually yards)\n***", "_____no_output_____" ] ], [ [ "# dependent variable\nv_dep = df['distance']\nv_dep", "_____no_output_____" ], [ "# describe the data\nv_dep.describe()", "_____no_output_____" ] ], [ [ "<br>\n\n#### Assumption 2 - The independent variable should consist of two or more categorical, independent groups.\n***", "_____no_output_____" ] ], [ [ "# independent variabl\nv_indep = df['ball']\nv_indep", "_____no_output_____" ] ], [ [ "Note: There are three independent categories 'current', 'new', and 'prototype'", "_____no_output_____" ], [ "<br>\n\n**Assumption 3** - There should be independence of observations, which means that there is no relationship between the observations in each group or between the groups themselves.<br>\n<br>\nThis cannot really be shown using the data, it is more of an experiment design issue. Considering the nature of this study it is safe to say that the three different types of golf ball are totally independent of each other so assumption 3 is satisfied in this case.\n***\n<br>\n\n**Assumption 4** - There should be no significant outliers.\n***\nData visualisation is a very effective way to identify any outliers [Medium: detecting outliers](https://medium.com/analytics-vidhya/outliers-in-data-and-ways-to-detect-them-1c3a5f2c6b1e). Outliers are single data points that do not follow the general trend of the rest of the data. For this a box plot will be used from the Seaborn package [Seaborn: Boxplot](https://seaborn.pydata.org/generated/seaborn.boxplot.html). \n", "_____no_output_____" ] ], [ [ "# Boxplot of variables\nsns.boxplot(x=v_dep, y=v_indep)", "_____no_output_____" ] ], [ [ "There appear to be no outliers in this dataset.\n***", "_____no_output_____" ], [ "<br>\n\n**Assumption 5** - The dependent variable should be approximately normally distributed for each category of the independent variable.\nTesting for normality is performed using the Shapiro-Wilk test of normality. 
\n[Scipy.Stats](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.shapiro.html)\n***", "_____no_output_____" ] ], [ [ "# Get the distance values for the current golf ball using pandas query\nx_current = df.query('ball==\"current\"')['distance']\nx_current", "_____no_output_____" ], [ "# get the distance values for the new ball\nx_new = df.query('ball==\"new\"')['distance']\nx_new", "_____no_output_____" ], [ "# get the distance values for the new ball\nx_proto = df.query('ball==\"prototype\"')['distance']\nx_proto", "_____no_output_____" ], [ "# Perform shapiro test on both sets of data\nshapiro_test_current = ss.shapiro(x_current)\nshapiro_test_current", "_____no_output_____" ], [ "# Peform for new golf ball\nshapiro_test_new = ss.shapiro(x_new)\nshapiro_test_new", "_____no_output_____" ], [ "# Peform for prototype golf ball\nshapiro_test_proto = ss.shapiro(x_proto)\nshapiro_test_proto", "_____no_output_____" ] ], [ [ "All three datasets have a high probability of being normally distributed with p values significantly > 0.05\n***", "_____no_output_____" ], [ "**Data Visualisation Plot**\n\nA distribution plot from Seaborn is one of the most effective ways to display datasets. Display each of the datasets with the kernel density estimation. This is a nice way of visualising the probability distribution of variables together.", "_____no_output_____" ] ], [ [ "# KDEs of the three types of golf ball.\nsns.displot(x=v_dep, hue=v_indep, kind=\"kde\")", "_____no_output_____" ] ], [ [ "The plot above shows each of the variables. Each one is observed to display a decent normal distribution\n***", "_____no_output_____" ], [ "<br>\n\n**Assumption 6** - there needs to be homogeneity of variances. This can be tested using Levene test in scipy.stats. [Scipy.Stats.Levene](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.levene.html)\n***", "_____no_output_____" ] ], [ [ "# test the 3 variables for homegeneity of variance\nstat, p = ss.levene(x_current, x_new, x_proto)\nprint(f\"Levene test values: Stat={stat}, pvalue={p}\")", "Levene test values: Stat=0.2117145198685934, pvalue=0.8095049922609396\n" ] ], [ [ "Since the p value is greater than 0.05 it can be accepted that the 3 variables display close enough to equal variance to proceed with the onw-way ANOVA.\n***\n### One-Way ANOVA\nSince the data have satisfied the 6 assumptions required to perform a One-Way ANOVA, the test can now be performed on the data. ", "_____no_output_____" ] ], [ [ "# One-way ANOVA scipy.stats\nss.f_oneway(\n x_current,\n x_new,\n x_proto)", "_____no_output_____" ] ], [ [ "### Interpretting the results\nTo determine whether any of the differences between the means are statistically significant, a p-value > 0.05 would be required to accept the null hypothesis. In this case the p value of 0.158 means that there is no significant difference in mean distance between each golf ball and the manufacturer can proceed with the release of the new golf ball.\n<br><br>\nDisplay each of the golf ball mean distances using Numpy.", "_____no_output_____" ] ], [ [ "print(f\"Mean Current: {np.mean(x_current)}, Mean New: {np.mean(x_new)}, Mean Prototype: {np.mean(x_proto)}\")", "Mean Current: 270.275, Mean New: 267.5, Mean Prototype: 271.4\n" ] ], [ [ "**Conclusion** The mean distance of each golf ball does not differ significantly\n***\n#### END", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0a937ef70263f3b4dbbdb18aa9108fc27bc1d53
19,719
ipynb
Jupyter Notebook
doc/source/notebooks/monitor-tensorboard.ipynb
codelover-without-talent/GPflow
1af7b1ca7da6687974150a1440d821a106b2159d
[ "Apache-2.0" ]
1
2018-08-22T06:34:59.000Z
2018-08-22T06:34:59.000Z
doc/source/notebooks/monitor-tensorboard.ipynb
jackroos/GPflow
2c4cf398a248242298fd1c45125392839fe90b05
[ "Apache-2.0" ]
null
null
null
doc/source/notebooks/monitor-tensorboard.ipynb
jackroos/GPflow
2c4cf398a248242298fd1c45125392839fe90b05
[ "Apache-2.0" ]
2
2019-03-09T11:46:11.000Z
2021-12-20T10:22:34.000Z
40.741736
526
0.621228
[ [ [ "%matplotlib inline\nimport itertools\nimport os\nos.environ['CUDA_VISIBLE_DEVICES']=\"\"\nimport numpy as np\nimport gpflow\nimport gpflow.training.monitor as mon\nimport numbers\nimport matplotlib.pyplot as plt\nimport tensorflow as tf", "_____no_output_____" ] ], [ [ "# Demo: `gpflow.training.monitor`\nIn this notebook we'll demo how to use `gpflow.training.monitor` for logging the optimisation of a GPflow model.\n\n## Creating the GPflow model\nWe first generate some random data and create a GPflow model.\n\nUnder the hood, GPflow gives a unique name to each model which is used to name the Variables it creates in the TensorFlow graph containing a random identifier. This is useful in interactive sessions, where people may create a few models, to prevent variables with the same name conflicting. However, when loading the model, we need to make sure that the names of all the variables are exactly the same as in the checkpoint. This is why we pass name=\"SVGP\" to the model constructor, and why we use gpflow.defer_build().", "_____no_output_____" ] ], [ [ "np.random.seed(0)\nX = np.random.rand(10000, 1) * 10\nY = np.sin(X) + np.random.randn(*X.shape)\nXt = np.random.rand(10000, 1) * 10\nYt = np.sin(Xt) + np.random.randn(*Xt.shape)\n\nwith gpflow.defer_build():\n m = gpflow.models.SVGP(X, Y, gpflow.kernels.RBF(1), gpflow.likelihoods.Gaussian(),\n Z=np.linspace(0, 10, 5)[:, None],\n minibatch_size=100, name=\"SVGP\")\n m.likelihood.variance = 0.01\nm.compile()", "_____no_output_____" ] ], [ [ "Let's compute log likelihood before the optimisation", "_____no_output_____" ] ], [ [ "print('LML before the optimisation: %f' % m.compute_log_likelihood())", "LML before the optimisation: -1271605.621944\n" ] ], [ [ "We will be using a TensorFlow optimiser. All TensorFlow optimisers have a support for `global_step` variable. Its purpose is to track how many optimisation steps have occurred. It is useful to keep this in a TensorFlow variable as this allows it to be restored together with all the parameters of the model.\n\nThe code below creates this variable using a monitor's helper function. It is important to create it before building the monitor in case the monitor includes a checkpoint task. This is because the checkpoint internally uses the TensorFlow Saver which creates a list of variables to save. Therefore all variables expected to be saved by the checkpoint task should exist by the time the task is created.", "_____no_output_____" ] ], [ [ "session = m.enquire_session()\nglobal_step = mon.create_global_step(session)", "_____no_output_____" ] ], [ [ "## Construct the monitor\n\nNext we need to construct the monitor. `gpflow.training.monitor` provides classes that are building blocks for the monitor. Essengially, a monitor is a function that is provided as a callback to an optimiser. 
It consists of a number of tasks that may be executed at each step, subject to their running condition.\n\nIn this example, we want to:\n- log certain scalar parameters in TensorBoard,\n- log the full optimisation objective (log marginal likelihood bound) periodically, even though we optimise with minibatches,\n- store a backup of the optimisation process periodically,\n- log performance for a test set periodically.\n\nWe will define these tasks as follows:", "_____no_output_____" ] ], [ [ "print_task = mon.PrintTimingsTask().with_name('print')\\\n .with_condition(mon.PeriodicIterationCondition(10))\\\n .with_exit_condition(True)\n\nsleep_task = mon.SleepTask(0.01).with_name('sleep').with_name('sleep')\n\nsaver_task = mon.CheckpointTask('./monitor-saves').with_name('saver')\\\n .with_condition(mon.PeriodicIterationCondition(10))\\\n .with_exit_condition(True)\n\nfile_writer = mon.LogdirWriter('./model-tensorboard')\n\nmodel_tboard_task = mon.ModelToTensorBoardTask(file_writer, m).with_name('model_tboard')\\\n .with_condition(mon.PeriodicIterationCondition(10))\\\n .with_exit_condition(True)\n\nlml_tboard_task = mon.LmlToTensorBoardTask(file_writer, m).with_name('lml_tboard')\\\n .with_condition(mon.PeriodicIterationCondition(100))\\\n .with_exit_condition(True)", "_____no_output_____" ] ], [ [ "As the above code shows, each task can be assigned a name and running conditions. The name will be shown in the task timing summary.\n\nThere are two different types of running conditions: `with_condition` controls execution of the task at each iteration in the optimisation loop. `with_exit_condition` is a simple boolean flag indicating that the task should also run at the end of optimisation.\nIn this example we want to run our tasks periodically, at every iteration or every 10th or 100th iteration.\n\nNotice that the two TensorBoard tasks will write events into the same file. It is possible to share a file writer between multiple tasks. However it is not possible to share the same event location between multiple file writers. An attempt to open two writers with the same location will result in error.\n", "_____no_output_____" ], [ "## Custom tasks\nWe may also want to perfom certain tasks that do not have pre-defined `Task` classes. For example, we may want to compute the performance on a test set. 
Here we create such a class by extending `BaseTensorBoardTask` to log the testing benchmarks in addition to all the scalar parameters.", "_____no_output_____" ] ], [ [ "class CustomTensorBoardTask(mon.BaseTensorBoardTask):\n def __init__(self, file_writer, model, Xt, Yt):\n super().__init__(file_writer, model)\n self.Xt = Xt\n self.Yt = Yt\n self._full_test_err = tf.placeholder(gpflow.settings.tf_float, shape=())\n self._full_test_nlpp = tf.placeholder(gpflow.settings.tf_float, shape=())\n self._summary = tf.summary.merge([tf.summary.scalar(\"test_rmse\", self._full_test_err),\n tf.summary.scalar(\"test_nlpp\", self._full_test_nlpp)])\n \n def run(self, context: mon.MonitorContext, *args, **kwargs) -> None:\n minibatch_size = 100\n preds = np.vstack([self.model.predict_y(Xt[mb * minibatch_size:(mb + 1) * minibatch_size, :])[0]\n for mb in range(-(-len(Xt) // minibatch_size))])\n test_err = np.mean((Yt - preds) ** 2.0)**0.5\n self._eval_summary(context, {self._full_test_err: test_err, self._full_test_nlpp: 0.0})\n\n \ncustom_tboard_task = CustomTensorBoardTask(file_writer, m, Xt, Yt).with_name('custom_tboard')\\\n .with_condition(mon.PeriodicIterationCondition(100))\\\n .with_exit_condition(True)", "_____no_output_____" ] ], [ [ "Now we can put all these tasks into a monitor.", "_____no_output_____" ] ], [ [ "monitor_tasks = [print_task, model_tboard_task, lml_tboard_task, custom_tboard_task, saver_task, sleep_task]\nmonitor = mon.Monitor(monitor_tasks, session, global_step)", "_____no_output_____" ] ], [ [ "## Running the optimisation\nWe finally get to running the optimisation.\n\nWe may want to continue a previously run optimisation by resotring the TensorFlow graph from the latest checkpoint. Otherwise skip this step.", "_____no_output_____" ] ], [ [ "if os.path.isdir('./monitor-saves'):\n mon.restore_session(session, './monitor-saves')", "_____no_output_____" ], [ "optimiser = gpflow.train.AdamOptimizer(0.01)\n\nwith mon.Monitor(monitor_tasks, session, global_step, print_summary=True) as monitor:\n optimiser.minimize(m, step_callback=monitor, maxiter=450, global_step=global_step)\n\nfile_writer.close()", "Iteration 10\ttotal itr.rate 12.98/s\trecent itr.rate 12.98/s\topt.step 10\ttotal opt.rate 14.72/s\trecent opt.rate 14.72/s\nIteration 20\ttotal itr.rate 19.32/s\trecent itr.rate 37.77/s\topt.step 20\ttotal opt.rate 28.96/s\trecent opt.rate 887.41/s\nIteration 30\ttotal itr.rate 24.44/s\trecent itr.rate 51.97/s\topt.step 30\ttotal opt.rate 42.54/s\trecent opt.rate 690.68/s\nIteration 40\ttotal itr.rate 27.92/s\trecent itr.rate 48.73/s\topt.step 40\ttotal opt.rate 55.70/s\trecent opt.rate 771.83/s\nIteration 50\ttotal itr.rate 30.70/s\trecent itr.rate 51.09/s\topt.step 50\ttotal opt.rate 68.73/s\trecent opt.rate 1068.65/s\nIteration 60\ttotal itr.rate 33.03/s\trecent itr.rate 53.21/s\topt.step 60\ttotal opt.rate 80.79/s\trecent opt.rate 658.99/s\nIteration 70\ttotal itr.rate 34.66/s\trecent itr.rate 49.25/s\topt.step 70\ttotal opt.rate 92.57/s\trecent opt.rate 741.85/s\nIteration 80\ttotal itr.rate 35.12/s\trecent itr.rate 38.67/s\topt.step 80\ttotal opt.rate 103.80/s\trecent opt.rate 687.96/s\nIteration 90\ttotal itr.rate 36.37/s\trecent itr.rate 50.92/s\topt.step 90\ttotal opt.rate 114.64/s\trecent opt.rate 696.17/s\n" ] ], [ [ "Now lets compute the log likelihood again. Hopefully we will see an increase in its value", "_____no_output_____" ] ], [ [ "print('LML after the optimisation: %f' % m.compute_log_likelihood())", "LML after the optimisation: -68705.124191\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d0a940b70dfbc470e877b0fa3764051d62312852
26,026
ipynb
Jupyter Notebook
Notebooks/folder%20with%20escaped%20spaces/helloWithEscapedSpaces.ipynb
microsoft/azuredatastudio-smoke-test-repo
5bf18a8b7692d9cd798eb5a2b394777ed4921b3c
[ "MIT" ]
1
2021-04-19T16:37:14.000Z
2021-04-19T16:37:14.000Z
Notebooks/hello.ipynb
microsoft/azuredatastudio-smoke-test-repo
5bf18a8b7692d9cd798eb5a2b394777ed4921b3c
[ "MIT" ]
2
2021-06-22T23:22:44.000Z
2021-06-25T19:55:03.000Z
Notebooks/hello.ipynb
microsoft/azuredatastudio-smoke-test-repo
5bf18a8b7692d9cd798eb5a2b394777ed4921b3c
[ "MIT" ]
null
null
null
218.705882
20,701
0.866825
[ [ [ "import sys\n!{sys.executable} -m pip install matplotlib\n\n# Make a legend for specific lines.\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nt1 = np.arange(0.0, 2.0, 0.1)\nt2 = np.arange(0.0, 2.0, 0.01)\n\n# note that plot returns a list of lines. The \"l1, = plot\" usage\n# extracts the first element of the list into l1 using tuple\n# unpacking. So l1 is a Line2D instance, not a sequence of lines\nl1, = plt.plot(t2, np.exp(-t2))\nl2, l3 = plt.plot(t2, np.sin(2 * np.pi * t2), '--go', t1, np.log(1 + t1), '.')\nl4, = plt.plot(t2, np.exp(-t2) * np.sin(2 * np.pi * t2), 'rs-.')\n\nplt.xlabel('time')\nplt.ylabel('volts')\nplt.title('Damped oscillation')\n\nplt.show()", "Requirement already satisfied: matplotlib in ./azuredatastudio-python/0.0.1/lib/python3.6/site-packages (3.3.2)\r\nRequirement already satisfied: numpy>=1.15 in ./azuredatastudio-python/0.0.1/lib/python3.6/site-packages (from matplotlib) (1.18.3)\r\nRequirement already satisfied: pillow>=6.2.0 in ./azuredatastudio-python/0.0.1/lib/python3.6/site-packages (from matplotlib) (8.0.1)\r\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in ./azuredatastudio-python/0.0.1/lib/python3.6/site-packages (from matplotlib) (2.4.7)\r\nRequirement already satisfied: cycler>=0.10 in ./azuredatastudio-python/0.0.1/lib/python3.6/site-packages (from matplotlib) (0.10.0)\r\nRequirement already satisfied: certifi>=2020.06.20 in ./azuredatastudio-python/0.0.1/lib/python3.6/site-packages (from matplotlib) (2020.6.20)\r\nRequirement already satisfied: python-dateutil>=2.1 in ./azuredatastudio-python/0.0.1/lib/python3.6/site-packages (from matplotlib) (2.8.1)\r\nRequirement already satisfied: kiwisolver>=1.0.1 in ./azuredatastudio-python/0.0.1/lib/python3.6/site-packages (from matplotlib) (1.2.0)\r\nRequirement already satisfied: six in ./azuredatastudio-python/0.0.1/lib/python3.6/site-packages (from cycler>=0.10->matplotlib) (1.14.0)\r\n" ], [ "print(\"Hello World\")", "Hello World\n" ], [ "myName = \"Rudi2\"\n\ndef printName(name):\n print(\"Hello \" + name)\n\nprintName(myName)", "Hello Rudi2\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
d0a9575ef81607fcd684307102de98ae75dca69b
102,398
ipynb
Jupyter Notebook
Notebooks/TSQL/Jupiter/content/quickstarts/delta-lake.ipynb
buswrecker/Synapse
dc94db93659ccd9c478b22b406154e97aec8255e
[ "MIT" ]
256
2020-04-15T22:55:22.000Z
2022-03-28T19:40:28.000Z
Notebooks/TSQL/Jupiter/content/quickstarts/delta-lake.ipynb
buswrecker/Synapse
dc94db93659ccd9c478b22b406154e97aec8255e
[ "MIT" ]
27
2020-04-16T21:58:29.000Z
2022-03-28T18:51:28.000Z
Notebooks/TSQL/Jupiter/content/quickstarts/delta-lake.ipynb
buswrecker/Synapse
dc94db93659ccd9c478b22b406154e97aec8255e
[ "MIT" ]
241
2020-04-16T21:39:21.000Z
2022-03-26T19:24:18.000Z
55.290497
365
0.272183
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0a9718631c4df6ac673214ea7d25145418710e2
560,874
ipynb
Jupyter Notebook
tutorials/T4 - Dynamical systems, chaos, Lorenz.ipynb
brajard/DAPPER
1a513b2f23041b15fb335aeb17906607bf2a5350
[ "MIT" ]
null
null
null
tutorials/T4 - Dynamical systems, chaos, Lorenz.ipynb
brajard/DAPPER
1a513b2f23041b15fb335aeb17906607bf2a5350
[ "MIT" ]
null
null
null
tutorials/T4 - Dynamical systems, chaos, Lorenz.ipynb
brajard/DAPPER
1a513b2f23041b15fb335aeb17906607bf2a5350
[ "MIT" ]
3
2020-01-25T16:35:00.000Z
2021-04-08T03:20:48.000Z
904.635484
93,914
0.941033
[ [ [ "from resources.workspace import *\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Dynamical systems\nare systems (sets of equations) whose variables evolve in time (the equations contains time derivatives). As a branch of mathematics, its theory is mainly concerned with understanding the behaviour of solutions (trajectories) of the systems.\n\n## Chaos\nis also known as the butterfly effect: \"a buttefly that flaps its wings in Brazil can 'cause' a hurricane in Texas\".\n\nAs opposed to the opinions of Descartes/Newton/Laplace, chaos effectively means that even in a deterministic (non-stochastic) universe, we can only predict \"so far\" into the future. This will be illustrated below using two toy-model dynamical systems made by Edward Lorenz.", "_____no_output_____" ], [ "---\n## The Lorenz (1963) attractor", "_____no_output_____" ], [ "The [Lorenz-63 dynamical system](resources/DA_intro.pdf#page=22) can be derived as an extreme simplification of *Rayleigh-Bénard convection*: fluid circulation in a shallow layer of fluid uniformly heated (cooled) from below (above).\nThis produces the following 3 *coupled* ordinary differential equations (ODE):\n\n$$\n\\begin{aligned}\n\\dot{x} & = \\sigma(y-x) \\\\\n\\dot{y} & = \\rho x - y - xz \\\\\n\\dot{z} & = -\\beta z + xy\n\\end{aligned}\n$$\n\nwhere the \"dot\" represents the time derivative, $\\frac{d}{dt}$. The state vector is $\\mathbf{x} = (x,y,z)$, and the parameters are typically set to", "_____no_output_____" ] ], [ [ "SIGMA = 10.0\nBETA = 8/3\nRHO = 28.0", "_____no_output_____" ] ], [ [ "The ODEs can be coded as follows", "_____no_output_____" ] ], [ [ "def dxdt(xyz, t0, sigma, beta, rho):\n \"\"\"Compute the time-derivative of the Lorenz-63 system.\"\"\"\n x, y, z = xyz\n return [\n sigma * (y - x),\n x * (rho - z) - y,\n x * y - beta * z\n ]", "_____no_output_____" ] ], [ [ "#### Numerical integration to compute the trajectories", "_____no_output_____" ], [ "Below is a function to numerically **integrate** the ODEs and **plot** the solutions.\n\n<!--\nThis function also takes arguments to control ($\\sigma$, $\\beta$, $\\rho$) and of the numerical integration (`N`, `T`).\n-->", "_____no_output_____" ] ], [ [ "from scipy.integrate import odeint # integrator\n\noutput_63 = [None]\n\n@interact( sigma=(0.,50), beta=(0.,5), rho=(0.,50), N=(0,50), eps=(0.01,1), T=(0.,30))\ndef animate_lorenz(sigma=SIGMA, beta=BETA, rho=RHO , N=2, eps=0.01, T=1.0): \n \n # Initial conditions: perturbations around some \"proto\" state\n seed(1)\n x0_proto = array([-6.1, 1.2, 32.5])\n x0 = x0_proto + eps*randn((N, 3))\n\n # Compute trajectories\n tt = linspace(0, T, int(100*T)+1) # Time sequence for trajectory\n dd = lambda x,t: dxdt(x,t, sigma,beta,rho) # Define dxdt(x,t) with fixed params.\n xx = array([odeint(dd, xn, tt) for xn in x0]) # Integrate\n \n # PLOTTING\n ax = plt.figure(figsize=(10,5)).add_subplot(111, projection='3d')\n ax.axis('off')\n colors = plt.cm.jet(linspace(0,1,N))\n for i in range(N):\n ax.plot(*(xx[i,:,:].T),'-' ,c=colors[i])\n #ax.scatter3D(*xx[i,0 ,:],s=20,c=colors[i],marker='<')\n ax.scatter3D(*xx[i,-1,:],s=40,c=colors[i])\n \n output_63[0] = xx", "Widget Javascript not detected. It may not be installed or enabled properly.\n" ] ], [ [ "**Exc 4.2**: \n* Move `T` (use your arrow keys). What does it control? \n* Set `T` to something small; move the sliders for `N` and `eps`. What do they control?\n* Visually investigate the system's (i.e. the trajectories') sensititivy to initial conditions by moving `T`, `N` and `eps`. 
Very roughtly, estimate its predictability (i.e. how far into the future one can forecasts for a fixed `eps` and a fixed skill level)?", "_____no_output_____" ], [ "### Averages", "_____no_output_____" ], [ "**Exc 4.8*:** Slide `N` and `T` to their upper bounds. Execute the code cell below. It computes the average location of the $i$-th component of the state in two ways. Do you think the histograms actually approximate the same distribution? If so, then the system is called [ergodic](https://en.wikipedia.org/wiki/Ergodic_theory#Ergodic_theorems). In that case, does it matter if one computes statistics (over the system dynamics) by using several short experiment runs or one long run?", "_____no_output_____" ] ], [ [ "xx = output_63[0][:,:,0] # state component index 0 (must be 0,1,2)\n\nplt.hist(xx[:,-1] ,normed=1,label=\"ensemble dist.\",alpha=1.0) # -1: last time\nplt.hist(xx[-1,:] ,normed=1,label=\"temporal dist.\",alpha=0.5) # -1: last ensemble member\n#plt.hist(xx.ravel(),normed=1,label=\"total distribution\",alpha=0.5)\nplt.legend();", "_____no_output_____" ] ], [ [ "---\n\n## The \"Lorenz-95\" model\n\nThe Lorenz-96 system\nis a \"1D\" model, designed to simulate atmospheric convection. Each state variable $\\mathbf{x}_i$ can be considered some atmospheric quantity at grid point at a fixed lattitude of the earth. The system \nis given by the coupled set of ODEs,\n$$\n\\frac{d \\mathbf{x}_i}{dt} = (\\mathbf{x}_{i+1} − \\mathbf{x}_{i-2}) \\mathbf{x}_{i-1} − \\mathbf{x}_i + F\n\\, ,\n\\quad \\quad i \\in \\{1,\\ldots,m\\}\n\\, ,\n$$\nwhere the subscript indices apply periodically.\n\nThis model is not derived from physics but has similar characterisics, such as\n<ul>\n <li> there is external forcing, determined by a parameter $F$;</li>\n <li> there is internal dissipation, emulated by the linear term;</li>\n <li> there is energy-conserving advection, emulated by quadratic terms.</li>\n</ul>\n[Further description](resources/DA_intro.pdf#page=23).", "_____no_output_____" ], [ "**Exc 4.10:** Show that the \"total energy\" $\\sum_{i=1}^{m} \\mathbf{x}_i^2$ is preserved by the quadratic terms in the ODE. \n", "_____no_output_____" ] ], [ [ "show_answer(\"Hint: Lorenz energy\")", "_____no_output_____" ], [ "show_answer(\"Lorenz energy\")", "_____no_output_____" ] ], [ [ "The model is animated below.", "_____no_output_____" ] ], [ [ "# For all i, any n: s(x,n) := x[i+n], circularly.\ndef s(x,n):\n return np.roll(x,-n)\n\noutput_95 = [None]\n\ndef animate_lorenz_95(m=40,Force=8.0,eps=0.01,T=0):\n # Initial conditions: perturbations\n x0 = zeros(m)\n x0[0] = eps\n \n def dxdt(x,t):\n return (s(x,1)-s(x,-2))*s(x,-1) - x + Force\n \n tt = linspace(0, T, int(40*T)+1)\n xx = odeint(lambda x,t: dxdt(x,t), x0, tt)\n output_95[0] = xx\n \n plt.figure(figsize=(7,4))\n \n # Plot last only\n #plt.plot(xx[-1],'b')\n\n # Plot multiple\n Lag = 8\n colors = plt.cm.cubehelix(0.1+0.6*linspace(0,1,Lag))\n for k in range(Lag,0,-1):\n plt.plot(xx[max(0,len(xx)-k)],c=colors[Lag-k])\n\n plt.ylim(-10,20)\n plt.show()\n \ninteract(animate_lorenz_95,eps=(0.01,3,0.1),T=(0.05,40,0.05),Force=(0,40,1),m=(5,60,1));", "Widget Javascript not detected. 
It may not be installed or enabled properly.\n" ] ], [ [ "**Exc 4.12:** Under which settings of the force `F` is the system chaotic?", "_____no_output_____" ], [ "---\n## Error/perturbation dynamics", "_____no_output_____" ], [ "**Exc 4.14*:** Suppose $x(t)$ and $z(t)$ are \"twins\": they evolve according to the same law $f$:\n\n$$\\frac{dx}{dt} = f(x) \\\\ \\frac{dz}{dt} = f(z) \\, .$$\n\n\n* a) Define the \"error\": $\\varepsilon(t) = x(t) - z(t)$. \nSuppose $z(0)$ is close to $x(0)$. \nLet $F = \\frac{df}{dx}(x(t))$. \nShow that the error evolves according to the ordinary differential equation (ODE)\n$$\\frac{d \\varepsilon}{dt} \\approx F \\varepsilon \\, .$$ \n* b) Show that the error grows exponentially: $\\varepsilon(t) = \\varepsilon(0) e^{F t} $.\n* c)\n * 1) Suppose $F<1$. \n What happens to the error? \n What does this mean for predictability?\n * 2) Now suppose $F>1$. \n Given that all observations are uncertain (i.e. $R_t>0$, if only ever so slightly), \n can we ever hope to estimate $x(t)$ with 0 uncertainty?\n* d) Consider the ODE derived above. \nHow might we change it in order to model (i.e. emulate) a saturation of the error at some level? \nCan you solve this equation?\n* e) Now suppose $z(t)$ evolves according to $\\frac{dz}{dt} = g(z)$, with $g \\neq f$. \nWhat is now the differential equation governing the evolution of the error, $\\varepsilon$?", "_____no_output_____" ] ], [ [ "show_answer(\"error evolution\")", "_____no_output_____" ] ], [ [ "**Exc 4.16*:** Recall the Lorenz-63 system. What is its doubling time (i.e. estimate how long does it take for two trajectories to grow twice as far apart as they were to start with) ?\n*Hint: Set `N=50, eps=0.01, T=1,` and compute the spread of the particles now as compared to how they started*", "_____no_output_____" ] ], [ [ "xx = output_63[0][:,-1] # Ensemble of particles at the end of integration\n### compute your answer here ###", "_____no_output_____" ], [ "show_answer(\"doubling time\")", "_____no_output_____" ] ], [ [ "The answer actually depends on where in \"phase space\" the particles started.\nTo get a universal answer one must average these experiments for many different initial conditions.", "_____no_output_____" ], [ "---\n## In summary: \nPrediction (forecasting) with these systems is challenging because they are chaotic: small errors grow exponentially.\n\nConversely: chaos means that there is a limit to how far into the future we can make predictions (skillfully).\n\nIt is therefore crucial to minimize the intial error as much as possible. This is a task for DA.", "_____no_output_____" ], [ "### Next: [Ensemble [Monte-Carlo] approach](T5 - Ensemble [Monte-Carlo] approach.ipynb)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
d0a97631fb9609279ab9a34aa02d280c27d75d68
14,434
ipynb
Jupyter Notebook
IUCN/IUCN_Extinct_species.ipynb
krajai/testt
3aaf5fd7fe85e712c8c1615852b50f9ccb6737e5
[ "BSD-3-Clause" ]
1
2022-03-24T07:46:45.000Z
2022-03-24T07:46:45.000Z
IUCN/IUCN_Extinct_species.ipynb
PZawieja/awesome-notebooks
8ae86e5689749716e1315301cecdad6f8843dcf8
[ "BSD-3-Clause" ]
null
null
null
IUCN/IUCN_Extinct_species.ipynb
PZawieja/awesome-notebooks
8ae86e5689749716e1315301cecdad6f8843dcf8
[ "BSD-3-Clause" ]
null
null
null
27.493333
279
0.523625
[ [ [ "<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>", "_____no_output_____" ], [ "# IUCN - Extinct species\n<a href=\"https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/IUCN/IUCN_Extinct_species.ipynb\" target=\"_parent\"><img src=\"https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg\"/></a>", "_____no_output_____" ], [ "**Tags:** #iucn #opendata #extinctspecies #analytics #plotly", "_____no_output_____" ], [ "**Author:** [Martin Delasalle](https://github.com/delasalle-sio-martin)", "_____no_output_____" ], [ "Source : https://www.iucnredlist.org/statistics", "_____no_output_____" ], [ "If you want another view of the data : Link : https://ourworldindata.org/extinctions", "_____no_output_____" ], [ "### History\nThe initial aim was to compare the number of threatened species per species over time (e.g. number of pandas per year).\n\nAfter a lot of research, it turns out that this kind of data is not available or it is only data from one year (2015 or 2018).\n\nTherefore, we decided to start another project: Number of threatened species per year, with details by category using data from this site : https://www.iucnredlist.org/resources/summary-statistics#Summary%20Tables\n\nSo we took the pdf from this site and turned it into a csv.\nBut the data was heavy and not easy to use. Moreover, we thought that this would not necessarily be viable and adaptable over time.\n\nSo we decided to take another datasource on a similar subject : *Extinct Species*, from this website : https://www.iucnredlist.org/statistics", "_____no_output_____" ], [ "### Links that we found during the course ", "_____no_output_____" ], [ "- https://donnees.banquemondiale.org/indicator/EN.MAM.THRD.NO (only 2018)", "_____no_output_____" ], [ "- https://www.eea.europa.eu/data-and-maps/data/european-red-lists-4/european-red-list/european-red-list-csv-files/view (old Dataset, last upload was in 2015)", "_____no_output_____" ], [ "- https://www.worldwildlife.org/species/directory?page=2 (the years are not available)", "_____no_output_____" ], [ "- https://www.worldwildlife.org/pages/conservation-science-data-and-tools (apart from the case)", "_____no_output_____" ], [ "- https://databasin.org/datasets/68635d7c77f1475f9b6c1d1dbe0a4c4c/ (we can't use it)", "_____no_output_____" ], [ "- https://gisandscience.com/2009/12/01/download-datasets-from-the-world-wildlife-funds-conservation-science-program/ (no datas about threatened species)", "_____no_output_____" ], [ "- https://data.world/datasets/tiger (only about tigers but there are no datas usefull)", "_____no_output_____" ], [ "## Input", "_____no_output_____" ], [ "### Import library", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport plotly.express as px", "_____no_output_____" ] ], [ [ "### Setup your variables", "_____no_output_____" ], [ "👉 Download data in [CSV](https://www.iucnredlist.org/statistics) and drop it on your root folder", "_____no_output_____" ] ], [ [ "# Input csv\ncsv_input = \"Table 3 Species by kingdom and class - show all.csv\"", "_____no_output_____" ] ], [ [ "## Model", "_____no_output_____" ], [ "### Get data from csv", "_____no_output_____" ] ], [ [ "# We load the csv file\ndata = pd.read_csv(csv_input, ',')\n\n# We set the column Name as index\ndata.set_index('Name', inplace = True)\n\n# Then we select the columns EX, EW and Name, and all the lines we want in the graph\ntable = 
data.loc[[\"Total\",\n \"GASTROPODA\",\n \"BIVALVIA\",\n \"AVES\",\n \"MAMMALIA\",\n \"ACTINOPTERYGII\",\n \"CEPHALASPIDOMORPHI\",\n \"INSECTA\",\n \"AMPHIBIA\",\n \"REPTILIA\",\n \"ARACHNIDA\",\n \"CLITELLATA\",\n \"DIPLOPODA\",\n \"ENOPLA\",\n \"TURBELLARIA\",\n \"MALACOSTRACA\",\n \"MAXILLOPODA\",\n \"OSTRACODA\"]# add species here\n ,\"EX\":\"EW\"]\ntable", "_____no_output_____" ], [ "# We add a new column 'CATEGORY' to our Dataframe\ntable[\"CATEGORY\"] = [\"Total\",\n \"Molluscs\",\n \"Molluscs\",\n \"Birds\",\n \"Mammals\",\n \"Fishes\",\n \"Fishes\",\n \"Insects\",\n \"Amphibians\",\n \"Reptiles\",\n \"Others\",\n \"Others\",\n \"Others\",\n \"Others\",\n \"Others\",\n \"Crustaceans\",\n \"Crustaceans\",\n \"Crustaceans\"]\ntable = table.loc[:,[\"CATEGORY\",\"EX\"]] # we drop the column \"EW\"\ntable\n# ---NOTE : If you want to add new species, you have to also add his category", "_____no_output_____" ], [ "# We groupby CATEGORIES :\ntable.reset_index(drop=True, inplace=True)\ntable = table.groupby(['CATEGORY']).sum().reset_index()\ntable.rename(columns = {'EX':'Extincted'}, inplace=True)\ntable", "_____no_output_____" ] ], [ [ "## Output", "_____no_output_____" ], [ "### Plot graph", "_____no_output_____" ] ], [ [ "# We use plotly to show datas with an horizontal bar chart\ndef create_barchart(table):\n Graph = table.sort_values('Extincted', ascending=False)\n fig = px.bar(Graph,\n x=\"Extincted\",\n y=\"CATEGORY\",\n color=\"CATEGORY\",\n orientation=\"h\")\n fig.update_layout(title_text=\"Number of species that have gone extinct since 1500\",\n title_x=0.5)\n fig.add_annotation(x=800,\n y=0,\n text=\"Source : IUCN Red List of Threatened Species<br>https://www.iucnredlist.org/statistics\",\n showarrow=False)\n fig.show()\n return fig\n \nfig = create_barchart(table)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d0a9a5056eab2d0f9d8ce38760a01aabc0867ff7
22,397
ipynb
Jupyter Notebook
CVMusicSynthesis/working_Airdrums.ipynb
dattasiddhartha/HackUST2019
cfe2ee84615a8e3010b683be9b9060e10bf3a9fd
[ "MIT" ]
null
null
null
CVMusicSynthesis/working_Airdrums.ipynb
dattasiddhartha/HackUST2019
cfe2ee84615a8e3010b683be9b9060e10bf3a9fd
[ "MIT" ]
null
null
null
CVMusicSynthesis/working_Airdrums.ipynb
dattasiddhartha/HackUST2019
cfe2ee84615a8e3010b683be9b9060e10bf3a9fd
[ "MIT" ]
null
null
null
46.855649
2,644
0.549091
[ [ [ "#importing modules\nimport cv2\nimport numpy as np\nimport math\nimport time\nimport _thread\nimport wave\nimport struct\n\ndef playSound(name):\n import simpleaudio as sa\n\n wave_obj = sa.WaveObject.from_wave_file(name)\n play_obj = wave_obj.play()\n\n\n ####CRASHES ON FAST INPUT####\n # import pyglet\n # player = pyglet.media.Player()\n # src = pyglet.media.load(name)\n # player.volume = 0.1\n # player.queue(src)\n # player.play()\n\n #####VERY SLOW####\n # import pygame.mixer\n # pm = pygame.mixer\n # pm.init()\n # sound = pm.Sound(name)\n # sound.set_volume(0.5)\n # sound.play()\n\n\n\ndef drawEllipse(contours, text):\n if(contours == None or len(contours) == 0):\n return ((-100,-100), None)\n c = max(contours, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n if(cv2.contourArea(c) < 500):\n return ((-100,-100), None)\n ellipse = cv2.fitEllipse(c)\n cv2.ellipse(img, ellipse, (0,0,0), 2)\n\n blank = np.zeros(img.shape[0:2])\n ellipseImage = cv2.ellipse(blank, ellipse, (255, 255, 255), -2)\n # cv2.imshow(\"ell\",ellipseImage)\n\n M = cv2.moments(c)\n if M[\"m00\"] == 0:\n return\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n if radius > 10:\n # draw the ellipse and centroid on the frame,\n # then update the list of tracked points\n # cv2.circle(img, (int(x), int(y)), int(radius),(0, 0, 0), 2)\n cv2.circle(img, center, 3, (0, 0, 255), -1)\n cv2.putText(img,text, (center[0]+10,center[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 0),2)\n cv2.putText(img,\"(\"+str(center[0])+\",\"+str(center[1])+\")\", (center[0]+10,center[1]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 0),1)\n\n return (center, ellipseImage)\n\ndef detectCollision(imgA, imgB, velocity, touching, name):\n mA = cv2.moments(imgA, False)\n mB = cv2.moments(imgB, False)\n blank = np.zeros(img.shape[0:2])\n if type(imgA) == type(None) or type(imgB) == type(None):\n return\n intersection = cv2.bitwise_and(imgA, imgB)\n area = cv2.countNonZero(intersection)\n if area < 20:\n touching = False\n if area > 100 and not touching:\n # print(int(mA[\"m01\"] / mA[\"m00\"])< int(mB[\"m01\"] / mB[\"m00\"]))\n # print(area)\n if int(mA[\"m01\"] / mA[\"m00\"])< int(mB[\"m01\"] / mB[\"m00\"]):\n if velocity > 10:\n _thread.start_new_thread(playSound, (name,))\n # playSound(name)\n touching = True\n return touching\n\n#capturing video through webcam\ncap=cv2.VideoCapture(0)\nframeCount = 0\ntimeStart = time.time()\n\nb1 = (0,0)\nb2 = (0,0)\ncurrentBlueVelocity = 0\nr1 = (0,0)\nr2 = (0,0)\ncurrentRedVelocity = 0\n\nblueAndSnare = False\nblueAndHiHat = False\nredAndSnare = False\nredAndHiHat = False\nbooli = [False for i in range(2)]\n\nnumDrums = 0\ndrums = [None for i in range(2)]\ndef newDrum(pos, name):\n # pos = (x, y)\n drum = cv2.circle(img,pos, 50,(0,0, 0),5)\n cv2.putText(drum,name,pos,cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)\n blank = np.zeros(img.shape[0:2])\n drum_image = cv2.circle(blank.copy(), pos, 50, (255, 255, 255), -5)\n global numDrums\n numDrums += 1\n return (name, drum_image)\n\n\nwhile(1):\n now = time.time()\n fps = frameCount / (now - timeStart+1.0)\n frameCount += 1\n\n _, img = cap.read()\n img = cv2.flip(img, 1)\n\n # cv2.putText(img,\"FPS : \",(10,100),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 2)\n cv2.putText(img,\"FPS: %.2f\" % (fps),(10,100),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 2)\n\n # Add the drums\n drums[0] = newDrum((350, 400), \"snare\")\n drums[1] = newDrum((100, 400), \"hi_hat\")\n\n #converting frame(img i.e BGR) to HSV (hue-saturation-value)\n 
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\n #defining the range of red color\n red_lower=np.array([255,255,255],np.uint8)\n red_upper=np.array([255,255,255],np.uint8)\n\n #defining the Range of Blue color\n blue_lower=np.array([95,60,94],np.uint8)\n blue_upper=np.array([163,168,209],np.uint8)\n\n #finding the range of red,blue color in the image\n red=cv2.inRange(hsv, red_lower, red_upper)\n blue=cv2.inRange(hsv,blue_lower,blue_upper)\n\n #Morphological transformation, Dilation\n kernal = np.ones((5 ,5), \"uint8\")\n\n red=cv2.dilate(red, kernal)\n res=cv2.bitwise_and(img, img, mask = red)\n\n blue=cv2.dilate(blue,kernal)\n res1=cv2.bitwise_and(img, img, mask = blue)\n\n\n #Tracking the Red Color\n (contours,hierarchy)=cv2.findContours(red,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n (redCenter, redEllipse) = drawEllipse(contours, \"Red\")\n # cv2.drawContours(img, contours, -1 , (0,0,255), 2)\n\n\n #Tracking the Blue Color\n (contours,hierarchy)=cv2.findContours(blue,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # cv2.drawContours(img, contours, -1 , (255,0,0), 2)\n (blueCenter, blueEllipse) = drawEllipse(contours, \"Blue\")\n\n b1 = b2\n b2 = blueCenter\n bDelta = math.sqrt((b2[0] - b1[0])**2 + (b2[1] - b1[1])**2)\n bVelocity = bDelta * fps / 100\n if (bVelocity - currentBlueVelocity) > 10:\n cv2.putText(img,str(int(bVelocity)),(10, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)\n else:\n cv2.putText(img,str(int(currentBlueVelocity)),(10, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)\n currentBlueVelocity = bVelocity\n\n r1 = r2\n r2 = redCenter\n rDelta = math.sqrt((r2[0] - r1[0])**2 + (r2[1] - r1[1])**2)\n rVelocity = rDelta * fps / 100\n if (rVelocity - currentRedVelocity) > 10:\n cv2.putText(img,str(int(rVelocity)),(70, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)\n else:\n cv2.putText(img,str(int(currentRedVelocity)),(70, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)\n currentRedVelocity = rVelocity\n\n\n for i in range(len(drums)):\n # print(booli)\n booli[i] = detectCollision(blueEllipse, drums[i][1], currentBlueVelocity, booli[i], \"{0}.wav\".format(drums[i][0]))\n # blueAndSnare = detectCollision(blueEllipse, drums[0][1], blueAndSnare, \"snare.wav\")\n # blueAndHiHat = detectCollision(blueEllipse, drums[1][1], blueAndHiHat, \"hi_hat.wav\")\n\n # blueAndSnare = detectCollision(blueEllipse, snare_image, blueAndSnare, \"snare.wav\")\n # blueAndHiHat = detectCollision(blueEllipse, hi_hat_image, blueAndHiHat, \"Closed-Hi-Hat.wav\")\n #\n # redAndSnare = detectCollision(redEllipse, snare_image, redAndSnare, \"snare.wav\")\n # redAndHiHat = detectCollision(redEllipse, hi_hat_image, redAndHiHat, \"Closed-Hi-Hat.wav\")\n\n\n\n #cv2.imshow(\"Redcolour\",red)\n cv2.imshow(\"Color Tracking\",img)\n #cv2.imshow(\"red\",res)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n", "_____no_output_____" ], [ "# getting a black drum to hit", "_____no_output_____" ], [ "#importing modules\nimport cv2\nimport numpy as np\nimport math\nimport time\nimport _thread\nimport wave\nimport struct\n\ndef playSound(name):\n import simpleaudio as sa\n\n wave_obj = sa.WaveObject.from_wave_file(name)\n play_obj = wave_obj.play()\n\n\n ####CRASHES ON FAST INPUT####\n # import pyglet\n # player = pyglet.media.Player()\n # src = pyglet.media.load(name)\n # player.volume = 0.1\n # player.queue(src)\n # player.play()\n\n #####VERY SLOW####\n # import pygame.mixer\n # pm = pygame.mixer\n # pm.init()\n # sound = pm.Sound(name)\n # sound.set_volume(0.5)\n # 
sound.play()\n\n\n\ndef drawEllipse(contours, text):\n if(contours == None or len(contours) == 0):\n return ((-100,-100), None)\n c = max(contours, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n if(cv2.contourArea(c) < 500):\n return ((-100,-100), None)\n ellipse = cv2.fitEllipse(c)\n cv2.ellipse(img, ellipse, (0,0,0), 2)\n\n blank = np.zeros(img.shape[0:2])\n ellipseImage = cv2.ellipse(blank, ellipse, (255, 255, 255), -2)\n # cv2.imshow(\"ell\",ellipseImage)\n\n M = cv2.moments(c)\n if M[\"m00\"] == 0:\n return\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n if radius > 10:\n # draw the ellipse and centroid on the frame,\n # then update the list of tracked points\n # cv2.circle(img, (int(x), int(y)), int(radius),(0, 0, 0), 2)\n cv2.circle(img, center, 3, (0, 0, 255), -1)\n cv2.putText(img,text, (center[0]+10,center[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 0),2)\n cv2.putText(img,\"(\"+str(center[0])+\",\"+str(center[1])+\")\", (center[0]+10,center[1]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 0),1)\n\n return (center, ellipseImage)\n\ndef detectCollision(imgA, imgB, velocity, touching, name):\n mA = cv2.moments(imgA, False)\n mB = cv2.moments(imgB, False)\n blank = np.zeros(img.shape[0:2])\n if type(imgA) == type(None) or type(imgB) == type(None):\n return\n intersection = cv2.bitwise_and(imgA, imgB)\n area = cv2.countNonZero(intersection)\n if area < 20: # default 20\n touching = False\n if area > 100 and not touching:\n # print(int(mA[\"m01\"] / mA[\"m00\"])< int(mB[\"m01\"] / mB[\"m00\"]))\n # print(area)\n if int(mA[\"m01\"] / mA[\"m00\"])< int(mB[\"m01\"] / mB[\"m00\"]):\n if velocity > 10:\n _thread.start_new_thread(playSound, (name,))\n # playSound(name)\n touching = True\n return touching\n\n#capturing video through webcam\ncap=cv2.VideoCapture(0)\nframeCount = 0\ntimeStart = time.time()\n\nb1 = (0,0)\nb2 = (0,0)\ncurrentBlueVelocity = 0\nr1 = (0,0)\nr2 = (0,0)\ncurrentRedVelocity = 0\n\nblueAndSnare = False\nblueAndHiHat = False\nredAndSnare = False\nredAndHiHat = False\nbooli = [False for i in range(2)]\n\nnumDrums = 0\ndrums = [None for i in range(2)]\ndef newDrum(pos, name):\n # pos = (x, y)\n drum = cv2.circle(img,pos, 50,(0,0, 0),5)\n cv2.putText(drum,name,pos,cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)\n blank = np.zeros(img.shape[0:2])\n drum_image = cv2.circle(blank.copy(), pos, 50, (255, 255, 255), -5)\n global numDrums\n numDrums += 1\n return (name, drum_image)\n\n\nwhile(1):\n now = time.time()\n fps = frameCount / (now - timeStart+1.0)\n frameCount += 1\n\n _, img = cap.read()\n img = cv2.flip(img, 1)\n\n # cv2.putText(img,\"FPS : \",(10,100),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 2)\n cv2.putText(img,\"FPS: %.2f\" % (fps),(10,100),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 2)\n\n # Add the drums\n drums[0] = newDrum((350, 400), \"snare\")\n drums[1] = newDrum((100, 400), \"hi_hat\")\n\n #converting frame(img i.e BGR) to HSV (hue-saturation-value)\n hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\n #defining the range of red color\n red_lower=np.array([0,0,0],np.uint8)\n red_upper=np.array([105,105,105],np.uint8)\n\n #defining the Range of Blue color\n blue_lower=np.array([0,0,0],np.uint8)\n blue_upper=np.array([105,105,105],np.uint8)\n\n #finding the range of red,blue color in the image\n red=cv2.inRange(hsv, red_lower, red_upper)\n blue=cv2.inRange(hsv,blue_lower,blue_upper)\n\n #Morphological transformation, Dilation\n kernal = np.ones((5 ,5), \"uint8\")\n\n red=cv2.dilate(red, kernal)\n res=cv2.bitwise_and(img, img, 
mask = red)\n\n blue=cv2.dilate(blue,kernal)\n res1=cv2.bitwise_and(img, img, mask = blue)\n\n\n #Tracking the Red Color\n (contours,hierarchy)=cv2.findContours(red,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n (redCenter, redEllipse) = drawEllipse(contours, \"Red\")\n # cv2.drawContours(img, contours, -1 , (0,0,255), 2)\n\n\n #Tracking the Blue Color\n (contours,hierarchy)=cv2.findContours(blue,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # cv2.drawContours(img, contours, -1 , (255,0,0), 2)\n (blueCenter, blueEllipse) = drawEllipse(contours, \"Blue\")\n\n b1 = b2\n b2 = blueCenter\n bDelta = math.sqrt((b2[0] - b1[0])**2 + (b2[1] - b1[1])**2)\n bVelocity = bDelta * fps / 100\n if (bVelocity - currentBlueVelocity) > 10:\n cv2.putText(img,str(int(bVelocity)),(10, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)\n else:\n cv2.putText(img,str(int(currentBlueVelocity)),(10, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)\n currentBlueVelocity = bVelocity\n\n r1 = r2\n r2 = redCenter\n rDelta = math.sqrt((r2[0] - r1[0])**2 + (r2[1] - r1[1])**2)\n rVelocity = rDelta * fps / 100\n if (rVelocity - currentRedVelocity) > 10:\n cv2.putText(img,str(int(rVelocity)),(70, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)\n else:\n cv2.putText(img,str(int(currentRedVelocity)),(70, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)\n currentRedVelocity = rVelocity\n\n\n for i in range(len(drums)):\n # print(booli)\n booli[i] = detectCollision(blueEllipse, drums[i][1], currentBlueVelocity, booli[i], \"{0}.wav\".format(drums[i][0]))\n # blueAndSnare = detectCollision(blueEllipse, drums[0][1], blueAndSnare, \"snare.wav\")\n # blueAndHiHat = detectCollision(blueEllipse, drums[1][1], blueAndHiHat, \"hi_hat.wav\")\n\n # blueAndSnare = detectCollision(blueEllipse, snare_image, blueAndSnare, \"snare.wav\")\n # blueAndHiHat = detectCollision(blueEllipse, hi_hat_image, blueAndHiHat, \"Closed-Hi-Hat.wav\")\n #\n # redAndSnare = detectCollision(redEllipse, snare_image, redAndSnare, \"snare.wav\")\n # redAndHiHat = detectCollision(redEllipse, hi_hat_image, redAndHiHat, \"Closed-Hi-Hat.wav\")\n\n\n\n #cv2.imshow(\"Redcolour\",red)\n cv2.imshow(\"Color Tracking\",img)\n #cv2.imshow(\"red\",res)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
d0a9a59e92bdc2934e90dfd13b98cc0d11ac67d6
890,088
ipynb
Jupyter Notebook
Jupyter_notebooks_and_data/Final Unit 2 Build Week Model.ipynb
willstauffernorris/boat_data
efd33dcf40155fcf8701269201d36b018a2249e0
[ "MIT" ]
null
null
null
Jupyter_notebooks_and_data/Final Unit 2 Build Week Model.ipynb
willstauffernorris/boat_data
efd33dcf40155fcf8701269201d36b018a2249e0
[ "MIT" ]
null
null
null
Jupyter_notebooks_and_data/Final Unit 2 Build Week Model.ipynb
willstauffernorris/boat_data
efd33dcf40155fcf8701269201d36b018a2249e0
[ "MIT" ]
null
null
null
35.204999
59,432
0.530812
[ [ [ "# Can I predict how much people will spend online?\n### Insights from Google Analytics data\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport plotly.express as px\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf = pd.read_csv('mergedGAdata.csv')\nprint(df.shape)", "(3532, 16)\n" ] ], [ [ "# Wrangling", "_____no_output_____" ] ], [ [ "##Turn Session Duration into numbers not strings\nprint(type(df['Session Duration'][0]))\ndf['Session Duration'] = df['Session Duration'].str.replace(' seconds','')\n\ndf['Session Duration'] = pd.to_numeric(df['Session Duration'])\nprint(type(df['Session Duration'][0]))\n\n#display(df.head(5))", "<class 'str'>\n<class 'numpy.int64'>\n" ], [ "### Dropping an unneccesary column\ndf = df.drop(columns=['Unnamed: 0'])\n\n###Turn dates into datetime objects\nprint(type(df['Date'][0]))\ndf['Date'] = pd.to_datetime(df['Date'], format='%Y%m%d')\n##Checking to make sure it worked\nprint(type(df['Date'][0]))\n\ndf['Year']= df['Date'].dt.year\ndf['Month']= df['Date'].dt.month\ndf['Day']= df['Date'].dt.day\n\ndf['Day_of_year'] = ((df['Month']*30.333-30).round(0) + df['Day'])\n\n\n\n#X_train['year'] = X_train['date_cleaned'].dt.year\n\n## Get rid of a pesky NaN in the final observation\ndf.drop(df.tail(1).index,inplace=True)\n\nprint(df.shape)\n\n#df.head(30)", "<class 'numpy.int64'>\n<class 'pandas._libs.tslibs.timestamps.Timestamp'>\n(3531, 19)\n" ], [ "## Drop transaction ID\ndf = df.drop(columns=['Transaction ID'])\n\n## Dropping the datetime object becuase it breaks my Random Forest\ndf = df.drop(columns=['Date'])", "_____no_output_____" ] ], [ [ "# Exploratory Visualizations", "_____no_output_____" ] ], [ [ "\nfig = px.bar(df, x='Day_of_year', y='Revenue', \n #color=\"Session Duration\"\n )\nfig.show()", "_____no_output_____" ], [ "matplotlib_figure = plt.figure()\nx = df['Day_of_year']\ny = df['Revenue']\nplt.plot(x, y);", "_____no_output_____" ], [ "from plotly.tools import mpl_to_plotly\nplotly_figure = mpl_to_plotly(matplotlib_figure)\nplotly_figure.show()", "_____no_output_____" ], [ "import plotly.express as px\nfig = px.scatter(df, x='Page Depth', y='Revenue', \n #color=\"Session Duration\"\n )\n\nfig.show()", "_____no_output_____" ] ], [ [ "# Building a model", "_____no_output_____" ] ], [ [ "## Checking to make sure none of my data types will break the models\ndf.dtypes", "_____no_output_____" ] ], [ [ "# Break into Test/Train ", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "train, test = train_test_split(df, train_size=0.80, test_size=0.20, \n #stratify=df['Product'], \n random_state=42)\n\nprint(train.shape)\ntest.shape", "(2824, 17)\n" ], [ "# Arrange data into X features matrix and y target vector\ntarget = 'Revenue'\n\n#model_deploy = ['Day_of_year', 'Page Depth', 'Session Duration']\nX_train = train.drop(columns=target)\ny_train = train[target]\n\nX_test = test.drop(columns=target)\ny_test = test[target]", "_____no_output_____" ], [ "sns.distplot(y_train);\n## My target variable, 'Revenue' has quite a bimodal distribution", "_____no_output_____" ] ], [ [ "# Baselines\n", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mean_absolute_error\nprice = df['Revenue']\nbest_guess = price.mean()\nbest_guess = [best_guess] * len(price)\nbaseline_mae = mean_absolute_error(price, best_guess)\nprint(f'The Baseline mean absolute error is ${baseline_mae.round(2)}')", "The Baseline mean absolute error is 
$813.25\n" ] ], [ [ "## A Linear Regression", "_____no_output_____" ] ], [ [ "import category_encoders as ce\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nlr = make_pipeline(\n ce.TargetEncoder(), \n LinearRegression()\n)\n\nlr.fit(X_train, y_train)\nprint('Linear Regression R^2', lr.score(X_test, y_test))\n\ny_pred = lr.predict(X_test)\nprint(f'The Baseline mean absolute error is ${baseline_mae.round(2)}')\nmae = mean_absolute_error(y_test, y_pred)\nprint(f'The linear regression mean absolute error is ${mae.round(2)}')\n", "Linear Regression R^2 0.0005382190419163102\nThe Baseline mean absolute error is $813.25\nThe linear regression mean absolute error is $759.96\n" ], [ "'''\nRESULTS\nPass 1\nLinear Regression R^2 -0.00442905637790969\nGradient Boosting R^2 -0.023036898195452293\n\nPass2\nLinear Regression R^2 -0.004429056377912577\nGradient Boosting R^2 0.07102524633751028\n\nPass3\nLinear Regression R^2 0.002162738520710472\nGradient Boosting R^2 0.08770212107051312\n\nPass4 (with only 3 features)\nLinear Regression R^2 0.02979757194722665\nGradient Boosting R^2 0.076352681199784\n\nPass5 (with all features)\nLinear Regression R^2 0.0005382190419163102\nGradient Boosting R^2 0.07698263724426613\n'''", "_____no_output_____" ] ], [ [ "# XGBoost Model", "_____no_output_____" ] ], [ [ "from sklearn.metrics import r2_score\nfrom xgboost import XGBRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import randint\n\npipeline = make_pipeline(\n ce.OrdinalEncoder(), \n XGBRegressor(objective='reg:squarederror')\n)\n\n\nparam_distributions = {\n 'xgbregressor__n_estimators': randint(50, 500)\n}\n\n\nsearch = RandomizedSearchCV(\n pipeline, \n param_distributions=param_distributions, \n n_iter=10, \n cv=10, \n scoring='neg_mean_absolute_error', \n verbose=10, \n return_train_score=True, \n n_jobs=-1\n)\n\n\nsearch.fit(X_train, y_train)\n\n\nprint('Best hyperparameters', search.best_params_)\nprint('Cross-validation MAE', -search.best_score_)\n\n\npipeline = search.best_estimator_\n\n\npipeline.fit(X_train, y_train)\ny_pred = pipeline.predict(X_test)\nprint('Gradient Boosting R^2', r2_score(y_test, y_pred))\n\nmae = mean_absolute_error(y_test, y_pred)\n\nprint(f'The Baseline mean absolute error is ${baseline_mae.round(2)}')\nprint(f'Model MAE ${mae.round(2)}')", "Fitting 10 folds for each of 10 candidates, totalling 100 fits\n" ], [ "# Get feature importances\nrf = pipeline.named_steps['xgbregressor']\nimportances = pd.Series(rf.feature_importances_, X_train.columns)\n\n# Plot feature importances\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nn = 20\nplt.figure(figsize=(10,n/2))\nplt.title(f'Top {n} features')\nimportances.sort_values()[-n:].plot.barh(color='grey');", "_____no_output_____" ] ], [ [ "## Permutation Importances", "_____no_output_____" ] ], [ [ "transformers = make_pipeline(\n ce.OrdinalEncoder(), \n #SimpleImputer(strategy='median')\n)\n\nX_train_transformed = transformers.fit_transform(X_train)\nX_test_transformed = transformers.transform(X_test)\n\nmodel = XGBRegressor(n_estimators=100, random_state=42, n_jobs=-1)\nmodel.fit(X_train_transformed, y_train)", "[09:04:48] WARNING: /usr/local/miniconda/conda-bld/xgboost_1572315027083/work/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n" ], [ "# Ignore warnings\n\nimport eli5\nfrom eli5.sklearn 
import PermutationImportance\n\npermuter = PermutationImportance(\n model, \n #scoring='accuracy', \n n_iter=5, \n random_state=42\n)\n\npermuter.fit(X_test_transformed, y_test)", "_____no_output_____" ], [ "feature_names = X_test.columns.tolist()\n#pd.Series(permuter.feature_importances_, feature_names).sort_values()", "_____no_output_____" ], [ "eli5.show_weights(\n permuter, \n top=None, # No limit: show permutation importances for all features\n feature_names=feature_names) # must be a list", "_____no_output_____" ] ], [ [ "# Partial dependence plot", "_____no_output_____" ] ], [ [ "from pdpbox.pdp import pdp_isolate, pdp_plot\n\nfeature = 'Page Depth'\n\nisolated = pdp_isolate(\n model=pipeline,\n dataset=X_test,\n model_features=X_test.columns,\n feature=feature)\npdp_plot(isolated, feature_name=feature);", "_____no_output_____" ], [ "feature = 'Session Duration'\n\nisolated = pdp_isolate(\n model=pipeline,\n dataset=X_test,\n model_features=X_test.columns,\n feature=feature)\npdp_plot(isolated, feature_name=feature);", "_____no_output_____" ], [ "feature = 'Day_of_year'\n\nisolated = pdp_isolate(\n model=pipeline,\n dataset=X_test,\n model_features=X_test.columns,\n feature=feature)", "_____no_output_____" ], [ "pdp_plot(isolated, feature_name=feature);", "_____no_output_____" ], [ "\nfeature = 'Days Since Last Session'\n\nisolated = pdp_isolate(\n model=pipeline,\n dataset=X_test,\n model_features=X_test.columns,\n feature=feature)\npdp_plot(isolated, feature_name=feature);\n\n", "_____no_output_____" ] ], [ [ "# Rebuilding the model with only a few features\n### This is so I can deploy a much simpler model with fewer sliders on Heroku\n", "_____no_output_____" ] ], [ [ "# Arrange data into X features matrix and y target vector\ntarget = 'Revenue'\n\nmodel_deploy = ['Day_of_year', 'Page Depth', 'Session Duration']\nX_train = train[model_deploy]\ny_train = train[target]\n\nX_test = test[model_deploy]\ny_test = test[target]", "_____no_output_____" ], [ "from sklearn.metrics import r2_score\nfrom xgboost import XGBRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import randint\n\npipeline = make_pipeline(\n ce.OrdinalEncoder(), \n XGBRegressor(objective='reg:squarederror')\n)\n\n\nparam_distributions = {\n 'xgbregressor__n_estimators': randint(50, 500)\n}\n\n\nsearch = RandomizedSearchCV(\n pipeline, \n param_distributions=param_distributions, \n n_iter=10, \n cv=10, \n scoring='neg_mean_absolute_error', \n verbose=10, \n return_train_score=True, \n n_jobs=-1\n)\n\n\nsearch.fit(X_train, y_train)\n\n\nprint('Best hyperparameters', search.best_params_)\nprint('Cross-validation MAE', -search.best_score_)\n\n\npipeline = search.best_estimator_\n\n\npipeline.fit(X_train, y_train)\ny_pred = pipeline.predict(X_test)\nprint('Gradient Boosting R^2', r2_score(y_test, y_pred))\n\nmae = mean_absolute_error(y_test, y_pred)\n\nprint(f'The Baseline mean absolute error is ${baseline_mae.round(2)}')\nprint(f'Model MAE ${mae.round(2)}')", "Fitting 10 folds for each of 10 candidates, totalling 100 fits\n" ] ], [ [ "# Exporting a .joblib file for my Heroku model", "_____no_output_____" ] ], [ [ "from joblib import dump\ndump(pipeline, 'pipeline.joblib', compress=True)", "_____no_output_____" ], [ "import joblib\nimport sklearn\nimport category_encoders as ce\nimport 
xgboost\nprint(f'joblib=={joblib.__version__}')\nprint(f'scikit-learn=={sklearn.__version__}')\nprint(f'category_encoders=={ce.__version__}')\nprint(f'xgboost=={xgboost.__version__}')\n", "joblib==0.14.1\nscikit-learn==0.22.1\ncategory_encoders==2.1.0\nxgboost==0.90\n" ] ], [ [ "# Further data exploration- things I could do:", "_____no_output_____" ], [ "### Add in more features by exporting more csv's from Google Analytics\n\n### Make more visualizations\n\n### Try replacing 0's in 'session duration' with NaNs\n\n### Make some kind of feature that describes if a product was purchased together with another product.\n(This pseudocode could tell if somebody placed a follow up order the same day)\n\nif (df['Date'][i]==df['Date'][i+1]) & (df['Transaction ID'][i]+1==df['Transaction ID'][i+1]):\n    return True\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
d0a9c17ffdee721a39942cb260292608c6528fb2
75,706
ipynb
Jupyter Notebook
pyspark_parquet_to_csv.ipynb
ebrunk/iPy_notebooks
9f2abd54945ab8660fd1b381bc45b1e8dedc3f83
[ "MIT" ]
null
null
null
pyspark_parquet_to_csv.ipynb
ebrunk/iPy_notebooks
9f2abd54945ab8660fd1b381bc45b1e8dedc3f83
[ "MIT" ]
null
null
null
pyspark_parquet_to_csv.ipynb
ebrunk/iPy_notebooks
9f2abd54945ab8660fd1b381bc45b1e8dedc3f83
[ "MIT" ]
null
null
null
49.839368
228
0.508612
[ [ [ "## Imports and simple case example in pyspark", "_____no_output_____" ] ], [ [ "import pyspark\nfrom pyspark import SparkContext\nfrom pyspark.sql.types import *\nfrom pyspark import sql", "_____no_output_____" ], [ "## simple example - imports flat file\n\nfrom pyspark import SparkContext\n\nlogFile = \"/home/ebrunk/Dropbox/SBRG/CHO/ribo_prof/Database/command\" # Should be some file on your system\nsc = SparkContext(\"local\", \"Simple App\")\nlogData = sc.textFile(logFile).cache()\n\nnumAs = logData.filter(lambda s: 'a' in s).count()\nnumBs = logData.filter(lambda s: 'b' in s).count()\n\nprint \"Lines with a: %i, lines with b: %i\" % (numAs, numBs)", "Lines with a: 1, lines with b: 0\n" ], [ "os.chdir(\"/home/ebrunk/Dropbox/SBRG/RECON3D/fatcat/domain_clustering/uniprotpdb/\")", "_____no_output_____" ] ], [ [ "## gets uniprot to pdb mapping df & human genome mapping", "_____no_output_____" ] ], [ [ "!pwd", "/home/ebrunk/Dropbox/SBRG/RECON3D/fatcat/domain_clustering/uniprotpdb\r\n" ], [ "!wget -r http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/", "--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/\nResolving dataframes.rcsb.org (dataframes.rcsb.org)... 132.249.213.101\nConnecting to dataframes.rcsb.org (dataframes.rcsb.org)|132.249.213.101|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 1727 (1.7K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html’\n\n100%[======================================>] 1,727 --.-K/s in 0s \n\n2016-06-14 22:41:07 (325 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html’ saved [1727/1727]\n\nLoading robots.txt; please ignore errors.\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/robots.txt\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 404 Not Found\n2016-06-14 22:41:07 ERROR 404: Not Found.\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/icons/blank.gif\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 148 [image/gif]\nSaving to: ‘dataframes.rcsb.org/icons/blank.gif’\n\n100%[======================================>] 148 --.-K/s in 0s \n\n2016-06-14 22:41:07 (50.5 MB/s) - ‘dataframes.rcsb.org/icons/blank.gif’ saved [148/148]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/?C=N;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1727 (1.7K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=N;O=D’\n\n100%[======================================>] 1,727 --.-K/s in 0s \n\n2016-06-14 22:41:07 (332 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=N;O=D’ saved [1727/1727]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/?C=M;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1727 (1.7K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=M;O=A’\n\n100%[======================================>] 1,727 --.-K/s in 0s \n\n2016-06-14 22:41:07 (599 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=M;O=A’ saved [1727/1727]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/?C=S;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 1727 (1.7K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=S;O=A’\n\n100%[======================================>] 1,727 --.-K/s in 0s \n\n2016-06-14 22:41:07 (611 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=S;O=A’ saved [1727/1727]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/?C=D;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1727 (1.7K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=D;O=A’\n\n100%[======================================>] 1,727 --.-K/s in 0s \n\n2016-06-14 22:41:07 (600 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=D;O=A’ saved [1727/1727]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/icons/back.gif\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 216 [image/gif]\nSaving to: ‘dataframes.rcsb.org/icons/back.gif’\n\n100%[======================================>] 216 --.-K/s in 0s \n\n2016-06-14 22:41:07 (92.9 MB/s) - ‘dataframes.rcsb.org/icons/back.gif’ saved [216/216]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 978 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html’\n\n100%[======================================>] 978 --.-K/s in 0s \n\n2016-06-14 22:41:07 (358 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html’ saved [978/978]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/icons/unknown.gif\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 245 [image/gif]\nSaving to: ‘dataframes.rcsb.org/icons/unknown.gif’\n\n100%[======================================>] 245 --.-K/s in 0s \n\n2016-06-14 22:41:07 (91.2 MB/s) - ‘dataframes.rcsb.org/icons/unknown.gif’ saved [245/245]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/_SUCCESS\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 0\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/_SUCCESS’\n\n [ <=> ] 0 --.-K/s in 0s \n\n2016-06-14 22:41:07 (0.00 B/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/_SUCCESS’ saved [0/0]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/_common_metadata\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 672\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/_common_metadata’\n\n100%[======================================>] 672 --.-K/s in 0s \n\n2016-06-14 22:41:07 (256 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/_common_metadata’ saved [672/672]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/_metadata\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 1439 (1.4K)\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/_metadata’\n\n100%[======================================>] 1,439 --.-K/s in 0s \n\n2016-06-14 22:41:07 (523 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/_metadata’ saved [1439/1439]\n\n--2016-06-14 22:41:07-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/part-r-00000-04f7101a-c0a5-4d6d-b703-3b1ace2b2f76.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 52394007 (50M)\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/part-r-00000-04f7101a-c0a5-4d6d-b703-3b1ace2b2f76.snappy.parquet’\n\n100%[======================================>] 52,394,007 21.1MB/s in 2.4s \n\n2016-06-14 22:41:09 (21.1 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/part-r-00000-04f7101a-c0a5-4d6d-b703-3b1ace2b2f76.snappy.parquet’ saved [52394007/52394007]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/?C=N;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1727 (1.7K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=N;O=A’\n\n100%[======================================>] 1,727 --.-K/s in 0s \n\n2016-06-14 22:41:09 (745 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=N;O=A’ saved [1727/1727]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/?C=M;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1727 (1.7K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=M;O=D’\n\n100%[======================================>] 1,727 --.-K/s in 0s \n\n2016-06-14 22:41:09 (809 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=M;O=D’ saved [1727/1727]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/?C=S;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1727 (1.7K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=S;O=D’\n\n100%[======================================>] 1,727 --.-K/s in 0s \n\n2016-06-14 22:41:09 (928 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=S;O=D’ saved [1727/1727]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/20160517/?C=D;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1727 (1.7K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=D;O=D’\n\n100%[======================================>] 1,727 --.-K/s in 0s \n\n2016-06-14 22:41:09 (815 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/20160517/index.html?C=D;O=D’ saved [1727/1727]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/?C=N;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 978 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=N;O=D’\n\n100%[======================================>] 978 --.-K/s in 0s \n\n2016-06-14 22:41:09 (458 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=N;O=D’ saved [978/978]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/?C=M;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 978 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=M;O=A’\n\n100%[======================================>] 978 --.-K/s in 0s \n\n2016-06-14 22:41:09 (536 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=M;O=A’ saved [978/978]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/?C=S;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 978 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=S;O=A’\n\n100%[======================================>] 978 --.-K/s in 0s \n\n2016-06-14 22:41:09 (474 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=S;O=A’ saved [978/978]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/?C=D;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 978 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=D;O=A’\n\n100%[======================================>] 978 --.-K/s in 0s \n\n2016-06-14 22:41:09 (456 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=D;O=A’ saved [978/978]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/index.html’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (539 MB/s) - ‘dataframes.rcsb.org/parquet/index.html’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/icons/folder.gif\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 225 [image/gif]\nSaving to: ‘dataframes.rcsb.org/icons/folder.gif’\n\n100%[======================================>] 225 --.-K/s in 0s \n\n2016-06-14 22:41:09 (104 MB/s) - ‘dataframes.rcsb.org/icons/folder.gif’ saved [225/225]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/?C=N;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 978 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=N;O=A’\n\n100%[======================================>] 978 --.-K/s in 0s \n\n2016-06-14 22:41:09 (461 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=N;O=A’ saved [978/978]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/?C=M;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 978 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=M;O=D’\n\n100%[======================================>] 978 --.-K/s in 0s \n\n2016-06-14 22:41:09 (464 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=M;O=D’ saved [978/978]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/?C=S;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 978 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=S;O=D’\n\n100%[======================================>] 978 --.-K/s in 0s \n\n2016-06-14 22:41:09 (468 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=S;O=D’ saved [978/978]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/uniprotpdb/?C=D;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 978 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=D;O=D’\n\n100%[======================================>] 978 --.-K/s in 0s \n\n2016-06-14 22:41:09 (464 MB/s) - ‘dataframes.rcsb.org/parquet/uniprotpdb/index.html?C=D;O=D’ saved [978/978]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/?C=N;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/index.html?C=N;O=D’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (543 MB/s) - ‘dataframes.rcsb.org/parquet/index.html?C=N;O=D’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/?C=M;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/index.html?C=M;O=A’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (526 MB/s) - ‘dataframes.rcsb.org/parquet/index.html?C=M;O=A’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/?C=S;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/index.html?C=S;O=A’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (545 MB/s) - ‘dataframes.rcsb.org/parquet/index.html?C=S;O=A’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/?C=D;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/index.html?C=D;O=A’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (569 MB/s) - ‘dataframes.rcsb.org/parquet/index.html?C=D;O=A’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 11475 (11K) [text/html]\nSaving to: ‘dataframes.rcsb.org/index.html’\n\n100%[======================================>] 11,475 --.-K/s in 0s \n\n2016-06-14 22:41:09 (86.4 MB/s) - ‘dataframes.rcsb.org/index.html’ saved [11475/11475]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 980 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/index.html’\n\n100%[======================================>] 980 --.-K/s in 0s \n\n2016-06-14 22:41:09 (466 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/index.html’ saved [980/980]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/?C=N;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/index.html?C=N;O=A’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (538 MB/s) - ‘dataframes.rcsb.org/parquet/index.html?C=N;O=A’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/?C=M;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/index.html?C=M;O=D’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (545 MB/s) - ‘dataframes.rcsb.org/parquet/index.html?C=M;O=D’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/?C=S;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/index.html?C=S;O=D’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (542 MB/s) - ‘dataframes.rcsb.org/parquet/index.html?C=S;O=D’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/?C=D;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/index.html?C=D;O=D’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (533 MB/s) - ‘dataframes.rcsb.org/parquet/index.html?C=D;O=D’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 301 Moved Permanently\nLocation: http://dataframes.rcsb.org/parquet/ [following]\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1156 (1.1K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet.1’\n\n100%[======================================>] 1,156 --.-K/s in 0s \n\n2016-06-14 22:41:09 (558 MB/s) - ‘dataframes.rcsb.org/parquet.1’ saved [1156/1156]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/orc\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 301 Moved Permanently\nLocation: http://dataframes.rcsb.org/orc/ [following]\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/orc/\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 742 [text/html]\nSaving to: ‘dataframes.rcsb.org/orc’\n\n100%[======================================>] 742 --.-K/s in 0s \n\n2016-06-14 22:41:09 (368 MB/s) - ‘dataframes.rcsb.org/orc’ saved [742/742]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/?C=N;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 980 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=N;O=D’\n\n100%[======================================>] 980 --.-K/s in 0s \n\n2016-06-14 22:41:09 (564 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=N;O=D’ saved [980/980]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/?C=M;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 980 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=M;O=A’\n\n100%[======================================>] 980 --.-K/s in 0s \n\n2016-06-14 22:41:09 (486 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=M;O=A’ saved [980/980]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/?C=S;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 980 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=S;O=A’\n\n100%[======================================>] 980 --.-K/s in 0s \n\n2016-06-14 22:41:09 (530 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=S;O=A’ saved [980/980]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/?C=D;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 980 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=D;O=A’\n\n100%[======================================>] 980 --.-K/s in 0s \n\n2016-06-14 22:41:09 (493 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=D;O=A’ saved [980/980]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/20160517/\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1192 (1.2K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html’\n\n100%[======================================>] 1,192 --.-K/s in 0s \n\n2016-06-14 22:41:09 (567 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html’ saved [1192/1192]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/orc/?C=N;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 742 [text/html]\nSaving to: ‘dataframes.rcsb.org/orc/index.html?C=N;O=D’\n\n100%[======================================>] 742 --.-K/s in 0s \n\n2016-06-14 22:41:09 (316 MB/s) - ‘dataframes.rcsb.org/orc/index.html?C=N;O=D’ saved [742/742]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/orc/?C=M;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 742 [text/html]\nSaving to: ‘dataframes.rcsb.org/orc/index.html?C=M;O=A’\n\n100%[======================================>] 742 --.-K/s in 0s \n\n2016-06-14 22:41:09 (359 MB/s) - ‘dataframes.rcsb.org/orc/index.html?C=M;O=A’ saved [742/742]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/orc/?C=S;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 742 [text/html]\nSaving to: ‘dataframes.rcsb.org/orc/index.html?C=S;O=A’\n\n100%[======================================>] 742 --.-K/s in 0s \n\n2016-06-14 22:41:09 (437 MB/s) - ‘dataframes.rcsb.org/orc/index.html?C=S;O=A’ saved [742/742]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/orc/?C=D;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 742 [text/html]\nSaving to: ‘dataframes.rcsb.org/orc/index.html?C=D;O=A’\n\n100%[======================================>] 742 --.-K/s in 0s \n\n2016-06-14 22:41:09 (370 MB/s) - ‘dataframes.rcsb.org/orc/index.html?C=D;O=A’ saved [742/742]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/?C=N;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 980 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=N;O=A’\n\n100%[======================================>] 980 --.-K/s in 0s \n\n2016-06-14 22:41:09 (476 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=N;O=A’ saved [980/980]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/?C=M;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 980 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=M;O=D’\n\n100%[======================================>] 980 --.-K/s in 0s \n\n2016-06-14 22:41:09 (482 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=M;O=D’ saved [980/980]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/?C=S;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 980 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=S;O=D’\n\n100%[======================================>] 980 --.-K/s in 0s \n\n2016-06-14 22:41:09 (495 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=S;O=D’ saved [980/980]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/?C=D;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 980 [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=D;O=D’\n\n100%[======================================>] 980 --.-K/s in 0s \n\n2016-06-14 22:41:09 (457 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/index.html?C=D;O=D’ saved [980/980]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/20160517/?C=N;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1192 (1.2K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html?C=N;O=D’\n\n100%[======================================>] 1,192 --.-K/s in 0s \n\n2016-06-14 22:41:09 (578 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html?C=N;O=D’ saved [1192/1192]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/20160517/?C=M;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 1192 (1.2K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html?C=M;O=A’\n\n100%[======================================>] 1,192 --.-K/s in 0s \n\n2016-06-14 22:41:09 (572 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html?C=M;O=A’ saved [1192/1192]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/20160517/?C=S;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1192 (1.2K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html?C=S;O=A’\n\n100%[======================================>] 1,192 --.-K/s in 0s \n\n2016-06-14 22:41:09 (579 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html?C=S;O=A’ saved [1192/1192]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/20160517/?C=D;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 1192 (1.2K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html?C=D;O=A’\n\n100%[======================================>] 1,192 --.-K/s in 0s \n\n2016-06-14 22:41:09 (592 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/index.html?C=D;O=A’ saved [1192/1192]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg37/\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 5609 (5.5K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg37/index.html’\n\n100%[======================================>] 5,609 --.-K/s in 0s \n\n2016-06-14 22:41:09 (1.66 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg37/index.html’ saved [5609/5609]\n\n--2016-06-14 22:41:09-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 5417 (5.3K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/index.html’\n\n100%[======================================>] 5,417 --.-K/s in 0s \n\n2016-06-14 22:41:09 (2.25 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/index.html’ saved [5417/5417]\n\nFINISHED --2016-06-14 22:41:09--\nTotal wall clock time: 2.5s\nDownloaded: 58 files, 50M in 2.4s (21.1 MB/s)\n" ], [ "!mv dataframes.rcsb.org/* 20160517/.\n!mv 20160517/parquet/uniprotpdb/20160517/* 20160517/.\n!rm -rf 20160517/", "_____no_output_____" ], [ "!ls 20160517/", "_common_metadata\r\nindex.html\r\nindex.html?C=D;O=A\r\nindex.html?C=D;O=D\r\nindex.html?C=M;O=A\r\nindex.html?C=M;O=D\r\nindex.html?C=N;O=A\r\nindex.html?C=N;O=D\r\nindex.html?C=S;O=A\r\nindex.html?C=S;O=D\r\n_metadata\r\npart-r-00000-04f7101a-c0a5-4d6d-b703-3b1ace2b2f76.snappy.parquet\r\n_SUCCESS\r\n" ], [ "!wget -r --no-parent http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/", "--2016-06-14 22:43:42-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/\nResolving dataframes.rcsb.org (dataframes.rcsb.org)... 132.249.213.101\nConnecting to dataframes.rcsb.org (dataframes.rcsb.org)|132.249.213.101|:80... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 5165 (5.0K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html’\n\n100%[======================================>] 5,165 --.-K/s in 0s \n\n2016-06-14 22:43:42 (951 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html’ saved [5165/5165]\n\nLoading robots.txt; please ignore errors.\n--2016-06-14 22:43:42-- http://dataframes.rcsb.org/robots.txt\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 404 Not Found\n2016-06-14 22:43:42 ERROR 404: Not Found.\n\n--2016-06-14 22:43:42-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/?C=N;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 5165 (5.0K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=N;O=D’\n\n100%[======================================>] 5,165 --.-K/s in 0s \n\n2016-06-14 22:43:42 (1.53 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=N;O=D’ saved [5165/5165]\n\n--2016-06-14 22:43:42-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/?C=M;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 5165 (5.0K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=M;O=A’\n\n100%[======================================>] 5,165 --.-K/s in 0s \n\n2016-06-14 22:43:42 (1.61 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=M;O=A’ saved [5165/5165]\n\n--2016-06-14 22:43:42-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/?C=S;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 5165 (5.0K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=S;O=A’\n\n100%[======================================>] 5,165 --.-K/s in 0s \n\n2016-06-14 22:43:42 (2.11 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=S;O=A’ saved [5165/5165]\n\n--2016-06-14 22:43:42-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/?C=D;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 5165 (5.0K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=D;O=A’\n\n100%[======================================>] 5,165 --.-K/s in 0s \n\n2016-06-14 22:43:42 (2.24 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=D;O=A’ saved [5165/5165]\n\n--2016-06-14 22:43:42-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/_SUCCESS\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 0\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/_SUCCESS’\n\n [ <=> ] 0 --.-K/s in 0s \n\n2016-06-14 22:43:42 (0.00 B/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/_SUCCESS’ saved [0/0]\n\n--2016-06-14 22:43:42-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/_common_metadata\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 1372 (1.3K)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/_common_metadata’\n\n100%[======================================>] 1,372 --.-K/s in 0s \n\n2016-06-14 22:43:42 (711 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/_common_metadata’ saved [1372/1372]\n\n--2016-06-14 22:43:42-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/_metadata\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 22870 (22K)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/_metadata’\n\n100%[======================================>] 22,870 --.-K/s in 0.001s \n\n2016-06-14 22:43:42 (21.2 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/_metadata’ saved [22870/22870]\n\n--2016-06-14 22:43:42-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00000-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 41131721 (39M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00000-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 41,131,721 91.2MB/s in 0.4s \n\n2016-06-14 22:43:43 (91.2 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00000-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [41131721/41131721]\n\n--2016-06-14 22:43:43-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00001-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 37922994 (36M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00001-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 37,922,994 100MB/s in 0.4s \n\n2016-06-14 22:43:43 (100 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00001-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [37922994/37922994]\n\n--2016-06-14 22:43:43-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00002-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 69418738 (66M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00002-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 69,418,738 98.6MB/s in 0.7s \n\n2016-06-14 22:43:44 (98.6 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00002-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [69418738/69418738]\n\n--2016-06-14 22:43:44-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00003-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 31746582 (30M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00003-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 31,746,582 99.7MB/s in 0.3s \n\n2016-06-14 22:43:44 (99.7 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00003-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [31746582/31746582]\n\n--2016-06-14 22:43:44-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00004-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 28284636 (27M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00004-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 28,284,636 95.9MB/s in 0.3s \n\n2016-06-14 22:43:44 (95.9 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00004-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [28284636/28284636]\n\n--2016-06-14 22:43:44-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00005-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 46103077 (44M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00005-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 46,103,077 96.7MB/s in 0.5s \n\n2016-06-14 22:43:45 (96.7 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00005-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [46103077/46103077]\n\n--2016-06-14 22:43:45-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00006-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 74192388 (71M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00006-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 74,192,388 95.0MB/s in 0.7s \n\n2016-06-14 22:43:45 (95.0 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00006-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [74192388/74192388]\n\n--2016-06-14 22:43:45-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00007-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 36477321 (35M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00007-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 36,477,321 97.3MB/s in 0.4s \n\n2016-06-14 22:43:46 (97.3 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00007-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [36477321/36477321]\n\n--2016-06-14 22:43:46-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00008-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 27625511 (26M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00008-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 27,625,511 93.4MB/s in 0.3s \n\n2016-06-14 22:43:46 (93.4 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00008-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [27625511/27625511]\n\n--2016-06-14 22:43:46-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00009-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 32280478 (31M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00009-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 32,280,478 97.0MB/s in 0.3s \n\n2016-06-14 22:43:46 (97.0 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00009-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [32280478/32280478]\n\n--2016-06-14 22:43:46-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00010-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 25681505 (24M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00010-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 25,681,505 97.4MB/s in 0.3s \n\n2016-06-14 22:43:47 (97.4 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00010-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [25681505/25681505]\n\n--2016-06-14 22:43:47-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00011-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 20590806 (20M)\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00011-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’\n\n100%[======================================>] 20,590,806 94.5MB/s in 0.2s \n\n2016-06-14 22:43:47 (94.5 MB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-00011-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet’ saved [20590806/20590806]\n\n--2016-06-14 22:43:47-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/?C=N;O=A\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 5165 (5.0K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=N;O=A’\n\n100%[======================================>] 5,165 --.-K/s in 0s \n\n2016-06-14 22:43:47 (1.82 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=N;O=A’ saved [5165/5165]\n\n--2016-06-14 22:43:47-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/?C=M;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 
200 OK\nLength: 5165 (5.0K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=M;O=D’\n\n100%[======================================>] 5,165 --.-K/s in 0s \n\n2016-06-14 22:43:47 (1.68 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=M;O=D’ saved [5165/5165]\n\n--2016-06-14 22:43:47-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/?C=S;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 5165 (5.0K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=S;O=D’\n\n100%[======================================>] 5,165 --.-K/s in 0s \n\n2016-06-14 22:43:47 (1.81 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=S;O=D’ saved [5165/5165]\n\n--2016-06-14 22:43:47-- http://dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/?C=D;O=D\nReusing existing connection to dataframes.rcsb.org:80.\nHTTP request sent, awaiting response... 200 OK\nLength: 5165 (5.0K) [text/html]\nSaving to: ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=D;O=D’\n\n100%[======================================>] 5,165 --.-K/s in 0s \n\n2016-06-14 22:43:47 (2.07 GB/s) - ‘dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/index.html?C=D;O=D’ saved [5165/5165]\n\nFINISHED --2016-06-14 22:43:47--\nTotal wall clock time: 4.8s\nDownloaded: 24 files, 450M in 4.7s (96.4 MB/s)\n" ], [ "!ls dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/", "_common_metadata\r\nindex.html\r\nindex.html?C=D;O=A\r\nindex.html?C=D;O=D\r\nindex.html?C=M;O=A\r\nindex.html?C=M;O=D\r\nindex.html?C=N;O=A\r\nindex.html?C=N;O=D\r\nindex.html?C=S;O=A\r\nindex.html?C=S;O=D\r\n_metadata\r\npart-r-00000-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00001-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00002-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00003-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00004-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00005-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00006-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00007-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00008-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00009-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00010-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\npart-r-00011-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\r\n_SUCCESS\r\n" ] ], [ [ "## Load data into df", "_____no_output_____" ] ], [ [ "sqlContext = pyspark.SQLContext(sc)\nuniprotPDB = sqlContext.load(\"/home/ebrunk/Dropbox/SBRG/RECON3D/fatcat/domain_clustering/uniprotpdb/20160517/part-r-00000-04f7101a-c0a5-4d6d-b703-3b1ace2b2f76.snappy.parquet\")\nuniprotPDB.registerTempTable(\"uniprotPDB\")", "_____no_output_____" ], [ "uniprotPDB.show(1)", "+-------+-------+----------+-----+---------+----------+\n|chainId|insCode|pdbAtomPos|pdbId|uniProtId|uniProtPos|\n+-------+-------+----------+-----+---------+----------+\n| A| null| 0| 101M| P02185| 1|\n+-------+-------+----------+-----+---------+----------+\nonly showing top 1 row\n\n" ], [ "chr11 = 
sqlContext.load(\"/home/ebrunk/Dropbox/SBRG/RECON3D/fatcat/domain_clustering/uniprotpdb/dataframes.rcsb.org/parquet/humangenome/20160517/hg38/chr11/part-r-000*-3b4d8124-bea8-4b62-844c-cb51354aae17.snappy.parquet\")", "_____no_output_____" ], [ "chr11.show(1)", "+---+--------------+-------+--------+----------+--------+-----+--------------------------+---------+-----------+-----+--------+---------+----------+\n|cds|chromosomeName|exonNum|geneName|geneSymbol|inCoding|inUtr|isoformCorrectedUniprotPos|isoformNr|orientation|phase|position|uniProtId|uniProtPos|\n+---+--------------+-------+--------+----------+--------+-----+--------------------------+---------+-----------+-----+--------+---------+----------+\n| -1| chr11| -1| AAMDC| AAMDC| false| true| -1| 0| +| 0|77821113| Q9H7C9| -1|\n+---+--------------+-------+--------+----------+--------+-----+--------------------------+---------+-----------+-----+--------+---------+----------+\nonly showing top 1 row\n\n" ] ], [ [ "## Perform mapping", "_____no_output_____" ] ], [ [ "chr11.registerTempTable(\"chr11\")\n\nsickeCellSNP = sqlContext.sql(\"select * from chr11 where position = 5227002\")\n\nsickeCellSNP.show()", "+---+--------------+-------+--------+----------+--------+-----+--------------------------+---------+-----------+-----+--------+---------+----------+\n|cds|chromosomeName|exonNum|geneName|geneSymbol|inCoding|inUtr|isoformCorrectedUniprotPos|isoformNr|orientation|phase|position|uniProtId|uniProtPos|\n+---+--------------+-------+--------+----------+--------+-----+--------------------------+---------+-----------+-----+--------+---------+----------+\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7|\n+---+--------------+-------+--------+----------+--------+-----+--------------------------+---------+-----------+-----+--------+---------+----------+\n\n" ], [ "sickeCellSNP.registerTempTable(\"snp\")\n\n# join genomic info with UniProt to PDB mapping\nmap2PDB = sqlContext.sql(\"select * from snp left join uniprotPDB where snp.uniProtId = uniprotPDB.uniProtId and snp.uniProtPos = uniprotPDB.uniProtPos \")\nmap2PDB.show()", "+---+--------------+-------+--------+----------+--------+-----+--------------------------+---------+-----------+-----+--------+---------+----------+-------+-------+----------+-----+---------+----------+\n|cds|chromosomeName|exonNum|geneName|geneSymbol|inCoding|inUtr|isoformCorrectedUniprotPos|isoformNr|orientation|phase|position|uniProtId|uniProtPos|chainId|insCode|pdbAtomPos|pdbId|uniProtId|uniProtPos|\n+---+--------------+-------+--------+----------+--------+-----+--------------------------+---------+-----------+-----+--------+---------+----------+-------+-------+----------+-----+---------+----------+\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1A00| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| D| null| 6| 1A00| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1A01| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| D| null| 6| 1A01| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1A0U| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| D| null| 6| 1A0U| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1A0Z| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| D| null| 6| 1A0Z| P68871| 7|\n| 20| 
chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1A3N| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| D| null| 6| 1A3N| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1A3O| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| D| null| 6| 1A3O| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1ABW| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| D| null| 6| 1ABW| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1ABY| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| D| null| 6| 1ABY| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1AJ9| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 149| 1B86| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| D| null| 549| 1B86| P68871| 7|\n| 20| chr11| 3| HBB| HBB| false|false| 7| 0| -| 1| 5227002| P68871| 7| B| null| 6| 1BAB| P68871| 7|\n+---+--------------+-------+--------+----------+--------+-----+--------------------------+---------+-----------+-----+--------+---------+----------+-------+-------+----------+-----+---------+----------+\nonly showing top 20 rows\n\n" ] ], [ [ "## save df to pandas", "_____no_output_____" ] ], [ [ "type(map2PDB)", "_____no_output_____" ], [ "map2PDB_pandas = map2PDB.toPandas()", "_____no_output_____" ], [ "type(map2PDB_pandas)", "_____no_output_____" ], [ "map2PDB_pandas.head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d0a9cf62eba5533e0622d6a9f3ebd4ef9f79e3be
24,416
ipynb
Jupyter Notebook
code/02_modeling/ScoringAndEvaluation.ipynb
Azure/MachineLearningSamples-TDSPUCIAdultIncome
ed6e61ffb83b758d1e21e2ea95fa1808a797f4a5
[ "MIT" ]
6
2017-12-13T18:38:33.000Z
2018-09-12T14:57:09.000Z
code/02_modeling/ScoringAndEvaluation.ipynb
Azure-Samples/MachineLearningSamples-TDSPUCIAdultIncome
ed6e61ffb83b758d1e21e2ea95fa1808a797f4a5
[ "MIT" ]
4
2017-10-26T11:01:16.000Z
2018-05-14T16:57:49.000Z
code/02_modeling/ScoringAndEvaluation.ipynb
Azure/MachineLearningSamples-TDSPUCIAdultIncome
ed6e61ffb83b758d1e21e2ea95fa1808a797f4a5
[ "MIT" ]
6
2017-10-18T13:30:07.000Z
2018-10-25T02:13:32.000Z
190.75
21,334
0.906291
[ [ [ "## IMPORT LIBRARIES", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas\nimport pickle\nfrom ScoringAndEvaluation import Get_Class_Probabilities, Evaluate_Predictions\n%matplotlib inline \n\n", "_____no_output_____" ] ], [ [ "## SET FILE LOCATION, LOCATION TO TEST DATA AND MODEL FILE", "_____no_output_____" ] ], [ [ "dirpath = '..\\\\..\\\\outputs'\n\n# Transformed test data for scoring\ntransformed_test_file = dirpath + \"\\\\test_data_engineered.pkl\"\ninFile = open(transformed_test_file, 'rb')\ntestDataFrame = pickle.load(inFile)\ninFile.close()\n\n## Model files\nRandomForest_model_file = dirpath + '\\\\CVRandomForestModel.pkl'\nElasticNet_model_file = dirpath + '\\\\CVElasticNetModel.pkl'\n\n## PDF files for output of ROC plots\nRandomForestROCplotpath = dirpath + '\\\\RandomForestROCCurve.pdf'\nElasticNetROCplotpath = dirpath + '\\\\ElasticNetROCCurve.pdf'", "_____no_output_____" ] ], [ [ "## PERFORM SCORING AND EVALUATION", "_____no_output_____" ] ], [ [ "# LOAD TEST DATA\ny_test = testDataFrame[\"income\"].values\nX_test = testDataFrame.drop(\"income\", axis=1)\n\n# RANDOMFOREST MODEL\ny_pred = Get_Class_Probabilities(transformed_test_file, RandomForest_model_file);\nRFAuc = Evaluate_Predictions(y_pred, y_test, RandomForestROCplotpath)\nprint (\"Random Forest AUC: \" + str(round(RFAuc, 3)))\n\n# ELASTICNET MODEL\ny_pred = Get_Class_Probabilities(transformed_test_file, ElasticNet_model_file);\nEnetAuc = Evaluate_Predictions(y_pred, y_test, ElasticNetROCplotpath)\nprint (\"ElasticNet AUC: \" + str(round(EnetAuc, 3)))\n", "Random Forest AUC: 0.917\nElasticNet AUC: 0.897\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0a9cfaaceeaa6a31c6c7287e87abab56bd08156
133,086
ipynb
Jupyter Notebook
3.4-classifying-movie-reviews.ipynb
alro923/deep-learning-with-python-notebooks
73d5f39f5c02b31e9b825d51c0060a40868be14a
[ "MIT" ]
null
null
null
3.4-classifying-movie-reviews.ipynb
alro923/deep-learning-with-python-notebooks
73d5f39f5c02b31e9b825d51c0060a40868be14a
[ "MIT" ]
null
null
null
3.4-classifying-movie-reviews.ipynb
alro923/deep-learning-with-python-notebooks
73d5f39f5c02b31e9b825d51c0060a40868be14a
[ "MIT" ]
null
null
null
40.811408
15,400
0.565672
[ [ [ "import keras\nkeras.__version__", "_____no_output_____" ] ], [ [ "# 영화 리뷰 분류: 이진 분류 예제\n\n이 노트북은 [케라스 창시자에게 배우는 딥러닝](https://tensorflow.blog/케라스-창시자에게-배우는-딥러닝/) 책의 3장 4절의 코드 예제입니다. 책에는 더 많은 내용과 그림이 있습니다. 이 노트북에는 소스 코드에 관련된 설명만 포함합니다. 이 노트북의 설명은 케라스 버전 2.2.2에 맞추어져 있습니다. 케라스 최신 버전이 릴리스되면 노트북을 다시 테스트하기 때문에 설명과 코드의 결과가 조금 다를 수 있습니다. (현재 내가 사용하는 것은 케라스 버전 2.3.1 이다.)\n\n----\n\n2종 분류 또는 이진 분류는 아마도 가장 널리 적용된 머신 러닝 문제일 것입니다. 이 예제에서 리뷰 텍스트를 기반으로 영화 리뷰를 긍정과 부정로 분류하는 법을 배우겠습니다.", "_____no_output_____" ], [ "## IMDB 데이터셋\n\n인터넷 영화 데이터베이스로부터 가져온 양극단의 리뷰 50,000개로 이루어진 IMDB 데이터셋을 사용하겠습니다. 이 데이터셋은 훈련 데이터 25,000개와 테스트 데이터 25,000개로 나뉘어 있고 각각 50%는 부정, 50%는 긍정 리뷰로 구성되어 있습니다.\n\n왜 훈련 데이터와 테스트 데이터를 나눌까요? 같은 데이터에서 머신 러닝 모델을 훈련하고 테스트해서는 절대 안 되기 때문입니다! 모델이 훈련 데이터에서 잘 작동한다는 것이 처음 만난 데이터에서도 잘 동작한다는 것을 보장하지 않습니다. 중요한 것은 새로운 데이터에 대한 모델의 성능입니다(사실 훈련 데이터의 레이블은 이미 알고 있기 때문에 이를 예측하는 모델은 필요하지 않습니다). 예를 들어 모델이 훈련 샘플과 타깃 사이의 매핑을 모두 외워버릴 수 있습니다. 이런 모델은 처음 만나는 데이터에서 타깃을 예측하는 작업에는 쓸모가 없습니다. 다음 장에서 이에 대해 더 자세히 살펴보겠습니다.\n\nMNIST 데이터셋처럼 IMDB 데이터셋도 케라스에 포함되어 있습니다. 이 데이터는 전처리되어 있어 각 리뷰(단어 시퀀스)가 숫자 시퀀스로 변환되어 있습니다. 여기서 각 숫자는 사전에 있는 고유한 단어를 나타냅니다.\n\n다음 코드는 데이터셋을 로드합니다(처음 실행하면 17MB 정도의 데이터가 컴퓨터에 다운로드됩니다):", "_____no_output_____" ] ], [ [ "from keras.datasets import imdb\n\n(train_data, train_labels), (test_data, test_labels), = imdb.load_data(num_words=10000)", "_____no_output_____" ] ], [ [ "매개변수 `num_words=10000`은 훈련 데이터에서 가장 자주 나타나는 단어 10,000개만 사용하겠다는 의미입니다. 드물게 나타나는 단어는 무시하겠습니다. 이렇게 하면 적절한 크기의 벡터 데이터를 얻을 수 있습니다.\n\n변수 `train_data`와 `test_data`는 리뷰의 목록입니다. 각 리뷰는 단어 인덱스의 리스트입니다(단어 시퀀스가 인코딩된 것입니다). `train_labels`와 `test_labels`는 부정을 나타내는 0과 긍정을 나타내는 1의 리스트입니다:\n\nimdb 데이터셋은 train set 25000개, text set 25000개의 샘플을 제공한다. 라벨은 1과 0의 좋다, 싫다로 지정되어있다. 케라스에서 제공하는 imdb의 load_data() 함수를 이용하면 데이터 셋을 쉽게 얻을 수 있다. 데이터셋은 이미 정수로 인코딩되어 있고, 정수값은 단어의 빈도수를 나타낸다. 위의 코드는 가장 빈도수가 높게 나타난 단어 10000개로 데이터셋을 만든 것이다.", "_____no_output_____" ] ], [ [ "train_data", "_____no_output_____" ] ], [ [ "train_data는 array 안에 list가 여러개 들어있는 형태.\narray(list0, list1, list2, ... , list24999) 총 25000개 있다", "_____no_output_____" ] ], [ [ "train_labels", "_____no_output_____" ], [ "train_data[0]", "_____no_output_____" ], [ "train_labels[0]", "_____no_output_____" ] ], [ [ "가장 자주 등장하는 단어 10,000개로 제한했기 때문에 단어 인덱스는 10,000을 넘지 않습니다:", "_____no_output_____" ] ], [ [ "max([max(sequence) for sequence in train_data])", "_____no_output_____" ] ], [ [ "재미 삼아 이 리뷰 데이터 하나를 원래 영어 단어로 어떻게 바꾸는지 보겠습니다:", "_____no_output_____" ] ], [ [ "# word_index는 단어와 정수 인덱스를 매핑한 딕셔너리입니다\nword_index = imdb.get_word_index()\n# 정수 인덱스와 단어를 매핑하도록 뒤집습니다\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n# 리뷰를 디코딩합니다. \n# 0, 1, 2는 '패딩', '문서 시작', '사전에 없음'을 위한 인덱스이므로 3을 뺍니다\ndecoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])\n# 딕셔너리 reverse_word_index에서 key 값이 i-3인 value를 가져와서 ' '를 구분자로 하여 가져오는데, 없으면 '?'를 default로 돌려준다. 이때 key값에 사용되는 i는 train_data[0] 리스트에 있는 정수들", "_____no_output_____" ], [ "word_index", "_____no_output_____" ], [ "reverse_word_index\n# word_index에 있는 items를 가져와서, 거기에 있는 key, value를 value, key 로 딕셔너리화 한 것이다.", "_____no_output_____" ], [ "decoded_review", "_____no_output_____" ] ], [ [ "## 데이터 준비\n\n신경망에 숫자 리스트를 주입할 수는 없습니다. 리스트를 텐서로 바꾸는 두 가지 방법이 있습니다:\n\n* 같은 길이가 되도록 리스트에 패딩을 추가하고 `(samples, sequence_length)` 크기의 정수 텐서로 변환합니다. 그다음 이 정수 텐서를 다룰 수 있는 층을 신경망의 첫 번째 층으로 사용합니다(`Embedding` 층을 말하며 나중에 자세히 다루겠습니다).\n* 리스트를 원-핫 인코딩하여 0과 1의 벡터로 변환합니다. 예를 들면 시퀀스 `[3, 5]`를 인덱스 3과 5의 위치는 1이고 그 외는 모두 0인 10,000차원의 벡터로 각각 변환합니다. 
그다음 부동 소수 벡터 데이터를 다룰 수 있는 `Dense` 층을 신경망의 첫 번째 층으로 사용합니다.\n* 원-핫 인코딩은 단어 집합의 크기를 벡터의 차원으로 하고, 표현하고 싶은 단어의 인덱스에 1의 값을 부여하고, 다른 인덱스에는 0을 부여하는 단어의 벡터 표현 방식이다. 이렇게 표현된 벡터를 원-핫 벡터(One-hot vector)라고 한다.\n\n여기서는 두 번째 방식을 사용하고 이해를 돕기 위해 직접 데이터를 원-핫 벡터로 만들겠습니다:", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef vectorize_sequences(sequences, dimension=10000):\n # 크기가 (len(sequences), dimension))이고 모든 원소가 0인 행렬을 만듭니다\n results = np.zeros((len(sequences), dimension))\n for i, sequence in enumerate(sequences):\n # 리스트 sequences 의 순서 i, 리스트값 sequence를 가져온다.\n results[i, sequence] = 1. # results[i]에서 특정 인덱스의 위치를 1로 만듭니다\n return results\n\n# 훈련 데이터를 벡터로 변환합니다\nx_train = vectorize_sequences(train_data)\n# 테스트 데이터를 벡터로 변환합니다\nx_test = vectorize_sequences(test_data)", "_____no_output_____" ] ], [ [ "이제 샘플은 다음과 같이 나타납니다:", "_____no_output_____" ] ], [ [ "x_train[0]", "_____no_output_____" ], [ "x_train[0].shape", "_____no_output_____" ] ], [ [ "레이블은 쉽게 벡터로 바꿀 수 있습니다:", "_____no_output_____" ] ], [ [ "# 레이블을 벡터로 바꿉니다\ny_train = np.asarray(train_labels).astype('float32')\ny_test = np.asarray(test_labels).astype('float32')", "_____no_output_____" ] ], [ [ "이제 신경망에 주입할 데이터가 준비되었습니다.", "_____no_output_____" ], [ "## 신경망 모델 만들기\n\n입력 데이터가 벡터이고 레이블은 스칼라(1 또는 0)입니다. 아마 앞으로 볼 수 있는 문제 중에서 가장 간단할 것입니다. 이런 문제에 잘 작동하는 네트워크 종류는 `relu` 활성화 함수를 사용한 완전 연결 층(즉, `Dense(16, activation='relu')`)을 그냥 쌓은 것입니다.\n\n`Dense` 층에 전달한 매개변수(16)는 은닉 유닛의 개수입니다. 하나의 은닉 유닛은 층이 나타내는 표현 공간에서 하나의 차원이 됩니다. 2장에서 `relu` 활성화 함수를 사용한 `Dense` 층을 다음과 같은 텐서 연산을 연결하여 구현하였습니다:\n\n`output = relu(dot(W, input) + b)`\n\n16개의 은닉 유닛이 있다는 것은 가중치 행렬 `W`의 크기가 `(input_dimension, 16)`이라는 뜻입니다. 입력 데이터와 `W`를 점곱하면 입력 데이터가 16 차원으로 표현된 공간으로 투영됩니다(그리고 편향 벡터 `b`를 더하고 `relu` 연산을 적용합니다). 표현 공간의 차원을 '신경망이 내재된 표현을 학습할 때 가질 수 있는 자유도'로 이해할 수 있습니다. <b>은닉 유닛을 늘리면 (표현 공간을 더 고차원으로 만들면) 신경망이 더욱 복잡한 표현을 학습할 수 있지만 계산 비용이 커지고 원치 않은 패턴을 학습할 수도 있습니다(훈련 데이터에서는 성능이 향상되지만 테스트 데이터에서는 그렇지 않은 패턴입니다).</b>\n\n`Dense` 층을 쌓을 때 두 가진 중요한 구조상의 결정이 필요합니다:\n\n* 얼마나 많은 층을 사용할 것인가\n* 각 층에 얼마나 많은 은닉 유닛을 둘 것인가\n\n4장에서 이런 결정을 하는 데 도움이 되는 일반적인 원리를 배우겠습니다. 당분간은 저를 믿고 선택한 다음 구조를 따라 주세요.\n\n* 16개의 은닉 유닛을 가진 두 개의 은닉층\n* 현재 리뷰의 감정을 스칼라 값의 예측으로 출력하는 세 번째 층\n\n중간에 있는 은닉층은 활성화 함수로 `relu`를 사용하고 마지막 층은 확률(0과 1 사이의 점수로, 어떤 샘플이 타깃 '1'일 가능성이 높다는 것은 그 리뷰가 긍정일 가능성이 높다는 것을 의미합니다)을 출력하기 위해 시그모이드 활성화 함수를 사용합니다. `relu`는 음수를 0으로 만드는 함수입니다. 시그모이드는 임의의 값을 [0, 1] 사이로 압축하므로 출력 값을 확률처럼 해석할 수 있습니다.", "_____no_output_____" ], [ "다음이 이 신경망의 모습입니다:\n\n![3-layer network](https://s3.amazonaws.com/book.keras.io/img/ch3/3_layer_network.png)", "_____no_output_____" ], [ "다음은 이 신경망의 케라스 구현입니다. 이전에 보았던 MNIST 예제와 비슷합니다:", "_____no_output_____" ] ], [ [ "from keras import models\nfrom keras import layers\n\nmodel = models.Sequential()\nmodel.add(layers.Dense(16, activation='relu', input_shape=(10000,)))\nmodel.add(layers.Dense(16, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))", "_____no_output_____" ] ], [ [ "마지막으로 손실 함수와 옵티마이저를 선택해야 합니다. 이진 분류 문제이고 신경망의 출력이 확률이기 때문에(네트워크의 끝에 시그모이드 활성화 함수를 사용한 하나의 유닛으로 된 층을 놓았습니다), `binary_crossentropy` 손실이 적합합니다. 이 함수가 유일한 선택은 아니고 예를 들어 `mean_squared_error`를 사용할 수도 있습니다. 확률을 출력하는 모델을 사용할 때는 크로스엔트로피가 최선의 선택입니다. 크로스엔트로피는 정보 이론 분야에서 온 개념으로 확률 분포 간의 차이를 측정합니다. 여기에서는 원본 분포와 예측 분포 사이를 측정합니다.\n\n다음은 `rmsprop` 옵티마이저와 `binary_crossentropy` 손실 함수로 모델을 설정하는 단계입니다. 
훈련하는 동안 정확도를 사용해 모니터링하겠습니다.", "_____no_output_____" ] ], [ [ "model.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n# 옵티마이저를 이와 같이 이름으로 사용하면, 해당 옵티마이저의 기본 설정이 사용됩니다.", "_____no_output_____" ] ], [ [ "케라스에 `rmsprop`, `binary_crossentropy`, `accuracy`가 포함되어 있기 때문에 옵티마이저, 손실 함수, 측정 지표를 문자열로 지정하는 것이 가능합니다. 이따금 옵티마이저의 매개변수를 바꾸거나 자신만의 손실 함수, 측정 함수를 전달해야 할 경우가 있습니다. 전자의 경우에는 옵티마이저 파이썬 클래스를 사용해 객체를 직접 만들어 `optimizer` 매개변수에 전달하면 됩니다:", "_____no_output_____" ] ], [ [ "from keras import optimizers\n\nmodel.compile(optimizer=optimizers.RMSprop(lr=0.001),\n loss='binary_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "후자의 경우는 `loss`와 `metrics` 매개변수에 함수 객체를 전달하면 됩니다\n", "_____no_output_____" ] ], [ [ "from keras import losses\nfrom keras import metrics\n\nmodel.compile(optimizer=optimizers.RMSprop(lr=0.001),\n loss=losses.binary_crossentropy,\n metrics=[metrics.binary_accuracy])", "_____no_output_____" ], [ "model.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "# 잘 모르겠다 다시 해보자.", "_____no_output_____" ] ], [ [ "# 옵티마이저의 이름을 사용하는 경우 : 기본 설정이 사용된다.\nmodel.compile(optimizer='sgd', loss='mean_squared_error', metrics=[metrics.binary_accuracy])", "_____no_output_____" ] ], [ [ "아 여기서 metrics=[metrics.binary_accuracy] 를 안했어서 아래부분에서 acc KeyError 난 거임", "_____no_output_____" ] ], [ [ "# 옵티마이저의 매개변수를 바꾸거나\n# 자신만의 손실 함수, 측정함수를 전달하기 위해서\n# 객체 (여기서는 sgd)를 직접 만들어서, model.compile()의 매개변수로 전달해서 쓸 수도 있다.\nsgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(optimizer=sgd, loss='mean_squared_error', metrics=['acc'])", "_____no_output_____" ] ], [ [ "여기에서도 accuracy 안넣어서 KeyError 났음", "_____no_output_____" ] ], [ [ "metrics=['acc']로 하면 history에서 key 가 acc, val_acc 로 바뀐다", "_____no_output_____" ] ], [ [ "* loss == 손실함수 : 학습을 통해 직접적으로 줄이고자 하는 값 (loss(손실) = error(에러) = cost(코스트))\n* metrics == 측정함수 : 학습을 통해 목표를 얼마나 잘(못) 달성했는지를 나타내는 값, metric(척도)\n* 머신러닝의 최종 목표는 척도로 달성률을 표시하지만,\n* 직접 척도를 낮추도록 훈련하는 것은 어렵기때문에 손실을 줄이는 방향으로 훈련한다.", "_____no_output_____" ], [ "## 훈련 검증\n\n훈련하는 동안 처음 본 데이터에 대한 모델의 정확도를 측정하기 위해서는 원본 훈련 데이터에서 10,000의 샘플을 떼어서 검증 세트를 만들어야 합니다:", "_____no_output_____" ] ], [ [ "x_val = x_train[:10000]\n# 리스트 x_train에서 index 값이 0 이상 10000미만인 값들을 떼어내서 리스트 x_val을 만든다.\npartial_x_train = x_train[10000:]\n# 리스트 x_train에서 index 값이 10000 이상인 값들을 떼어내서 리스트 partial_x_train을 만든다.\n\ny_val = y_train[:10000]\npartial_y_train = y_train[10000:]", "_____no_output_____" ] ], [ [ "이제 모델을 512개 샘플씩 미니 배치를 만들어 20번의 에포크 동안 훈련시킵니다(`x_train`과 `y_train` 텐서에 있는 모든 샘플에 대해 20번 반복합니다). 동시에 따로 떼어 놓은 10,000개의 샘플에서 손실과 정확도를 측정할 것입니다. 
이렇게 하려면 `validation_data` 매개변수에 검증 데이터를 전달해야 합니다:", "_____no_output_____" ] ], [ [ "history = model.fit(partial_x_train,\n partial_y_train,\n epochs=20,\n batch_size=512,\n validation_data=(x_val, y_val))", "Train on 15000 samples, validate on 10000 samples\nEpoch 1/20\n15000/15000 [==============================] - 2s 130us/step - loss: 0.0327 - acc: 0.9714 - val_loss: 0.0905 - val_acc: 0.8768\nEpoch 2/20\n15000/15000 [==============================] - 2s 119us/step - loss: 0.0319 - acc: 0.9725 - val_loss: 0.0907 - val_acc: 0.8767\nEpoch 3/20\n15000/15000 [==============================] - 2s 120us/step - loss: 0.0311 - acc: 0.9735 - val_loss: 0.0912 - val_acc: 0.8775\nEpoch 4/20\n15000/15000 [==============================] - 2s 120us/step - loss: 0.0302 - acc: 0.9739 - val_loss: 0.0913 - val_acc: 0.8755\nEpoch 5/20\n15000/15000 [==============================] - 2s 118us/step - loss: 0.0296 - acc: 0.9749 - val_loss: 0.0915 - val_acc: 0.8763\nEpoch 6/20\n15000/15000 [==============================] - 2s 120us/step - loss: 0.0288 - acc: 0.9764 - val_loss: 0.0925 - val_acc: 0.8764\nEpoch 7/20\n15000/15000 [==============================] - 2s 119us/step - loss: 0.0281 - acc: 0.9765 - val_loss: 0.0920 - val_acc: 0.8757\nEpoch 8/20\n15000/15000 [==============================] - 2s 118us/step - loss: 0.0274 - acc: 0.9778 - val_loss: 0.0926 - val_acc: 0.8755\nEpoch 9/20\n15000/15000 [==============================] - 2s 120us/step - loss: 0.0267 - acc: 0.9792 - val_loss: 0.0928 - val_acc: 0.8751\nEpoch 10/20\n15000/15000 [==============================] - 2s 117us/step - loss: 0.0260 - acc: 0.9800 - val_loss: 0.0929 - val_acc: 0.8750\nEpoch 11/20\n15000/15000 [==============================] - 2s 117us/step - loss: 0.0254 - acc: 0.9800 - val_loss: 0.0935 - val_acc: 0.8759\nEpoch 12/20\n15000/15000 [==============================] - 2s 118us/step - loss: 0.0247 - acc: 0.9812 - val_loss: 0.0943 - val_acc: 0.8734\nEpoch 13/20\n15000/15000 [==============================] - 2s 118us/step - loss: 0.0244 - acc: 0.9809 - val_loss: 0.0942 - val_acc: 0.8736\nEpoch 14/20\n15000/15000 [==============================] - 2s 117us/step - loss: 0.0237 - acc: 0.9818 - val_loss: 0.0942 - val_acc: 0.8747\nEpoch 15/20\n15000/15000 [==============================] - 2s 117us/step - loss: 0.0232 - acc: 0.9827 - val_loss: 0.0944 - val_acc: 0.8753\nEpoch 16/20\n15000/15000 [==============================] - 2s 117us/step - loss: 0.0227 - acc: 0.9834 - val_loss: 0.0947 - val_acc: 0.8748\nEpoch 17/20\n15000/15000 [==============================] - 2s 118us/step - loss: 0.0221 - acc: 0.9837 - val_loss: 0.0950 - val_acc: 0.8747\nEpoch 18/20\n15000/15000 [==============================] - 2s 119us/step - loss: 0.0217 - acc: 0.9843 - val_loss: 0.0953 - val_acc: 0.8745\nEpoch 19/20\n15000/15000 [==============================] - 2s 118us/step - loss: 0.0212 - acc: 0.9849 - val_loss: 0.0958 - val_acc: 0.8738\nEpoch 20/20\n15000/15000 [==============================] - 2s 119us/step - loss: 0.0208 - acc: 0.9853 - val_loss: 0.0962 - val_acc: 0.8729\n" ] ], [ [ "CPU를 사용해도 에포크마다 2초가 걸리지 않습니다. 전체 훈련은 20초 이상 걸립니다. 에포크가 끝날 때마다 10,000개의 검증 샘플 데이터에서 손실과 정확도를 계산하기 때문에 약간씩 지연됩니다.\n\n`model.fit()` 메서드는 `History` 객체를 반환합니다. 이 객체는 훈련하는 동안 발생한 모든 정보를 담고 있는 딕셔너리인 `history` 속성을 가지고 있습니다. 한 번 확인해 보죠:", "_____no_output_____" ] ], [ [ "history_dict = history.history\nhistory_dict.keys()", "_____no_output_____" ] ], [ [ "이 딕셔너리는 훈련과 검증하는 동안 모니터링할 측정 지표당 하나씩 모두 네 개의 항목을 담고 있습니다. 
맷플롯립을 사용해 훈련과 검증 데이터에 대한 손실과 정확도를 그려 보겠습니다:", "_____no_output_____" ] ], [ [ "print(history.history['acc'])", "[0.9714, 0.97253335, 0.9734667, 0.97386664, 0.9748667, 0.9764, 0.97653335, 0.9778, 0.9792, 0.98, 0.98, 0.9812, 0.9808667, 0.9818, 0.9827333, 0.9834, 0.98373336, 0.98433334, 0.9848667, 0.9853333]\n" ], [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "acc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\n# ‘bo’는 파란색 점을 의미합니다 (blue dot)\nplt.plot(epochs, loss, 'bo', label='Training loss')\n# ‘b’는 파란색 실선을 의미합니다 (solid blue line)\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\n# legend 는 아래에 나오는 범례\nplt.legend()\n\nplt.show()", "_____no_output_____" ] ], [ [ "val_loss, val_accuracy, loss, accuracy --> val_loss, val_acc, loss, acc 로 업데이트 됨", "_____no_output_____" ] ], [ [ "plt.clf() # 그래프를 초기화합니다\nacc = history_dict['acc']\nval_acc = history_dict['val_acc']\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\n\nplt.show()", "_____no_output_____" ] ], [ [ "점선은 훈련 손실과 정확도이고 실선은 검증 손실과 정확도입니다. 신경망의 무작위한 초기화 때문에 사람마다 결과거 조금 다를 수 있습니다.\n\n여기에서 볼 수 있듯이 훈련 손실이 에포크마다 감소하고 훈련 정확도는 에포크마다 증가합니다. 경사 하강법 최적화를 사용했을 때 반복마다 최소화되는 것이 손실이므로 기대했던 대로입니다. 검증 손실과 정확도는 이와 같지 않습니다. 4번째 에포크에서 그래프가 역전되는 것 같습니다. 이것이 훈련 세트에서 잘 작동하는 모델이 처음 보는 데이터에 잘 작동하지 않을 수 있다고 앞서 언급한 경고의 한 사례입니다. 정확한 용어로 말하면 과대적합되었다고 합니다. 2번째 에포크 이후부터 훈련 데이터에 과도하게 최적화되어 훈련 데이터에 특화된 표현을 학습하므로 훈련 세트 이외의 데이터에는 일반화되지 못합니다.\n\n이런 경우에 과대적합을 방지하기 위해서 3번째 에포크 이후에 훈련을 중지할 수 있습니다. 일반적으로 4장에서 보게 될 과대적합을 완화하는 다양한 종류의 기술을 사용할 수 있습니다.\n\n처음부터 다시 새로운 신경망을 4번의 에포크 동안만 훈련하고 테스트 데이터에서 평가해 보겠습니다:", "_____no_output_____" ] ], [ [ "model = models.Sequential()\nmodel.add(layers.Dense(16, activation='relu', input_shape=(10000,)))\nmodel.add(layers.Dense(16, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=4, batch_size=512)\nresults = model.evaluate(x_test, y_test)", "Epoch 1/4\n25000/25000 [==============================] - 2s 92us/step - loss: 0.4499 - accuracy: 0.8226\nEpoch 2/4\n25000/25000 [==============================] - 2s 77us/step - loss: 0.2586 - accuracy: 0.9100\nEpoch 3/4\n25000/25000 [==============================] - 2s 77us/step - loss: 0.1985 - accuracy: 0.9297\nEpoch 4/4\n25000/25000 [==============================] - 2s 78us/step - loss: 0.1657 - accuracy: 0.9414\n25000/25000 [==============================] - 2s 93us/step\n" ], [ "results", "_____no_output_____" ] ], [ [ "아주 단순한 방식으로도 87%의 정확도를 달성했습니다. 최고 수준의 기법을 사용하면 95%에 가까운 성능을 얻을 수 있습니다.", "_____no_output_____" ], [ "## 훈련된 모델로 새로운 데이터에 대해 예측하기\n\n모델을 훈련시킨 후에 이를 실전 환경에서 사용하고 싶을 것입니다. `predict` 메서드를 사용해서 어떤 리뷰가 긍정일 확률을 예측할 수 있습니다:", "_____no_output_____" ] ], [ [ "model.predict(x_test)", "_____no_output_____" ] ], [ [ "여기에서처럼 이 모델은 어떤 샘플에 대해 확신을 가지고 있지만(0.99 또는 그 이상, 0.01 또는 그 이하) 어떤 샘플에 대해서는 확신이 부족합니다(0.6, 0.4). ", "_____no_output_____" ], [ "## 추가 실험\n\n* 여기에서는 두 개의 은닉층을 사용했습니다. 
한 개 또는 세 개의 은닉층을 사용하고 검증과 테스트 정확도에 어떤 영향을 미치는지 확인해 보세요.\n* 층의 은닉 유닛을 추가하거나 줄여 보세요: 32개 유닛, 64개 유닛 등\n* `binary_crossentropy` 대신에 `mse` 손실 함수를 사용해 보세요.\n* `relu` 대신에 `tanh` 활성화 함수(초창기 신경망에서 인기 있었던 함수입니다)를 사용해 보세요.\n\n다음 실험을 진행하면 여기에서 선택한 구조가 향상의 여지는 있지만 어느 정도 납득할 만한 수준이라는 것을 알게 것입니다!", "_____no_output_____" ] ], [ [ "model = models.Sequential()\nmodel.add(layers.Dense(32, activation='tanh', input_shape=(10000,)))\nmodel.add(layers.Dense(32, activation='tanh'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer='rmsprop',\n loss='mse',\n metrics=['acc'])\n\nmodel.fit(x_train, y_train, epochs=4, batch_size=512)\nresults = model.evaluate(x_test, y_test)", "Epoch 1/4\n25000/25000 [==============================] - 2s 89us/step - loss: 0.1324 - acc: 0.8223\nEpoch 2/4\n25000/25000 [==============================] - 2s 80us/step - loss: 0.0651 - acc: 0.9148\nEpoch 3/4\n25000/25000 [==============================] - 2s 79us/step - loss: 0.0503 - acc: 0.9344\nEpoch 4/4\n25000/25000 [==============================] - 2s 79us/step - loss: 0.0437 - acc: 0.9454\n25000/25000 [==============================] - 2s 90us/step\n" ], [ "results", "_____no_output_____" ] ], [ [ "## 정리\n\n다음은 이 예제에서 배운 것들입니다:\n\n* 원본 데이터를 신경망에 텐서로 주입하기 위해서는 꽤 많은 전처리가 필요합니다. 단어 시퀀스는 이진 벡터로 인코딩될 수 있고 다른 인코딩 방식도 있습니다.\n* `relu` 활성화 함수와 함께 `Dense` 층을 쌓은 네트워크는 (감성 분류를 포함하여) 여러 종류의 문제에 적용할 수 있어서 앞으로 자주 사용하게 될 것입니다.\n* (출력 클래스가 두 개인) 이진 분류 문제에서 네트워크는 하나의 유닛과 `sigmoid` 활성화 함수를 가진 `Dense` 층으로 끝나야 합니다. 이 신경망의 출력은 확률을 나타내는 0과 1 사이의 스칼라 값입니다.\n* 이진 분류 문제에서 이런 스칼라 시그모이드 출력에 대해 사용할 손실 함수는 `binary_crossentropy`입니다.\n* `rmsprop` 옵티마이저는 문제에 상관없이 일반적으로 충분히 좋은 선택입니다. 걱정할 거리가 하나 줄은 셈입니다.\n* 훈련 데이터에 대해 성능이 향상됨에 따라 신경망은 과대적합되기 시작하고 이전에 본적 없는 데이터에서는 결과가 점점 나빠지게 됩니다. 항상 훈련 세트 이외의 데이터에서 성능을 모니터링해야 합니다.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0a9d12745f976bc44101f3d64dc7cc80fb55a2b
13,207
ipynb
Jupyter Notebook
bilstm-crf-model/0.44/thainer.ipynb
Sutthipong/thai-ner
a5084661edf70ecd6a649d620255fbe5bed1b417
[ "CC-BY-3.0", "Apache-2.0" ]
20
2018-09-06T06:08:56.000Z
2019-12-10T08:32:12.000Z
bilstm-crf-model/0.44/thainer.ipynb
Sutthipong/thai-ner
a5084661edf70ecd6a649d620255fbe5bed1b417
[ "CC-BY-3.0", "Apache-2.0" ]
3
2019-01-17T03:12:56.000Z
2019-04-07T07:07:58.000Z
bilstm-crf-model/0.44/thainer.ipynb
Sutthipong/thai-ner
a5084661edf70ecd6a649d620255fbe5bed1b417
[ "CC-BY-3.0", "Apache-2.0" ]
11
2020-01-13T06:18:26.000Z
2021-11-03T16:15:57.000Z
35.407507
117
0.535852
[ [ [ "# ThaiNER (Bi-LSTM CRF)\n\nusing pytorch\n\nBy Mr.Wannaphong Phatthiyaphaibun\n\nBachelor of Science Program in Computer and Information Science, Nong Khai Campus, Khon Kaen University\n\nhttps://iam.wannaphong.com/\n\nE-mail : [email protected]", "_____no_output_____" ], [ "Thank you Faculty of Applied Science and Engineering, Nong Khai Campus, Khon Kaen University for server.", "_____no_output_____" ] ], [ [ "import torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\n\nprint(torch.__version__)", "1.0.0\n" ], [ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n#torch.backends.cudnn.benchmark=torch.cuda.is_available()\n\n#FloatTensor = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor\nLongTensor = torch.long\n#ByteTensor = torch.cuda.ByteTensor if USE_CUDA else torch.ByteTensor", "_____no_output_____" ], [ "def argmax(vec):\n # return the argmax as a python int\n _, idx = torch.max(vec, 1)\n return idx.item()\n\n\ndef prepare_sequence(seq, to_ix):\n idxs = [to_ix[w] if w in to_ix else to_ix[\"UNK\"] for w in seq]\n return torch.tensor(idxs, dtype=LongTensor, device=device)\n\n\n# Compute log sum exp in a numerically stable way for the forward algorithm\ndef log_sum_exp(vec):\n max_score = vec[0, argmax(vec)]\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))", "_____no_output_____" ], [ "class BiLSTM_CRF(nn.Module):\n\n def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\n super(BiLSTM_CRF, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.vocab_size = vocab_size\n self.tag_to_ix = tag_to_ix\n self.tagset_size = len(tag_to_ix)\n\n self.word_embeds = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,\n num_layers=1, bidirectional=True)\n\n # Maps the output of the LSTM into tag space.\n self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n\n # Matrix of transition parameters. 
Entry i,j is the score of\n # transitioning *to* i *from* j.\n self.transitions = nn.Parameter(\n torch.randn(self.tagset_size, self.tagset_size, device=device))\n\n # These two statements enforce the constraint that we never transfer\n # to the start tag and we never transfer from the stop tag\n self.transitions.data[tag_to_ix[START_TAG], :] = -10000\n self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000\n\n self.hidden = self.init_hidden()\n\n def init_hidden(self):\n return (torch.randn(2, 1, self.hidden_dim // 2,device=device),\n torch.randn(2, 1, self.hidden_dim // 2,device=device))\n\n def _forward_alg(self, feats):\n # Do the forward algorithm to compute the partition function\n init_alphas = torch.full((1, self.tagset_size), -10000., device=device)\n # START_TAG has all of the score.\n init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\n\n # Wrap in a variable so that we will get automatic backprop\n forward_var = init_alphas\n\n # Iterate through the sentence\n for feat in feats:\n alphas_t = [] # The forward tensors at this timestep\n for next_tag in range(self.tagset_size):\n # broadcast the emission score: it is the same regardless of\n # the previous tag\n emit_score = feat[next_tag].view(\n 1, -1).expand(1, self.tagset_size)\n # the ith entry of trans_score is the score of transitioning to\n # next_tag from i\n trans_score = self.transitions[next_tag].view(1, -1)\n # The ith entry of next_tag_var is the value for the\n # edge (i -> next_tag) before we do log-sum-exp\n next_tag_var = forward_var + trans_score + emit_score\n # The forward variable for this tag is log-sum-exp of all the\n # scores.\n alphas_t.append(log_sum_exp(next_tag_var).view(1))\n forward_var = torch.cat(alphas_t).view(1, -1)\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n alpha = log_sum_exp(terminal_var)\n return alpha\n\n def _get_lstm_features(self, sentence):\n self.hidden = self.init_hidden()\n embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)\n lstm_out, self.hidden = self.lstm(embeds, self.hidden)\n lstm_out = lstm_out.view(len(sentence), self.hidden_dim)\n lstm_feats = self.hidden2tag(lstm_out)\n return lstm_feats\n\n def _score_sentence(self, feats, tags):\n # Gives the score of a provided tag sequence\n score = torch.zeros(1,device=device)\n tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=LongTensor, device=device), tags])\n for i, feat in enumerate(feats):\n score = score + \\\n self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]\n score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]\n return score\n\n def _viterbi_decode(self, feats):\n backpointers = []\n\n # Initialize the viterbi variables in log space\n init_vvars = torch.full((1, self.tagset_size), -10000., device=device)\n init_vvars[0][self.tag_to_ix[START_TAG]] = 0\n\n # forward_var at step i holds the viterbi variables for step i-1\n forward_var = init_vvars\n for feat in feats:\n bptrs_t = [] # holds the backpointers for this step\n viterbivars_t = [] # holds the viterbi variables for this step\n\n for next_tag in range(self.tagset_size):\n # next_tag_var[i] holds the viterbi variable for tag i at the\n # previous step, plus the score of transitioning\n # from tag i to next_tag.\n # We don't include the emission scores here because the max\n # does not depend on them (we add them in below)\n next_tag_var = forward_var + self.transitions[next_tag]\n best_tag_id = argmax(next_tag_var)\n bptrs_t.append(best_tag_id)\n 
viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))\n # Now add in the emission scores, and assign forward_var to the set\n # of viterbi variables we just computed\n forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)\n backpointers.append(bptrs_t)\n\n # Transition to STOP_TAG\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n best_tag_id = argmax(terminal_var)\n path_score = terminal_var[0][best_tag_id]\n\n # Follow the back pointers to decode the best path.\n best_path = [best_tag_id]\n for bptrs_t in reversed(backpointers):\n best_tag_id = bptrs_t[best_tag_id]\n best_path.append(best_tag_id)\n # Pop off the start tag (we dont want to return that to the caller)\n start = best_path.pop()\n assert start == self.tag_to_ix[START_TAG] # Sanity check\n best_path.reverse()\n return path_score, best_path\n\n def neg_log_likelihood(self, sentence, tags):\n feats = self._get_lstm_features(sentence)\n forward_score = self._forward_alg(feats)\n gold_score = self._score_sentence(feats, tags)\n return forward_score - gold_score\n\n def forward(self, sentence): # dont confuse this with _forward_alg above.\n # Get the emission scores from the BiLSTM\n lstm_feats = self._get_lstm_features(sentence)\n\n # Find the best path, given the features.\n score, tag_seq = self._viterbi_decode(lstm_feats)\n return score, tag_seq", "_____no_output_____" ], [ "START_TAG = \"<START>\"\nSTOP_TAG = \"<STOP>\"\nEMBEDDING_DIM = 64\nHIDDEN_DIM = 128", "_____no_output_____" ], [ "import dill\nwith open('word_to_ix.pkl', 'rb') as file:\n word_to_ix = dill.load(file)\nwith open('pos_to_ix.pkl', 'rb') as file:\n pos_to_ix = dill.load(file)", "_____no_output_____" ], [ "ix_to_word = dict((v,k) for k,v in word_to_ix.items()) #convert index to word\nix_to_pos = dict((v,k) for k,v in pos_to_ix.items()) #convert index to word", "_____no_output_____" ], [ "model = BiLSTM_CRF(len(word_to_ix), pos_to_ix, EMBEDDING_DIM, HIDDEN_DIM)\nmodel.load_state_dict(torch.load(\"thainer.model\"), strict=False)\nmodel.to(device)", "_____no_output_____" ], [ "def predict(input_sent):\n y_pred=[]\n temp=[]\n with torch.no_grad():\n precheck_sent = prepare_sequence(input_sent, word_to_ix)\n output=model(precheck_sent)[1]\n y_pred=[ix_to_pos[i] for i in output]\n \n return y_pred", "_____no_output_____" ], [ "predict([\"ผม\",\"ชื่อ\",\"นาย\",\"บุญ\",\"มาก\",\" \",\"ทอง\",\"ดี\"])", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a9d13acbbcaf8b667d8994827fe8e74d61031a
390,089
ipynb
Jupyter Notebook
.ipynb_checkpoints/flight_price-checkpoint.ipynb
theAdarshSrivastava/Flight_Fare_Prediction-
fc3014ddf074a921e16bd39bbaf2139474860143
[ "MIT" ]
6
2021-07-03T05:37:07.000Z
2021-08-17T02:42:28.000Z
.ipynb_checkpoints/flight_price-checkpoint.ipynb
theAdarshSrivastava/Flight_Fare_Prediction-
fc3014ddf074a921e16bd39bbaf2139474860143
[ "MIT" ]
null
null
null
.ipynb_checkpoints/flight_price-checkpoint.ipynb
theAdarshSrivastava/Flight_Fare_Prediction-
fc3014ddf074a921e16bd39bbaf2139474860143
[ "MIT" ]
null
null
null
99.99718
119,012
0.759211
[ [ [ "# Flight Price Prediction\n---", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set()", "_____no_output_____" ], [ "pip list", "Package Version\r\n---------------------------------- -------------------\r\nabsl-py 0.12.0\r\nalabaster 0.7.12\r\naltair 4.1.0\r\nanaconda-client 1.7.2\r\nanaconda-navigator 1.10.0\r\nanaconda-project 0.8.3\r\nappdirs 1.4.4\r\nargh 0.26.2\r\nargon2-cffi 20.1.0\r\nasn1crypto 1.4.0\r\nastor 0.8.1\r\nastroid 2.4.2\r\nastropy 4.0.2\r\nastunparse 1.6.3\r\nasync-generator 1.10\r\natomicwrites 1.4.0\r\nattrs 20.3.0\r\nautopep8 1.5.4\r\nBabel 2.8.1\r\nbackcall 0.2.0\r\nbackports.functools-lru-cache 1.6.1\r\nbackports.shutil-get-terminal-size 1.0.0\r\nbackports.tempfile 1.0\r\nbackports.weakref 1.0.post1\r\nbase58 2.1.0\r\nbeautifulsoup4 4.9.3\r\nbitarray 1.6.1\r\nbkcharts 0.2\r\nbleach 3.2.1\r\nblinker 1.4\r\nbokeh 2.2.3\r\nboto 2.49.0\r\nBottleneck 1.3.2\r\nbrotlipy 0.7.0\r\ncachetools 4.2.1\r\ncertifi 2020.6.20\r\ncffi 1.14.3\r\nchardet 3.0.4\r\nclick 7.1.2\r\ncloudpickle 1.6.0\r\nclyent 1.2.2\r\ncolorama 0.4.4\r\nconda 4.9.2\r\nconda-build 3.20.5\r\nconda-package-handling 1.7.2\r\nconda-verify 3.4.2\r\ncontextlib2 0.6.0.post1\r\ncryptography 3.1.1\r\ncycler 0.10.0\r\nCython 0.29.21\r\ncytoolz 0.11.0\r\ndask 2.30.0\r\ndecorator 4.4.2\r\ndefusedxml 0.6.0\r\ndiff-match-patch 20200713\r\ndistlib 0.3.2\r\ndistributed 2.30.1\r\ndocutils 0.16\r\nentrypoints 0.3\r\net-xmlfile 1.0.1\r\nfastcache 1.1.0\r\nfilelock 3.0.12\r\nflake8 3.8.4\r\nFlask 1.1.2\r\nFlask-Cors 3.0.10\r\nflatbuffers 1.12\r\nfsspec 0.8.3\r\nfuture 0.18.2\r\ngast 0.3.3\r\ngevent 20.9.0\r\ngitdb 4.0.7\r\nGitPython 3.1.17\r\nglob2 0.7\r\ngmpy2 2.0.8\r\ngoogle-auth 1.27.1\r\ngoogle-auth-oauthlib 0.4.3\r\ngoogle-pasta 0.2.0\r\ngreenlet 0.4.17\r\ngrpcio 1.32.0\r\ngunicorn 20.1.0\r\nh5py 2.10.0\r\nHeapDict 1.0.1\r\nhtml5lib 1.1\r\nidna 2.10\r\nimageio 2.9.0\r\nimagesize 1.2.0\r\nimportlib-metadata 2.0.0\r\niniconfig 1.1.1\r\nintervaltree 3.1.0\r\nipykernel 5.3.4\r\nipython 7.19.0\r\nipython-genutils 0.2.0\r\nipywidgets 7.5.1\r\nisort 5.6.4\r\nitsdangerous 1.1.0\r\njdcal 1.4.1\r\njedi 0.17.1\r\njeepney 0.5.0\r\nJinja2 2.11.2\r\njoblib 0.17.0\r\njson5 0.9.5\r\njsonschema 3.2.0\r\njupyter 1.0.0\r\njupyter-client 6.1.7\r\njupyter-console 6.2.0\r\njupyter-core 4.6.3\r\njupyterlab 2.2.6\r\njupyterlab-pygments 0.1.2\r\njupyterlab-server 1.2.0\r\nKeras 2.4.3\r\nKeras-Preprocessing 1.1.2\r\nkeyring 21.4.0\r\nkiwisolver 1.3.0\r\nlazy-object-proxy 1.4.3\r\nlibarchive-c 2.9\r\nllvmlite 0.34.0\r\nlocket 0.2.0\r\nlxml 4.6.1\r\nMarkdown 3.3.4\r\nMarkupSafe 1.1.1\r\nmatplotlib 3.3.2\r\nmccabe 0.6.1\r\nmistune 0.8.4\r\nmkl-fft 1.2.0\r\nmkl-random 1.1.1\r\nmkl-service 2.3.0\r\nmock 4.0.2\r\nmore-itertools 8.6.0\r\nmpmath 1.1.0\r\nmsgpack 1.0.0\r\nmultipledispatch 0.6.0\r\nnavigator-updater 0.2.1\r\nnbclient 0.5.1\r\nnbconvert 6.0.7\r\nnbformat 5.0.8\r\nnest-asyncio 1.4.2\r\nnetworkx 2.5\r\nnltk 3.5\r\nnose 1.3.7\r\nnotebook 6.1.4\r\nnumba 0.51.2\r\nnumexpr 2.7.1\r\nnumpy 1.19.2\r\nnumpydoc 1.1.0\r\noauthlib 3.1.0\r\nolefile 0.46\r\nopencv-python 4.5.1.48\r\nopenpyxl 3.0.5\r\nopt-einsum 3.3.0\r\npackaging 20.4\r\npandas 1.1.3\r\npandocfilters 1.4.3\r\nparso 0.7.0\r\npartd 1.1.0\r\npath 15.0.0\r\npathlib2 2.3.5\r\npathtools 0.1.2\r\npatsy 0.5.1\r\npep8 1.7.1\r\npexpect 4.8.0\r\npickleshare 0.7.5\r\nPillow 8.0.1\r\npip 20.2.4\r\npkginfo 1.6.1\r\npluggy 0.13.1\r\nply 3.11\r\nprometheus-client 0.8.0\r\nprompt-toolkit 3.0.8\r\nprotobuf 
3.15.6\r\npsutil 5.7.2\r\nptyprocess 0.6.0\r\npy 1.9.0\r\npyarrow 4.0.0\r\npyasn1 0.4.8\r\npyasn1-modules 0.2.8\r\npycodestyle 2.6.0\r\npycosat 0.6.3\r\npycparser 2.20\r\npycurl 7.43.0.6\r\npydeck 0.6.2\r\npydocstyle 5.1.1\r\npydot 1.4.2\r\npyflakes 2.2.0\r\nPygments 2.7.2\r\npylint 2.6.0\r\npyodbc 4.0.0-unsupported\r\npyOpenSSL 19.1.0\r\npyparsing 2.4.7\r\npyrsistent 0.17.3\r\nPySocks 1.7.1\r\npytest 0.0.0\r\npython-dateutil 2.8.1\r\npython-jsonrpc-server 0.4.0\r\npython-language-server 0.35.1\r\npytz 2020.1\r\nPyWavelets 1.1.1\r\npyxdg 0.27\r\nPyYAML 5.3.1\r\npyzmq 19.0.2\r\nQDarkStyle 2.8.1\r\nQtAwesome 1.0.1\r\nqtconsole 4.7.7\r\nQtPy 1.9.0\r\nregex 2020.10.15\r\nrequests 2.24.0\r\nrequests-oauthlib 1.3.0\r\nrope 0.18.0\r\nrsa 4.7.2\r\nRtree 0.9.4\r\nruamel-yaml 0.15.87\r\nscikit-image 0.17.2\r\nscikit-learn 0.24.1\r\nscipy 1.5.2\r\nseaborn 0.11.0\r\nSecretStorage 3.1.2\r\nSend2Trash 1.5.0\r\nsetuptools 50.3.1.post20201107\r\nsimplegeneric 0.8.1\r\nsingledispatch 3.4.0.3\r\nsip 4.19.13\r\nsix 1.15.0\r\nsklearn 0.0\r\nsmmap 4.0.0\r\nsnowballstemmer 2.0.0\r\nsortedcollections 1.2.1\r\nsortedcontainers 2.2.2\r\nsoupsieve 2.0.1\r\nSphinx 3.2.1\r\nsphinxcontrib-applehelp 1.0.2\r\nsphinxcontrib-devhelp 1.0.2\r\nsphinxcontrib-htmlhelp 1.0.3\r\nsphinxcontrib-jsmath 1.0.1\r\nsphinxcontrib-qthelp 1.0.3\r\nsphinxcontrib-serializinghtml 1.1.4\r\nsphinxcontrib-websupport 1.2.4\r\nspyder 4.1.5\r\nspyder-kernels 1.9.4\r\nSQLAlchemy 1.3.20\r\nstatsmodels 0.12.0\r\nstreamlit 0.82.0\r\nsympy 1.6.2\r\ntables 3.6.1\r\ntblib 1.7.0\r\ntensorboard 2.4.1\r\ntensorboard-plugin-wit 1.8.0\r\ntensorflow 2.4.1\r\ntensorflow-estimator 2.4.0\r\ntermcolor 1.1.0\r\nterminado 0.9.1\r\ntestpath 0.4.4\r\nthreadpoolctl 2.1.0\r\ntifffile 2020.10.1\r\ntoml 0.10.1\r\ntoolz 0.11.1\r\ntornado 6.0.4\r\ntqdm 4.50.2\r\ntraitlets 5.0.5\r\ntyping-extensions 3.7.4.3\r\ntzlocal 2.1\r\nujson 4.0.1\r\nunicodecsv 0.14.1\r\nurllib3 1.25.11\r\nvalidators 0.18.2\r\nvirtualenv 20.4.7\r\nwatchdog 0.10.3\r\nwcwidth 0.2.5\r\nwebencodings 0.5.1\r\nWerkzeug 1.0.1\r\nwheel 0.35.1\r\nwidgetsnbextension 3.5.1\r\nwincertstore 0.2\r\nwrapt 1.12.1\r\nwurlitzer 2.0.1\r\nxlrd 1.2.0\r\nXlsxWriter 1.3.7\r\nxlwt 1.3.0\r\nxmltodict 0.12.0\r\nyapf 0.30.0\r\nzict 2.0.0\r\nzipp 3.4.0\r\nzope.event 4.5.0\r\nzope.interface 5.1.2\r\n" ] ], [ [ "## Importing dataset\n\n1. Check whether any null values are there or not. if it is present then following can be done,\n 1. Imputing data using Imputation method in sklearn\n 2. 
Filling NaN values with mean, median and mode using fillna() method", "_____no_output_____" ] ], [ [ "train_data = pd.read_excel(r\"/home/adarshsrivastava/Github/Flight_Fare_Prediction-/dataset/Data_Train.xlsx\")", "_____no_output_____" ], [ "#pd.set_option('display.max_columns', None)", "_____no_output_____" ], [ "train_data.head()", "_____no_output_____" ], [ "train_data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10683 entries, 0 to 10682\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Airline 10683 non-null object\n 1 Date_of_Journey 10683 non-null object\n 2 Source 10683 non-null object\n 3 Destination 10683 non-null object\n 4 Route 10682 non-null object\n 5 Dep_Time 10683 non-null object\n 6 Arrival_Time 10683 non-null object\n 7 Duration 10683 non-null object\n 8 Total_Stops 10682 non-null object\n 9 Additional_Info 10683 non-null object\n 10 Price 10683 non-null int64 \ndtypes: int64(1), object(10)\nmemory usage: 918.2+ KB\n" ], [ "train_data[\"Duration\"].value_counts()", "_____no_output_____" ], [ "train_data.dropna(inplace = True)", "_____no_output_____" ], [ "train_data.isnull().sum() #To check if there is any NaN value in any of the column", "_____no_output_____" ] ], [ [ "## EDA", "_____no_output_____" ], [ "From description we can see that Date_of_Journey is a object data type,\\\nTherefore, we have to convert this datatype into timestamp so as to use this column properly for prediction\n\nFor this we require pandas **to_datetime** to convert object data type to datetime dtype.\n\n<span style=\"color: red;\">**.dt.day method will extract only day of that date**</span>\\\n<span style=\"color: red;\">**.dt.month method will extract only month of that date**</span>", "_____no_output_____" ] ], [ [ "train_data[\"Journey_day\"] = pd.to_datetime(train_data.Date_of_Journey, format=\"%d/%m/%Y\").dt.day", "_____no_output_____" ], [ "train_data[\"Journey_month\"] = pd.to_datetime(train_data[\"Date_of_Journey\"], format = \"%d/%m/%Y\").dt.month", "_____no_output_____" ], [ "train_data.head()", "_____no_output_____" ], [ "# Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use.\n\ntrain_data.drop([\"Date_of_Journey\"], axis = 1, inplace = True)", "_____no_output_____" ], [ "# Departure time is when a plane leaves the gate. 
\n# Similar to Date_of_Journey we can extract values from Dep_Time\n\n# Extracting Hours\ntrain_data[\"Dep_hour\"] = pd.to_datetime(train_data[\"Dep_Time\"]).dt.hour\n\n# Extracting Minutes\ntrain_data[\"Dep_min\"] = pd.to_datetime(train_data[\"Dep_Time\"]).dt.minute\n\n# Now we can drop Dep_Time as it is of no use\ntrain_data.drop([\"Dep_Time\"], axis = 1, inplace = True)", "_____no_output_____" ], [ "train_data.head()", "_____no_output_____" ], [ "# Arrival time is when the plane pulls up to the gate.\n# Similar to Date_of_Journey we can extract values from Arrival_Time\n\n# Extracting Hours\ntrain_data[\"Arrival_hour\"] = pd.to_datetime(train_data.Arrival_Time).dt.hour\n\n# Extracting Minutes\ntrain_data[\"Arrival_min\"] = pd.to_datetime(train_data.Arrival_Time).dt.minute\n\n# Now we can drop Arrival_Time as it is of no use\ntrain_data.drop([\"Arrival_Time\"], axis = 1, inplace = True)", "_____no_output_____" ], [ "train_data.head()", "_____no_output_____" ], [ "# Time taken by plane to reach destination is called Duration\n# It is the differnce betwwen Departure Time and Arrival time\n\n\n# Assigning and converting Duration column into list\nduration = list(train_data[\"Duration\"])\n\nfor i in range(len(duration)):\n if len(duration[i].split()) != 2: # Check if duration contains only hour or mins\n if \"h\" in duration[i]:\n duration[i] = duration[i].strip() + \" 0m\" # Adds 0 minute\n else:\n duration[i] = \"0h \" + duration[i] # Adds 0 hour\n\nduration_hours = []\nduration_mins = []\nfor i in range(len(duration)):\n duration_hours.append(int(duration[i].split(sep = \"h\")[0])) # Extract hours from duration\n duration_mins.append(int(duration[i].split(sep = \"m\")[0].split()[-1])) # Extracts only minutes from duration", "_____no_output_____" ], [ "# Adding duration_hours and duration_mins list to train_data dataframe\n\ntrain_data[\"Duration_hours\"] = duration_hours\ntrain_data[\"Duration_mins\"] = duration_mins", "_____no_output_____" ], [ "train_data.drop([\"Duration\"], axis = 1, inplace = True)", "_____no_output_____" ], [ "train_data.head()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Handling Categorical Data\n\nOne can find many ways to handle categorical data. Some of them categorical data are,\n1. <span style=\"color: blue;\">**Nominal data**</span> --> data are not in any order --> <span style=\"color: green;\">**OneHotEncoder**</span> is used in this case\n2. 
<span style=\"color: blue;\">**Ordinal data**</span> --> data are in order --> <span style=\"color: green;\">**LabelEncoder**</span> is used in this case", "_____no_output_____" ] ], [ [ "train_data[\"Airline\"].value_counts()", "_____no_output_____" ], [ "# From graph we can see that Jet Airways Business have the highest Price.\n# Apart from the first Airline almost all are having similar median\n\n# Airline vs Price\nsns.catplot(y = \"Price\", x = \"Airline\", data = train_data.sort_values(\"Price\", ascending = False), kind=\"boxen\", height = 12, aspect = 2)\nplt.show()", "_____no_output_____" ], [ "# As Airline is Nominal Categorical data we will perform OneHotEncoding\n\nAirline = train_data[[\"Airline\"]]\n\nAirline = pd.get_dummies(Airline, drop_first= True)\n\nAirline.head()", "_____no_output_____" ], [ "train_data[\"Source\"].value_counts()", "_____no_output_____" ], [ "# Source vs Price\n\nsns.catplot(y = \"Price\", x = \"Source\", data = train_data.sort_values(\"Price\", ascending = False), kind=\"boxen\", height = 6, aspect = 2)\nplt.show()", "_____no_output_____" ], [ "# As Source is Nominal Categorical data we will perform OneHotEncoding\n\nSource = train_data[[\"Source\"]]\n\nSource = pd.get_dummies(Source, drop_first= True)\n\nSource.head()", "_____no_output_____" ], [ "train_data[\"Destination\"].value_counts()", "_____no_output_____" ], [ "# As Destination is Nominal Categorical data we will perform OneHotEncoding\n\nDestination = train_data[[\"Destination\"]]\n\nDestination = pd.get_dummies(Destination, drop_first = True)\n\nDestination.head()", "_____no_output_____" ], [ "train_data[\"Route\"]", "_____no_output_____" ], [ "# Additional_Info contains almost 80% no_info\n# Route and Total_Stops are related to each other\n\ntrain_data.drop([\"Route\", \"Additional_Info\"], axis = 1, inplace = True)", "_____no_output_____" ], [ "train_data[\"Total_Stops\"].value_counts()", "_____no_output_____" ], [ "# As this is case of Ordinal Categorical type we perform LabelEncoder\n# Here Values are assigned with corresponding keys\n\ntrain_data.replace({\"non-stop\": 0, \"1 stop\": 1, \"2 stops\": 2, \"3 stops\": 3, \"4 stops\": 4}, inplace = True)", "_____no_output_____" ], [ "train_data.head()", "_____no_output_____" ], [ "# Concatenate dataframe --> train_data + Airline + Source + Destination\n\ndata_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)", "_____no_output_____" ], [ "data_train.head()", "_____no_output_____" ], [ "data_train.drop([\"Airline\", \"Source\", \"Destination\"], axis = 1, inplace = True)", "_____no_output_____" ], [ "data_train.head()", "_____no_output_____" ], [ "data_train.shape", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Test set", "_____no_output_____" ] ], [ [ "test_data = pd.read_excel(r\"/home/adarshsrivastava/Github/Flight_Fare_Prediction-/dataset/Test_set.xlsx\")", "_____no_output_____" ], [ "test_data.head()", "_____no_output_____" ], [ "# Preprocessing\n\nprint(\"Test data Info\")\nprint(\"-\"*75)\nprint(test_data.info())\n\nprint()\nprint()\n\nprint(\"Null values :\")\nprint(\"-\"*75)\ntest_data.dropna(inplace = True)\nprint(test_data.isnull().sum())\n\n# EDA\n\n# Date_of_Journey\ntest_data[\"Journey_day\"] = pd.to_datetime(test_data.Date_of_Journey, format=\"%d/%m/%Y\").dt.day\ntest_data[\"Journey_month\"] = pd.to_datetime(test_data[\"Date_of_Journey\"], format = \"%d/%m/%Y\").dt.month\ntest_data.drop([\"Date_of_Journey\"], axis = 1, inplace = True)\n\n# Dep_Time\ntest_data[\"Dep_hour\"] = 
pd.to_datetime(test_data[\"Dep_Time\"]).dt.hour\ntest_data[\"Dep_min\"] = pd.to_datetime(test_data[\"Dep_Time\"]).dt.minute\ntest_data.drop([\"Dep_Time\"], axis = 1, inplace = True)\n\n# Arrival_Time\ntest_data[\"Arrival_hour\"] = pd.to_datetime(test_data.Arrival_Time).dt.hour\ntest_data[\"Arrival_min\"] = pd.to_datetime(test_data.Arrival_Time).dt.minute\ntest_data.drop([\"Arrival_Time\"], axis = 1, inplace = True)\n\n# Duration\nduration = list(test_data[\"Duration\"])\n\nfor i in range(len(duration)):\n if len(duration[i].split()) != 2: # Check if duration contains only hour or mins\n if \"h\" in duration[i]:\n duration[i] = duration[i].strip() + \" 0m\" # Adds 0 minute\n else:\n duration[i] = \"0h \" + duration[i] # Adds 0 hour\n\nduration_hours = []\nduration_mins = []\nfor i in range(len(duration)):\n duration_hours.append(int(duration[i].split(sep = \"h\")[0])) # Extract hours from duration\n duration_mins.append(int(duration[i].split(sep = \"m\")[0].split()[-1])) # Extracts only minutes from duration\n\n# Adding Duration column to test set\ntest_data[\"Duration_hours\"] = duration_hours\ntest_data[\"Duration_mins\"] = duration_mins\ntest_data.drop([\"Duration\"], axis = 1, inplace = True)\n\n\n# Categorical data\n\nprint(\"Airline\")\nprint(\"-\"*75)\nprint(test_data[\"Airline\"].value_counts())\nAirline = pd.get_dummies(test_data[\"Airline\"], drop_first= True)\n\nprint()\n\nprint(\"Source\")\nprint(\"-\"*75)\nprint(test_data[\"Source\"].value_counts())\nSource = pd.get_dummies(test_data[\"Source\"], drop_first= True)\n\nprint()\n\nprint(\"Destination\")\nprint(\"-\"*75)\nprint(test_data[\"Destination\"].value_counts())\nDestination = pd.get_dummies(test_data[\"Destination\"], drop_first = True)\n\n# Additional_Info contains almost 80% no_info\n# Route and Total_Stops are related to each other\ntest_data.drop([\"Route\", \"Additional_Info\"], axis = 1, inplace = True)\n\n# Replacing Total_Stops\ntest_data.replace({\"non-stop\": 0, \"1 stop\": 1, \"2 stops\": 2, \"3 stops\": 3, \"4 stops\": 4}, inplace = True)\n\n# Concatenate dataframe --> test_data + Airline + Source + Destination\ndata_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)\n\ndata_test.drop([\"Airline\", \"Source\", \"Destination\"], axis = 1, inplace = True)\n\nprint()\nprint()\n\nprint(\"Shape of test data : \", data_test.shape)\n\n", "Test data Info\n---------------------------------------------------------------------------\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2671 entries, 0 to 2670\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Airline 2671 non-null object\n 1 Date_of_Journey 2671 non-null object\n 2 Source 2671 non-null object\n 3 Destination 2671 non-null object\n 4 Route 2671 non-null object\n 5 Dep_Time 2671 non-null object\n 6 Arrival_Time 2671 non-null object\n 7 Duration 2671 non-null object\n 8 Total_Stops 2671 non-null object\n 9 Additional_Info 2671 non-null object\ndtypes: object(10)\nmemory usage: 208.8+ KB\nNone\n\n\nNull values :\n---------------------------------------------------------------------------\nAirline 0\nDate_of_Journey 0\nSource 0\nDestination 0\nRoute 0\nDep_Time 0\nArrival_Time 0\nDuration 0\nTotal_Stops 0\nAdditional_Info 0\ndtype: int64\nAirline\n---------------------------------------------------------------------------\nJet Airways 897\nIndiGo 511\nAir India 440\nMultiple carriers 347\nSpiceJet 208\nVistara 129\nAir Asia 86\nGoAir 46\nMultiple carriers Premium economy 3\nVistara Premium 
economy 2\nJet Airways Business 2\nName: Airline, dtype: int64\n\nSource\n---------------------------------------------------------------------------\nDelhi 1145\nKolkata 710\nBanglore 555\nMumbai 186\nChennai 75\nName: Source, dtype: int64\n\nDestination\n---------------------------------------------------------------------------\nCochin 1145\nBanglore 710\nDelhi 317\nNew Delhi 238\nHyderabad 186\nKolkata 75\nName: Destination, dtype: int64\n\n\nShape of test data : (2671, 28)\n" ], [ "data_test.head()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "# Feature Selection\n\nFinding out the best feature which will contribute and have good relation with target variable.\nFollowing are some of the feature selection methods,\n\n\n1. <span style=\"color: red;\">**heatmap**</span>\n2. <span style=\"color: red;\">**feature_importance_**</span>\n3. <span style=\"color: red;\">**SelectKBest**</span>", "_____no_output_____" ] ], [ [ "data_train.shape", "_____no_output_____" ], [ "data_train.columns", "_____no_output_____" ], [ "X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',\n 'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',\n 'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',\n 'Airline_Jet Airways', 'Airline_Jet Airways Business',\n 'Airline_Multiple carriers',\n 'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',\n 'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',\n 'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',\n 'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',\n 'Destination_Kolkata', 'Destination_New Delhi']]\nX.head()", "_____no_output_____" ], [ "y = data_train.iloc[:,1]\ny.head()", "_____no_output_____" ], [ "# Finds correlation between Independent and dependent attributes\n\nplt.figure(figsize = (18,18))\nsns.heatmap(train_data.corr(), annot = True)\n\nplt.show()", "_____no_output_____" ], [ "# Important feature using ExtraTreesRegressor\n\nfrom sklearn.ensemble import ExtraTreesRegressor\nselection = ExtraTreesRegressor()\nselection.fit(X, y)", "_____no_output_____" ], [ "print(selection.feature_importances_)", "[2.15101854e-01 1.42991920e-01 5.38073370e-02 2.43077795e-02\n 2.12171255e-02 2.82049689e-02 1.91812941e-02 1.31302969e-01\n 1.82140281e-02 1.09786543e-02 1.95067958e-03 1.77161364e-02\n 1.41462322e-01 6.77073441e-02 1.82574866e-02 7.84452811e-04\n 3.24103414e-03 8.30164616e-05 5.18147062e-03 7.06503226e-05\n 4.46597375e-04 9.21616355e-03 3.35127643e-03 6.06929956e-03\n 1.25377764e-02 1.31451778e-02 8.39222112e-03 4.87587520e-04\n 2.45913761e-02]\n" ], [ "#plot graph of feature importances for better visualization\n\nplt.figure(figsize = (12,8))\nfeat_importances = pd.Series(selection.feature_importances_, index=X.columns)\nfeat_importances.nlargest(20).plot(kind='barh')\nplt.show()", "_____no_output_____" ] ], [ [ "## Fitting model using Random Forest\n\n1. Split dataset into train and test set in order to prediction w.r.t X_test\n2. If needed do scaling of data\n * Scaling is not done in Random forest\n3. Import model\n4. Fit the data\n5. Predict w.r.t X_test\n6. In regression check **RSME** Score\n7. 
Plot graph", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)", "_____no_output_____" ], [ "from sklearn.ensemble import RandomForestRegressor\nrf = RandomForestRegressor()\nrf.fit(X_train, y_train)", "_____no_output_____" ], [ "y_pred=rf.predict(X_test)", "_____no_output_____" ], [ "rf.score(X_train,y_train)", "_____no_output_____" ], [ "rf.score(X_test,y_test)", "_____no_output_____" ], [ "sns.distplot(y_test-y_pred)\nplt.show()", "/home/adarshsrivastava/anaconda3/lib/python3.8/site-packages/seaborn/distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n" ], [ "plt.scatter(y_test,y_pred)\nplt.xlabel(\"y_test\")\nplt.ylabel(\"y_pred\")\nplt.show()", "_____no_output_____" ], [ "from sklearn import metrics", "_____no_output_____" ], [ "print(\"MAE:\",metrics.mean_absolute_error(y_test,y_pred))\nprint(\"MSE:\",metrics.mean_squared_error(y_test,y_pred))\nrmse=np.sqrt(metrics.mean_squared_error(y_test,y_pred))\nprint(\"RMSE:\",rmse)", "MAE: 1170.6429886388503\nMSE: 4356181.121038173\nRMSE: 2087.1466457913716\n" ], [ "rmse/(max(y)-min(y))", "_____no_output_____" ], [ "metrics.r2_score(y_test,y_pred)", "_____no_output_____" ], [ "import pickle\n# open a file, where you ant to store the data\nfile = open('flight_fare_pred.pkl', 'wb')\n\n# dump information to that file\npickle.dump(rf, file)", "_____no_output_____" ], [ "model = open('flight_fare_pred.pkl','rb')\nforest = pickle.load(model)", "_____no_output_____" ], [ "y_prediction = forest.predict(X_test)", "_____no_output_____" ], [ "metrics.r2_score(y_test, y_prediction)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a9d3a04516c1f6a197fae90f7119bbd4deba39
45,211
ipynb
Jupyter Notebook
notebooks/rl/berater-v5.ipynb
subodhchhabra/ai
7a2b5942e8761d436a33597e94bef8b42e2cf632
[ "MIT" ]
null
null
null
notebooks/rl/berater-v5.ipynb
subodhchhabra/ai
7a2b5942e8761d436a33597e94bef8b42e2cf632
[ "MIT" ]
null
null
null
notebooks/rl/berater-v5.ipynb
subodhchhabra/ai
7a2b5942e8761d436a33597e94bef8b42e2cf632
[ "MIT" ]
null
null
null
62.188446
18,914
0.634713
[ [ [ "<a href=\"https://colab.research.google.com/github/DJCordhose/ai/blob/master/notebooks/rl/berater-v5.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Berater Environment v5\n\n## Changes from v4\n 1. encode observation to local one\n 1. non existing connection has highest penalty\n \n## next steps\n 1. use complex customer graph \n 1. per episode set certain rewards to 0 to simulate different customers per consultant\n 1. make sure things generalize well\n\n", "_____no_output_____" ], [ "## Installation (required for colab)", "_____no_output_____" ] ], [ [ "!pip install git+https://github.com/openai/baselines >/dev/null\n!pip install gym >/dev/null", "_____no_output_____" ] ], [ [ "## Environment", "_____no_output_____" ] ], [ [ "import numpy\nimport gym\nfrom gym.utils import seeding\nfrom gym import spaces\n\ndef state_name_to_int(state):\n state_name_map = {\n 'S': 0,\n 'A': 1,\n 'B': 2,\n 'C': 3,\n 'D': 4,\n 'E': 5,\n 'F': 6,\n 'G': 7,\n 'H': 8,\n 'K': 9,\n 'L': 10,\n 'M': 11,\n 'N': 12,\n 'O': 13\n }\n return state_name_map[state]\n\ndef int_to_state_name(state_as_int):\n state_map = {\n 0: 'S',\n 1: 'A',\n 2: 'B',\n 3: 'C',\n 4: 'D',\n 5: 'E',\n 6: 'F',\n 7: 'G',\n 8: 'H',\n 9: 'K',\n 10: 'L',\n 11: 'M',\n 12: 'N',\n 13: 'O'\n }\n return state_map[state_as_int]\n \nclass BeraterEnv(gym.Env):\n \"\"\"\n The Berater Problem\n\n Actions: \n There are 4 discrete deterministic actions, each choosing one direction\n \"\"\"\n metadata = {'render.modes': ['ansi']}\n \n showStep = False\n showDone = True\n envEpisodeModulo = 100\n\n def __init__(self):\n self.map = {\n 'S': [('A', 100), ('B', 400), ('C', 200 )],\n 'A': [('B', 250), ('C', 400), ('S', 100 )],\n 'B': [('A', 250), ('C', 250), ('S', 400 )],\n 'C': [('A', 400), ('B', 250), ('S', 200 )]\n }\n# self.map = {\n# 'S': [('A', 300), ('B', 100), ('C', 200 )],\n# 'A': [('S', 300), ('B', 100), ('E', 100 ), ('D', 100 )],\n# 'B': [('S', 100), ('A', 100), ('C', 50 ), ('K', 200 )],\n# 'C': [('S', 200), ('B', 50), ('M', 100 ), ('L', 200 )],\n# 'D': [('A', 100), ('F', 50)],\n# 'E': [('A', 100), ('F', 100), ('H', 100)],\n# 'F': [('D', 50), ('E', 100), ('G', 200)],\n# 'G': [('F', 200), ('O', 300)],\n# 'H': [('E', 100), ('K', 300)],\n# 'K': [('B', 200), ('H', 300)],\n# 'L': [('C', 200), ('M', 50)],\n# 'M': [('C', 100), ('L', 50), ('N', 100)],\n# 'N': [('M', 100), ('O', 100)],\n# 'O': [('N', 100), ('G', 300)]\n# }\n self.action_space = spaces.Discrete(4)\n # position, and up to 4 paths from that position, non existing path is -1000 and no position change\n self.observation_space = spaces.Box(low=numpy.array([0,-1000,-1000,-1000,-1000]),\n high=numpy.array([13,1000,1000,1000,1000]),\n dtype=numpy.float32)\n self.reward_range = (-1, 1)\n\n self.totalReward = 0\n self.stepCount = 0\n self.isDone = False\n\n self.envReward = 0\n self.envEpisodeCount = 0\n self.envStepCount = 0\n\n self.reset()\n self.optimum = self.calculate_customers_reward()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def iterate_path(self, state, action):\n paths = self.map[state]\n if action < len(paths):\n return paths[action]\n else:\n # sorry, no such action, stay where you are and pay a high penalty\n return (state, 1000)\n \n def step(self, action):\n destination, cost = self.iterate_path(self.state, action)\n lastState = self.state\n customerReward = self.customer_reward[destination]\n reward = (customerReward 
- cost) / self.optimum\n\n self.state = destination\n self.customer_visited(destination)\n done = destination == 'S' and self.all_customers_visited()\n\n stateAsInt = state_name_to_int(self.state)\n self.totalReward += reward\n self.stepCount += 1\n self.envReward += reward\n self.envStepCount += 1\n\n if self.showStep:\n print( \"Episode: \" + (\"%4.0f \" % self.envEpisodeCount) + \n \" Step: \" + (\"%4.0f \" % self.stepCount) + \n lastState + ' --' + str(action) + '-> ' + self.state + \n ' R=' + (\"% 2.2f\" % reward) + ' totalR=' + (\"% 3.2f\" % self.totalReward) + \n ' cost=' + (\"%4.0f\" % cost) + ' customerR=' + (\"%4.0f\" % customerReward) + ' optimum=' + (\"%4.0f\" % self.optimum) \n )\n\n if done and not self.isDone:\n self.envEpisodeCount += 1\n if BeraterEnv.showDone:\n episodes = BeraterEnv.envEpisodeModulo\n if (self.envEpisodeCount % BeraterEnv.envEpisodeModulo != 0):\n episodes = self.envEpisodeCount % BeraterEnv.envEpisodeModulo\n print( \"Done: \" + \n (\"episodes=%6.0f \" % self.envEpisodeCount) + \n (\"avgSteps=%6.2f \" % (self.envStepCount/episodes)) + \n (\"avgTotalReward=% 3.2f\" % (self.envReward/episodes) )\n )\n if (self.envEpisodeCount%BeraterEnv.envEpisodeModulo) == 0:\n self.envReward = 0\n self.envStepCount = 0\n\n self.isDone = done\n observation = self.getObservation(stateAsInt)\n info = {\"from\": self.state, \"to\": destination}\n\n return observation, reward, done, info\n\n def getObservation(self, position):\n result = numpy.array([ position, \n self.getPathObservation(position, 0),\n self.getPathObservation(position, 1),\n self.getPathObservation(position, 2),\n self.getPathObservation(position, 3)\n ],\n dtype=numpy.float32)\n return result\n\n def getPathObservation(self, position, path):\n source = int_to_state_name(position)\n paths = self.map[self.state]\n if path < len(paths):\n target, cost = paths[path]\n reward = self.customer_reward[target] \n result = reward - cost\n else:\n result = -1000\n\n return result\n\n def customer_visited(self, customer):\n self.customer_reward[customer] = 0\n\n def all_customers_visited(self):\n return self.calculate_customers_reward() == 0\n\n def calculate_customers_reward(self):\n sum = 0\n for value in self.customer_reward.values():\n sum += value\n return sum\n\n def reset(self):\n self.totalReward = 0\n self.stepCount = 0\n self.isDone = False\n reward_per_customer = 1000\n self.customer_reward = {\n 'S': 0,\n 'A': reward_per_customer,\n 'B': reward_per_customer,\n 'C': reward_per_customer,\n# 'D': reward_per_customer,\n# 'E': reward_per_customer,\n# 'F': reward_per_customer,\n# 'G': reward_per_customer,\n# 'H': reward_per_customer,\n# 'K': reward_per_customer,\n# 'L': reward_per_customer,\n# 'M': reward_per_customer,\n# 'N': reward_per_customer,\n# 'O': reward_per_customer\n }\n\n self.state = 'S'\n return self.getObservation(state_name_to_int(self.state))", "_____no_output_____" ] ], [ [ "# Try out Environment", "_____no_output_____" ] ], [ [ "BeraterEnv.showStep = True\nBeraterEnv.showDone = True\n\nenv = BeraterEnv()\nprint(env)\nobservation = env.reset()\nprint(observation)\n\nfor t in range(1000):\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n if done:\n print(\"Episode finished after {} timesteps\".format(t+1))\n break\nenv.close()\nprint(observation)", "<BeraterEnv instance>\n[ 0. 900. 600. 800. 
-1000.]\nEpisode: 0 Step: 1 S --0-> A R= 0.30 totalR= 0.30 cost= 100 customerR=1000 optimum=3000\nEpisode: 0 Step: 2 A --3-> A R=-0.33 totalR=-0.03 cost=1000 customerR= 0 optimum=3000\nEpisode: 0 Step: 3 A --1-> C R= 0.20 totalR= 0.17 cost= 400 customerR=1000 optimum=3000\nEpisode: 0 Step: 4 C --0-> A R=-0.13 totalR= 0.03 cost= 400 customerR= 0 optimum=3000\nEpisode: 0 Step: 5 A --3-> A R=-0.33 totalR=-0.30 cost=1000 customerR= 0 optimum=3000\nEpisode: 0 Step: 6 A --3-> A R=-0.33 totalR=-0.63 cost=1000 customerR= 0 optimum=3000\nEpisode: 0 Step: 7 A --3-> A R=-0.33 totalR=-0.97 cost=1000 customerR= 0 optimum=3000\nEpisode: 0 Step: 8 A --3-> A R=-0.33 totalR=-1.30 cost=1000 customerR= 0 optimum=3000\nEpisode: 0 Step: 9 A --1-> C R=-0.13 totalR=-1.43 cost= 400 customerR= 0 optimum=3000\nEpisode: 0 Step: 10 C --3-> C R=-0.33 totalR=-1.77 cost=1000 customerR= 0 optimum=3000\nEpisode: 0 Step: 11 C --1-> B R= 0.25 totalR=-1.52 cost= 250 customerR=1000 optimum=3000\nEpisode: 0 Step: 12 B --2-> S R=-0.13 totalR=-1.65 cost= 400 customerR= 0 optimum=3000\nDone: episodes= 1 avgSteps= 12.00 avgTotalReward=-1.65\nEpisode finished after 12 timesteps\n[ 0. -100. -400. -200. -1000.]\n" ] ], [ [ "# Train model\n\n* 0.73 would be perfect total reward", "_____no_output_____" ] ], [ [ "import tensorflow as tf\ntf.logging.set_verbosity(tf.logging.ERROR)\nprint(tf.__version__)", "1.12.0\n" ], [ "!rm -r logs\n!mkdir logs\n!mkdir logs/berater", "_____no_output_____" ], [ "# https://github.com/openai/baselines/blob/master/baselines/deepq/experiments/train_pong.py\n# log_dir = logger.get_dir()\nlog_dir = '/content/logs/berater/'\n\nimport gym\nfrom baselines import bench\nfrom baselines import logger\n\nfrom baselines.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom baselines.common.vec_env.vec_monitor import VecMonitor\nfrom baselines.ppo2 import ppo2\n\nBeraterEnv.showStep = False\nBeraterEnv.showDone = False\n\nenv = BeraterEnv()\n\nwrapped_env = DummyVecEnv([lambda: BeraterEnv()])\nmonitored_env = VecMonitor(wrapped_env, log_dir)\n\n# https://github.com/openai/baselines/blob/master/baselines/ppo2/ppo2.py\nmodel = ppo2.learn(network='mlp', env=monitored_env, total_timesteps=50000)\n\n# monitored_env = bench.Monitor(env, log_dir)\n# https://en.wikipedia.org/wiki/Q-learning#Influence_of_variables\n# %time model = deepq.learn(\\\n# monitored_env,\\\n# seed=42,\\\n# network='mlp',\\\n# lr=1e-3,\\\n# gamma=0.99,\\\n# total_timesteps=30000,\\\n# buffer_size=50000,\\\n# exploration_fraction=0.5,\\\n# exploration_final_eps=0.02,\\\n# print_freq=1000)\n\nmodel.save('berater-ppo-v4.pkl')\nmonitored_env.close()", "Logging to /tmp/openai-2019-01-03-11-18-00-870090\n-------------------------------------\n| approxkl | 0.0015796605 |\n| clipfrac | 0.0 |\n| eplenmean | 11.2 |\n| eprewmean | -0.6935 |\n| explained_variance | -0.785 |\n| fps | 159 |\n| nupdates | 1 |\n| policy_entropy | 1.3847406 |\n| policy_loss | -0.013725469 |\n| serial_timesteps | 2048 |\n| time_elapsed | 12.8 |\n| total_timesteps | 2048 |\n| value_loss | 0.28990766 |\n-------------------------------------\n-------------------------------------\n| approxkl | 0.0029727407 |\n| clipfrac | 0.012329102 |\n| eplenmean | 5.06 |\n| eprewmean | 0.55383337 |\n| explained_variance | 0.689 |\n| fps | 417 |\n| nupdates | 10 |\n| policy_entropy | 0.90667784 |\n| policy_loss | -0.027320659 |\n| serial_timesteps | 20480 |\n| time_elapsed | 73.8 |\n| total_timesteps | 20480 |\n| value_loss | 0.011048271 
|\n-------------------------------------\n--------------------------------------\n| approxkl | 0.00030720897 |\n| clipfrac | 0.005004883 |\n| eplenmean | 4.02 |\n| eprewmean | 0.7226666 |\n| explained_variance | 0.982 |\n| fps | 438 |\n| nupdates | 20 |\n| policy_entropy | 0.11884171 |\n| policy_loss | -0.008551375 |\n| serial_timesteps | 40960 |\n| time_elapsed | 119 |\n| total_timesteps | 40960 |\n| value_loss | 0.00082263123 |\n--------------------------------------\n" ] ], [ [ "### Visualizing Results\n\nhttps://github.com/openai/baselines/blob/master/docs/viz/viz.ipynb", "_____no_output_____" ] ], [ [ "!ls -l $log_dir", "total 228\n-rw-r--r-- 1 root root 229608 Jan 3 11:20 monitor.csv\n" ], [ "from baselines.common import plot_util as pu\nresults = pu.load_results(log_dir)", "/usr/local/lib/python3.6/dist-packages/baselines/bench/monitor.py:164: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access\n df.headers = headers # HACK to preserve backwards compatibility\n" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\nr = results[0]\n# plt.ylim(-1, 1)\n# plt.plot(np.cumsum(r.monitor.l), r.monitor.r)", "_____no_output_____" ], [ "plt.plot(np.cumsum(r.monitor.l), pu.smooth(r.monitor.r, radius=100))", "_____no_output_____" ] ], [ [ "# Enjoy model", "_____no_output_____" ] ], [ [ "import numpy as np \n\nobservation = env.reset()\nstate = np.zeros((1, 2*128))\ndones = np.zeros((1))\n\nBeraterEnv.showStep = True\nBeraterEnv.showDone = False\n\nfor t in range(1000):\n actions, _, state, _ = model.step(observation, S=state, M=dones)\n observation, reward, done, info = env.step(actions[0])\n if done:\n print(\"Episode finished after {} timesteps\".format(t+1))\n break\nenv.close()", "Episode: 0 Step: 1 S --0-> A R= 0.30 totalR= 0.30 cost= 100 customerR=1000 optimum=3000\nEpisode: 0 Step: 2 A --0-> B R= 0.25 totalR= 0.55 cost= 250 customerR=1000 optimum=3000\nEpisode: 0 Step: 3 B --1-> C R= 0.25 totalR= 0.80 cost= 250 customerR=1000 optimum=3000\nEpisode: 0 Step: 4 C --2-> S R=-0.07 totalR= 0.73 cost= 200 customerR= 0 optimum=3000\nEpisode finished after 4 timesteps\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0a9d9057a62ddbd018574a75f59cc2c0518e4cf
640,798
ipynb
Jupyter Notebook
data_acquisition/Stacked Charts Opinion Sections.ipynb
c4fcm/oped-gender-report
86628020202c66c82821f714eb3b7342331597ca
[ "MIT" ]
2
2015-02-22T09:52:24.000Z
2015-03-28T11:38:15.000Z
data_acquisition/Stacked Charts Opinion Sections.ipynb
c4fcm/oped-gender-report
86628020202c66c82821f714eb3b7342331597ca
[ "MIT" ]
null
null
null
data_acquisition/Stacked Charts Opinion Sections.ipynb
c4fcm/oped-gender-report
86628020202c66c82821f714eb3b7342331597ca
[ "MIT" ]
null
null
null
607.967742
35,335
0.930818
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0a9e39dd5f46b6939e0966120bbe1b9e78714ac
415,376
ipynb
Jupyter Notebook
HHL.ipynb
bluefoxZero/Solving-Linear-Equations-Quantumly
de5c7832018fd6a79cfb15c7fa82ba45dfbd9930
[ "MIT" ]
2
2019-09-13T10:28:00.000Z
2019-09-17T16:37:53.000Z
HHL.ipynb
bluefoxZero/Solving-Linear-Equations-Quantumly
de5c7832018fd6a79cfb15c7fa82ba45dfbd9930
[ "MIT" ]
null
null
null
HHL.ipynb
bluefoxZero/Solving-Linear-Equations-Quantumly
de5c7832018fd6a79cfb15c7fa82ba45dfbd9930
[ "MIT" ]
1
2020-09-12T13:01:25.000Z
2020-09-12T13:01:25.000Z
1,579.376426
196,900
0.960219
[ [ [ "<h1><center>Solving Linear Equations with Quantum Circuits</center></h1>", "_____no_output_____" ], [ "<h2><center>Ax = b</center></h2>\n<h4><center> Attempt to replicate the following paper </center></h4>", "_____no_output_____" ], [ "![image.png](attachment:image.png)", "_____no_output_____" ], [ "<h3><center>Algorithm for a simpler 2 x 2 example</center></h3>\n\n![image.png](attachment:image.png)", "_____no_output_____" ], [ "![image.png](attachment:image.png)", "_____no_output_____" ], [ "$\\newcommand{\\ket}[1]{\\left|{#1}\\right\\rangle}$\n$\\newcommand{\\bra}[1]{\\left\\langle{#1}\\right|}$\nThe Final state looks like: \n$$ \\ket{\\psi} = \\sum_{j=1}^N \\beta_j \\left( \\sqrt{1-\\frac{C^2}{\\lambda_j^2}} \\ket{0} + \\frac{C}{\\lambda_j} \\ket{1} \\right) \\ket{00} \\ket{u_j} $$", "_____no_output_____" ] ], [ [ "#Solving a linear system of equation for 2 dimensional equantion of the form Ax = b\n\n### code specific initialization (importing libraries)\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\nfrom math import *\nimport scipy\n\n# importing Qiskit\nfrom qiskit import IBMQ, BasicAer\n#from qiskit.providers.ibmq import least_busy\nfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute\n\n# import basic plot tools\nfrom qiskit.tools.visualization import plot_histogram\n\nfrom qiskit.quantum_info.synthesis import euler_angles_1q \nfrom cmath import exp\n\n### problem specific parameters\n\n# matrix representation of linear equation\nA = 0.5*np.array([[3,1],[1,3]])\nt0 = 2*pi #time paramter appearing in the unitary\nr = 4", "_____no_output_____" ], [ "q = QuantumRegister(4, 'q')\nc = ClassicalRegister(1, 'c')\n\nqpe = QuantumCircuit(q,c)\nqpe.h(q[3])\nqpe.barrier()\n\nqpe.h(q[1])\nqpe.h(q[2])\n\n# 1st unitary corresponding to A\nUA = scipy.linalg.expm(complex(0,1)*A*t0/4)\n[theta, phi, lmda] = euler_angles_1q(UA)\nqpe.cu3(theta, phi, lmda,q[2],q[3])\n\n# 2nd unitary corresponding to A\nUA = scipy.linalg.expm(complex(0,1)*A*2*t0/4)\n[theta, phi, lmda] = euler_angles_1q(UA)\nqpe.cu3(theta, phi, lmda,q[1],q[3])\nqpe.barrier()\n\n# quantum fourier transform\nqpe.swap(q[1],q[2])\nqpe.h(q[2])\nqpe.cu1(-pi/2,q[1],q[2])\nqpe.h(q[1])\nqpe.swap(q[1],q[2])\n\nqpe.barrier()\n\n#controlled rotations gate\nqpe.cry(2*pi/(2**r),q[1],q[0])\nqpe.cry(pi/(2**r),q[2],q[0])\n\nqpe.barrier()\n\nqpe.draw(output=\"mpl\")", "_____no_output_____" ], [ "###############################################################\n\n### uncomputation\n# reversing fourier transform\nqpe.swap(q[1],q[2])\nqpe.h(q[1])\nqpe.cu1(pi/2,q[1],q[2])\nqpe.h(q[2])\nqpe.swap(q[1],q[2])\n\n# reversing 2nd unitary corresponding to A\nUA = scipy.linalg.expm(complex(0,-1)*A*2*t0/4)\n[theta, phi, lmda] = euler_angles_1q(UA)\nqpe.cu3(theta, phi, lmda,q[1],q[3])\n\n# reversing 1st unitary corresponding to A\nUA = scipy.linalg.expm(complex(0,-1)*A*t0/4)\n[theta, phi, lmda] = euler_angles_1q(UA)\nqpe.cu3(theta, phi, lmda,q[2],q[3])\n\nqpe.h(q[1])\nqpe.h(q[2])\n\nqpe.barrier()\nqpe.draw(output=\"mpl\")\n\nqpe.measure(q[0], c[0])\nqpe.draw(output=\"mpl\")", "_____no_output_____" ], [ "circuit = qpe\nsimulator = BasicAer.get_backend('qasm_simulator')\nresult = execute(circuit, backend = simulator, shots = 2048).result()\ncounts = result.get_counts()\nfrom qiskit.tools.visualization import plot_histogram\nplot_histogram(counts)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
d0a9f3018b6f70ec869759547ca0763652ffaff9
134,542
ipynb
Jupyter Notebook
notebooks/from_polygon.ipynb
ajstewart/mocpy
e74a118cab9334b5881a4bb619de3bafc671c7d8
[ "BSD-3-Clause" ]
34
2017-07-24T10:11:20.000Z
2022-02-23T06:22:59.000Z
notebooks/from_polygon.ipynb
ajstewart/mocpy
e74a118cab9334b5881a4bb619de3bafc671c7d8
[ "BSD-3-Clause" ]
55
2017-10-17T12:05:04.000Z
2022-03-30T21:31:06.000Z
notebooks/from_polygon.ipynb
ajstewart/mocpy
e74a118cab9334b5881a4bb619de3bafc671c7d8
[ "BSD-3-Clause" ]
19
2017-10-17T09:51:04.000Z
2022-01-07T21:09:23.000Z
134,542
134,542
0.957114
[ [ [ "from mocpy import MOC\nimport numpy as np\n\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\n%matplotlib inline", "_____no_output_____" ], [ "# Plot the polygon vertices on a matplotlib axis\ndef plot_graph(vertices):\n import matplotlib.pyplot as plt\n from matplotlib import path, patches\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n p = path.Path(vertices)\n patch = patches.PathPatch(p, facecolor='orange', lw=2)\n ax.add_patch(patch)", "_____no_output_____" ], [ "# Methods for defining random polygons\ndef generate_rand_polygon(num_points):\n lon_min, lon_max = (-5, 5)\n lat_min, lat_max = (-5, 5)\n lon = (np.random.random(num_points) * (lon_max - lon_min) + lon_min) * u.deg\n lat = (np.random.random(num_points) * (lat_max - lat_min) + lat_min) * u.deg\n \n vertices = np.vstack((lon.to_value(), lat.to_value())).T\n return vertices\n\ndef generate_concave_polygon(num_points, lon_offset, lat_offset):\n delta_ang = (2 * np.pi) / num_points\n radius_max = 10\n \n angles = np.linspace(0, 2 * np.pi, num_points)\n radius = np.random.random(angles.shape[0]) * radius_max\n \n lon = (np.cos(angles) * radius + lon_offset) * u.deg\n lat = (np.sin(angles) * radius + lat_offset) * u.deg\n \n vertices = np.vstack((lon.to_value(), lat.to_value())).T\n return vertices\n\ndef generate_convexe_polygon(num_points, lon_offset, lat_offset):\n delta_ang = (2 * np.pi) / num_points\n radius_max = 10\n \n angles = np.linspace(0, 2 * np.pi, num_points)\n radius = np.random.random() * radius_max * np.ones(angles.shape[0])\n \n lon = (np.cos(angles) * radius + lon_offset) * u.deg\n lat = (np.sin(angles) * radius + lat_offset) * u.deg\n \n vertices = np.vstack((lon.to_value(), lat.to_value())).T\n return vertices\n\n#vertices = generate_convexe_polygon(20, 10, 5)\nvertices = generate_concave_polygon(20, 10, 5)", "_____no_output_____" ], [ "def plot(moc, skycoord):\n from matplotlib import path, patches\n import matplotlib.pyplot as plt\n\n fig = plt.figure(figsize=(10, 10))\n from mocpy import World2ScreenMPL\n\n from astropy.coordinates import Angle\n with World2ScreenMPL(fig, \n fov=20 * u.deg,\n center=SkyCoord(10, 5, unit='deg', frame='icrs'),\n coordsys=\"icrs\",\n rotation=Angle(0, u.degree),\n projection=\"TAN\") as wcs:\n ax = fig.add_subplot(1, 1, 1, projection=wcs)\n\n moc.fill(ax=ax, wcs=wcs, edgecolor='r', facecolor='r', linewidth=1.0, fill=True, alpha=0.5)\n\n from astropy.wcs.utils import skycoord_to_pixel\n x, y = skycoord_to_pixel(skycoord, wcs)\n p = path.Path(np.vstack((x, y)).T)\n patch = patches.PathPatch(p, facecolor='green', alpha=0.25, lw=2)\n ax.add_patch(patch)\n \n plt.xlabel('ra')\n plt.ylabel('dec')\n plt.grid(color='black', ls='dotted')\n plt.title('from polygon')\n plt.show()\n plt.close()", "_____no_output_____" ], [ "# Convert the vertices to lon and lat astropy quantities\nlon, lat = vertices[:, 0] * u.deg, vertices[:, 1] * u.deg\n\nskycoord = SkyCoord(lon, lat, unit=\"deg\", frame=\"icrs\")\n\n# Define a vertex that is said to belongs to the MOC.\n# It is important as there is no way on the sphere to know the area from\n# which we want to build the MOC (a set of vertices delimits two finite areas).\n%time moc = MOC.from_polygon(lon=lon, lat=lat, max_depth=12)\nplot(moc, skycoord)", "CPU times: user 14 ms, sys: 554 µs, total: 14.6 ms\nWall time: 14.1 ms\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0a9f7a7b595daa28c66032bac6df28fef9dbef5
33,144
ipynb
Jupyter Notebook
notebooks/Untitled.ipynb
buffbob/air_bus
957a3263ecffb64d9812ca20752af1c7c4b745ee
[ "MIT" ]
null
null
null
notebooks/Untitled.ipynb
buffbob/air_bus
957a3263ecffb64d9812ca20752af1c7c4b745ee
[ "MIT" ]
null
null
null
notebooks/Untitled.ipynb
buffbob/air_bus
957a3263ecffb64d9812ca20752af1c7c4b745ee
[ "MIT" ]
null
null
null
20.548047
111
0.380129
[ [ [ "import json, sys, random, os, warnings, argparse, time, concurrent.futures\nfrom air_bus.airbus_utils import rle_decode, rle_encode, save_img, same_id\nfrom air_bus.decorate import profile\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom PIL import Image", "_____no_output_____" ], [ "masks_data = \"/media/thistle/Passport/Kaggle_Data/airbus/fromkaggle/train_ship_segmentations_v2.csv\"\nmasks = pd.read_csv(masks_data)\n# removing masks with no ships bc of large class imbalance\nwith_ships = masks.dropna(axis=0)\n# now find the unique ImageId names\nunique_files = list(with_ships.ImageId.value_counts().index)", "_____no_output_____" ], [ "files1000 = unique_files.copy()[:1000]", "_____no_output_____" ], [ "target = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT\"\n\nnum = len(files1000) # 1,000\n# define \ntest_img_path = os.path.join(target, \"Images\",\"test\", \"images\")\ntrain_img_path = os.path.join(target, \"Images\",\"train\", \"images\")\n\ntest_mask_path = os.path.join(target, \"Masks\", \"test\", \"masks\")\ntrain_mask_path = os.path.join(target, \"Masks\", \"train\", \"masks\")", "_____no_output_____" ], [ "# os.makedirs(test_img_path)\n# os.makedirs(train_img_path)\nos.makedirs(test_mask_path)\nos.makedirs(train_mask_path)", "_____no_output_____" ], [ "idxs = list(range(num_files_created))\nnp.random.seed(101)\nnp.random.shuffle(idxs)", "_____no_output_____" ], [ "split = .8\nnum_train = int(num*split)", "_____no_output_____" ], [ "train_ids = idxs[:num_train]\ntest_ids = idxs[num_train:]", "_____no_output_____" ], [ "#create the 800 paths to new images\ntrain_paths = [files1000[each] for each in train_ids]\ntest_paths = [files1000[each] for each in test_ids]", "_____no_output_____" ], [ "#train_paths[1:11]", "_____no_output_____" ], [ "source_images = \"/media/thistle/Passport/Kaggle_Data/airbus/fromkaggle/train_v2\"", "_____no_output_____" ], [ "train_img_path, test_img_path", "_____no_output_____" ], [ "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/train/images", "_____no_output_____" ], [ "def copy_train_image(filename, source=source_images, target=train_img_path):\n r = tf.io.read_file(os.path.join(source,filename))\n tf.io.write_file(os.path.join(target,filename), r)\n \ndef copy_test_image(filename, source =source_images, target=test_img_path):\n r = tf.io.read_file(os.path.join(source,filename))\n tf.io.write_file(os.path.join(target,filename), r) ", "_____no_output_____" ], [ "len(os.listdir(\"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/train/images\"))", "_____no_output_____" ], [ "len(os.listdir(\"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/test/images\"))", "_____no_output_____" ], [ "[copy_train_image(each) for each in train_paths]", "_____no_output_____" ], [ "%%time\n[copy_test_image(each) for each in test_paths]", "CPU times: user 209 ms, sys: 169 ms, total: 379 ms\nWall time: 6.15 s\n" ], [ "# now for dividing masks up", "_____no_output_____" ], [ "source_masks = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/combined_masks\"\ntest_masks_dir = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Masks/test/masks\"\ntrain_masks_dir = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Masks/train/masks\"", "_____no_output_____" ], [ "def copy_train_masks(filename, source=source_masks, target=train_masks_dir):\n r = tf.io.read_file(os.path.join(source,filename))\n tf.io.write_file(os.path.join(target,filename), r)", 
"_____no_output_____" ], [ "%%time\nwith concurrent.futures.ThreadPoolExecutor() as executor_train:\n train_result = executor_train.map(copy_train_masks, train_paths)\n\nlength = len(os.listdir(train_masks_dir))\nprint(f\"train files copied: {length}\")\n", "train files copied: 800\nCPU times: user 248 ms, sys: 89.9 ms, total: 338 ms\nWall time: 256 ms\n" ], [ "def copy_test_masks(filename, source=source_masks, target=test_masks_dir):\n r = tf.io.read_file(os.path.join(source,filename))\n tf.io.write_file(os.path.join(target,filename), r)", "_____no_output_____" ], [ "%%time\nwith concurrent.futures.ThreadPoolExecutor() as executor_test_mask:\n mask_test_result = executor_test_mask.map(copy_test_masks, test_paths)\n\nlength = len(os.listdir(test_masks_dir))\nprint(f\"train files copied: {length}\")\n", "train files copied: 200\nCPU times: user 86 ms, sys: 29 ms, total: 115 ms\nWall time: 93.7 ms\n" ], [ "def copy_train_masks(filename, source=source_masks, target=train_masks_dir):\n r = tf.io.read_file(os.path.join(source,filename))\n tf.io.write_file(os.path.join(target,filename), r)", "_____no_output_____" ], [ "with concurrent.futures.ThreadPoolExecutor() as executor_train:\n train_result = executor_train.map(copy_train_masks, train_paths)\nlength = len(os.listdir(train_masks_dir))\nprint(f\"{length} train-mask files copied\")", "800train mask files copied\n" ], [ "randidxs = np.random.randint(1,200,size=5)\nrandidxs", "_____no_output_____" ], [ "testMasks = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Masks/test/masks\"\ntrainMasks = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Masks/train/masks\"\ntestImg = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/test/images\"\ntrainImg = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/test/images\"", "_____no_output_____" ], [ "testm = sorted(os.listdir(testMasks))[:5]\ntrainm = sorted(os.listdir(testImg))[:5]\nfor i in range(5):\n if not testm[i] == trainm[i]:\n print(\"shit\")\n break\n else:\n print(\"swell\")", "swell\nswell\nswell\nswell\nswell\n" ], [ "#####################DELETE##########################33", "_____no_output_____" ], [ "testMasks = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/MASKS/test/masks\"\ntrainMasks = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/MASKS/train/masks\"\ntestImg = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/IMAGES/test/images\"\ntrainImg = \"/media/thistle/Passport/Kaggle_Data/airbus/updated_data/iMAGES/test/images\"", "_____no_output_____" ], [ "testm = sorted(os.listdir(testMasks))[:50]\ntrainm = sorted(os.listdir(testImg))[:50]\nfor i in range(5):\n if not testm[i] == trainm[i]:\n print(\"shit\")\n break\n else:\n print(\"swell\")", "shit\n" ], [ "a = list (range(11))\nnp.random.seed(3)\nnp.random.shuffle(a)\na", "_____no_output_____" ], [ "a = list (range(11))\nnp.random.seed(3)\nnp.random.shuffle(a)\na", "_____no_output_____" ], [ "aa = list(range(11))", "_____no_output_____" ], [ "np.random.shuffle(aa)", "_____no_output_____" ], [ "aa", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0a9fb44f7763f12a5780ff9c174dd1b04a8c333
15,441
ipynb
Jupyter Notebook
docs/tutorials/mcmc_sampling.ipynb
JohannesBuchner/gammapy
48769519f04b7df7b3e4580ebb61396445790bc3
[ "BSD-3-Clause" ]
1
2021-02-02T21:35:27.000Z
2021-02-02T21:35:27.000Z
docs/tutorials/mcmc_sampling.ipynb
kabartay/gammapy
015206d2418b1d254f1c9d3ea819ab0c5ece99e9
[ "BSD-3-Clause" ]
null
null
null
docs/tutorials/mcmc_sampling.ipynb
kabartay/gammapy
015206d2418b1d254f1c9d3ea819ab0c5ece99e9
[ "BSD-3-Clause" ]
null
null
null
34.086093
554
0.596464
[ [ [ "# MCMC sampling using the emcee package\n\n## Introduction\n\nThe goal of Markov Chain Monte Carlo (MCMC) algorithms is to approximate the posterior distribution of your model parameters by random sampling in a probabilistic space. For most readers this sentence was probably not very helpful so here we'll start straight with and example but you should read the more detailed mathematical approaches of the method [here](https://www.pas.rochester.edu/~sybenzvi/courses/phy403/2015s/p403_17_mcmc.pdf) and [here](https://github.com/jakevdp/BayesianAstronomy/blob/master/03-Bayesian-Modeling-With-MCMC.ipynb).\n\n### How does it work ?\n\nThe idea is that we use a number of walkers that will sample the posterior distribution (i.e. sample the Likelihood profile).\n\nThe goal is to produce a \"chain\", i.e. a list of $\\theta$ values, where each $\\theta$ is a vector of parameters for your model.<br>\nIf you start far away from the truth value, the chain will take some time to converge until it reaches a stationary state. Once it has reached this stage, each successive elements of the chain are samples of the target posterior distribution.<br>\nThis means that, once we have obtained the chain of samples, we have everything we need. We can compute the distribution of each parameter by simply approximating it with the histogram of the samples projected into the parameter space. This will provide the errors and correlations between parameters.\n\n\nNow let's try to put a picture on the ideas described above. With this notebook, we have simulated and carried out a MCMC analysis for a source with the following parameters:<br>\n$Index=2.0$, $Norm=5\\times10^{-12}$ cm$^{-2}$ s$^{-1}$ TeV$^{-1}$, $Lambda =(1/Ecut) = 0.02$ TeV$^{-1}$ (50 TeV) for 20 hours.\n\nThe results that you can get from a MCMC analysis will look like this :\n\n<img src=\"images/gammapy_mcmc.png\" width=\"800\">\n\nOn the first two top panels, we show the pseudo-random walk of one walker from an offset starting value to see it evolve to a better solution.\nIn the bottom right panel, we show the trace of each 16 walkers for 500 runs (the chain described previsouly). For the first 100 runs, the parameter evolve towards a solution (can be viewed as a fitting step). Then they explore the local minimum for 400 runs which will be used to estimate the parameters correlations and errors.\nThe choice of the Nburn value (when walkers have reached a stationary stage) can be done by eye but you can also look at the autocorrelation time.\n\n### Why should I use it ?\n\nWhen it comes to evaluate errors and investigate parameter correlation, one typically estimate the Likelihood in a gridded search (2D Likelihood profiles). Each point of the grid implies a new model fitting. If we use 10 steps for each parameters, we will need to carry out 100 fitting procedures. \n\nNow let's say that I have a model with $N$ parameters, we need to carry out that gridded analysis $N*(N-1)$ times. \nSo for 5 free parameters you need 20 gridded search, resulting in 2000 individual fit. \nClearly this strategy doesn't scale well to high-dimensional models.\n\nJust for fun: if each fit procedure takes 10s, we're talking about 5h of computing time to estimate the correlation plots. \n\nThere are many MCMC packages in the python ecosystem but here we will focus on [emcee](https://emcee.readthedocs.io), a lightweight Python package. 
A description is provided here : [Foreman-Mackey, Hogg, Lang & Goodman (2012)](https://arxiv.org/abs/1202.3665).", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "import numpy as np\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom gammapy.irf import load_cta_irfs\nfrom gammapy.maps import WcsGeom, MapAxis\nfrom gammapy.modeling.models import (\n ExpCutoffPowerLawSpectralModel,\n GaussianSpatialModel,\n SkyModel,\n Models,\n FoVBackgroundModel,\n)\nfrom gammapy.datasets import MapDataset\nfrom gammapy.makers import MapDatasetMaker\nfrom gammapy.data import Observation\nfrom gammapy.modeling.sampling import (\n run_mcmc,\n par_to_model,\n plot_corner,\n plot_trace,\n)\nfrom gammapy.modeling import Fit", "_____no_output_____" ], [ "import logging\n\nlogging.basicConfig(level=logging.INFO)", "_____no_output_____" ] ], [ [ "## Simulate an observation\n\nHere we will start by simulating an observation using the `simulate_dataset` method.", "_____no_output_____" ] ], [ [ "irfs = load_cta_irfs(\n \"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits\"\n)\n\nobservation = Observation.create(\n pointing=SkyCoord(0 * u.deg, 0 * u.deg, frame=\"galactic\"),\n livetime=20 * u.h,\n irfs=irfs,\n)", "_____no_output_____" ], [ "# Define map geometry\naxis = MapAxis.from_edges(\n np.logspace(-1, 2, 15), unit=\"TeV\", name=\"energy\", interp=\"log\"\n)\n\ngeom = WcsGeom.create(\n skydir=(0, 0), binsz=0.05, width=(2, 2), frame=\"galactic\", axes=[axis]\n)\n\nempty_dataset = MapDataset.create(geom=geom, name=\"dataset-mcmc\")\nmaker = MapDatasetMaker(selection=[\"background\", \"edisp\", \"psf\", \"exposure\"])\ndataset = maker.run(empty_dataset, observation)", "_____no_output_____" ], [ "# Define sky model to simulate the data\nspatial_model = GaussianSpatialModel(\n lon_0=\"0 deg\", lat_0=\"0 deg\", sigma=\"0.2 deg\", frame=\"galactic\"\n)\n\nspectral_model = ExpCutoffPowerLawSpectralModel(\n index=2,\n amplitude=\"3e-12 cm-2 s-1 TeV-1\",\n reference=\"1 TeV\",\n lambda_=\"0.05 TeV-1\",\n)\n\nsky_model_simu = SkyModel(\n spatial_model=spatial_model, spectral_model=spectral_model, name=\"source\"\n)\n\nbkg_model = FoVBackgroundModel(dataset_name=\"dataset-mcmc\")\nmodels = Models([sky_model_simu, bkg_model])\nprint(models)", "_____no_output_____" ], [ "dataset.models = models\ndataset.fake()", "_____no_output_____" ], [ "dataset.counts.sum_over_axes().plot(add_cbar=True);", "_____no_output_____" ], [ "# If you want to fit the data for comparison with MCMC later\n# fit = Fit(dataset)\n# result = fit.run(optimize_opts={\"print_level\": 1})", "_____no_output_____" ] ], [ [ "## Estimate parameter correlations with MCMC\n\nNow let's analyse the simulated data.\nHere we just fit it again with the same model we had before as a starting point.\nThe data that would be needed are the following: \n- counts cube, psf cube, exposure cube and background model\n\nLuckily all those maps are already in the Dataset object.\n\nWe will need to define a Likelihood function and define priors on parameters.<br>\nHere we will assume a uniform prior reading the min, max parameters from the sky model.", "_____no_output_____" ], [ "### Define priors\n\nThis steps is a bit manual for the moment until we find a better API to define priors.<br>\nNote the you **need** to define priors for each parameter otherwise your walkers can explore uncharted territories (e.g. 
negative norms).", "_____no_output_____" ] ], [ [ "print(dataset)", "_____no_output_____" ], [ "# Define the free parameters and min, max values\nparameters = dataset.models.parameters\n\nparameters[\"sigma\"].frozen = True\nparameters[\"lon_0\"].frozen = True\nparameters[\"lat_0\"].frozen = True\nparameters[\"amplitude\"].frozen = False\nparameters[\"index\"].frozen = False\nparameters[\"lambda_\"].frozen = False\n\n\nparameters[\"norm\"].frozen = True\nparameters[\"tilt\"].frozen = True\n\nparameters[\"norm\"].min = 0.5\nparameters[\"norm\"].max = 2\n\nparameters[\"index\"].min = 1\nparameters[\"index\"].max = 5\nparameters[\"lambda_\"].min = 1e-3\nparameters[\"lambda_\"].max = 1\n\nparameters[\"amplitude\"].min = 0.01 * parameters[\"amplitude\"].value\nparameters[\"amplitude\"].max = 100 * parameters[\"amplitude\"].value\n\nparameters[\"sigma\"].min = 0.05\nparameters[\"sigma\"].max = 1\n\n# Setting amplitude init values a bit offset to see evolution\n# Here starting close to the real value\nparameters[\"index\"].value = 2.0\nparameters[\"amplitude\"].value = 3.2e-12\nparameters[\"lambda_\"].value = 0.05\n\nprint(dataset.models)\nprint(\"stat =\", dataset.stat_sum())", "_____no_output_____" ], [ "%%time\n# Now let's define a function to init parameters and run the MCMC with emcee\n# Depending on your number of walkers, Nrun and dimensionality, this can take a while (> minutes)\nsampler = run_mcmc(dataset, nwalkers=6, nrun=150) # to speedup the notebook\n# sampler=run_mcmc(dataset,nwalkers=12,nrun=1000) # more accurate contours", "_____no_output_____" ] ], [ [ "## Plot the results\n\nThe MCMC will return a sampler object containing the trace of all walkers.<br>\nThe most important part is the chain attribute which is an array of shape:<br>\n_(nwalkers, nrun, nfreeparam)_\n\nThe chain is then used to plot the trace of the walkers and estimate the burnin period (the time for the walkers to reach a stationary stage).", "_____no_output_____" ] ], [ [ "plot_trace(sampler, dataset)", "_____no_output_____" ], [ "plot_corner(sampler, dataset, nburn=50)", "_____no_output_____" ] ], [ [ "## Plot the model dispersion\n\nUsing the samples from the chain after the burn period, we can plot the different models compared to the truth model. To do this we need to the spectral models for each parameter state in the sample.", "_____no_output_____" ] ], [ [ "emin, emax = [0.1, 100] * u.TeV\nnburn = 50\n\nfig, ax = plt.subplots(1, 1, figsize=(12, 6))\n\nfor nwalk in range(0, 6):\n for n in range(nburn, nburn + 100):\n pars = sampler.chain[nwalk, n, :]\n\n # set model parameters\n par_to_model(dataset, pars)\n spectral_model = dataset.models[\"source\"].spectral_model\n\n spectral_model.plot(\n energy_range=(emin, emax),\n ax=ax,\n energy_power=2,\n alpha=0.02,\n color=\"grey\",\n )\n\n\nsky_model_simu.spectral_model.plot(\n energy_range=(emin, emax), energy_power=2, ax=ax, color=\"red\"\n);", "_____no_output_____" ] ], [ [ "## Fun Zone\n\nNow that you have the sampler chain, you have in your hands the entire history of each walkers in the N-Dimensional parameter space. 
<br>\nYou can for example trace the steps of each walker in any parameter space.", "_____no_output_____" ] ], [ [ "# Here we plot the trace of one walker in a given parameter space\nparx, pary = 0, 1\n\nplt.plot(sampler.chain[0, :, parx], sampler.chain[0, :, pary], \"ko\", ms=1)\nplt.plot(\n sampler.chain[0, :, parx],\n sampler.chain[0, :, pary],\n ls=\":\",\n color=\"grey\",\n alpha=0.5,\n)\n\nplt.xlabel(\"Index\")\nplt.ylabel(\"Amplitude\");", "_____no_output_____" ] ], [ [ "## PeVatrons in CTA ?\n\nNow it's your turn to play with this MCMC notebook. For example to test the CTA performance to measure a cutoff at very high energies (100 TeV ?).\n\nAfter defining your Skymodel it can be as simple as this :", "_____no_output_____" ] ], [ [ "# dataset = simulate_dataset(model, geom, pointing, irfs)\n# sampler = run_mcmc(dataset)\n# plot_trace(sampler, dataset)\n# plot_corner(sampler, dataset, nburn=200)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0aa0209122f089689de86d580dde16e3d5bacb8
311,748
ipynb
Jupyter Notebook
C3W2_Pix2PixHD_(Optional).ipynb
akanametov/Pix2PixHD
acea4c6fdd0e2897b5285a0ebbfb96f3387a08d9
[ "MIT" ]
null
null
null
C3W2_Pix2PixHD_(Optional).ipynb
akanametov/Pix2PixHD
acea4c6fdd0e2897b5285a0ebbfb96f3387a08d9
[ "MIT" ]
null
null
null
C3W2_Pix2PixHD_(Optional).ipynb
akanametov/Pix2PixHD
acea4c6fdd0e2897b5285a0ebbfb96f3387a08d9
[ "MIT" ]
null
null
null
222.042735
129,060
0.882328
[ [ [ "# Pix2PixHD\n\n*Please note that this is an optional notebook, meant to introduce more advanced concepts if you're up for a challenge, so don't worry if you don't completely follow!*\n\nIt is recommended that you should already be familiar with:\n - Residual blocks, from [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) (He et al. 2015)\n - Perceptual loss, from [Perceptual Losses for Real-Time Style Transfer and Super-Resolution](https://arxiv.org/abs/1603.08155) (Johnson et al. 2016)\n - VGG architecture, from [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556) (Simonyan et al. 2015)\n - Instance normalization (which you should know from StyleGAN), from [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022) (Ulyanov et al. 2017)\n - Reflection padding, which Pytorch has implemented in [torch.nn.ReflectionPad2d](https://pytorch.org/docs/stable/generated/torch.nn.ReflectionPad2d.html)\n\n**Goals**\n\nIn this notebook, you will learn about Pix2PixHD, which synthesizes high-resolution images from semantic label maps. Proposed in [High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs](https://arxiv.org/abs/1711.11585) (Wang et al. 2018), Pix2PixHD improves upon Pix2Pix via multiscale architecture, improved adversarial loss, and instance maps.", "_____no_output_____" ], [ "## Residual Blocks\n\nThe residual block, which is relevant in many state-of-the-art computer vision models, is used in all parts of Pix2PixHD. If you're not familiar with residual blocks, please take a look [here](https://paperswithcode.com/method/residual-block). Now, you'll start by first implementing a basic residual block.", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ResidualBlock(nn.Module):\n '''\n ResidualBlock Class\n Values\n channels: the number of channels throughout the residual block, a scalar\n '''\n\n def __init__(self, channels):\n super().__init__()\n\n self.layers = nn.Sequential(\n nn.ReflectionPad2d(1),\n nn.Conv2d(channels, channels, kernel_size=3, padding=0),\n nn.InstanceNorm2d(channels, affine=False),\n\n nn.ReLU(inplace=True),\n\n nn.ReflectionPad2d(1),\n nn.Conv2d(channels, channels, kernel_size=3, padding=0),\n nn.InstanceNorm2d(channels, affine=False),\n )\n\n def forward(self, x):\n return x + self.layers(x)", "_____no_output_____" ] ], [ [ "## Multiscale Generator: Generating at multiple scales (resolutions)\n\nThe Pix2PixHD generator is comprised of two separate subcomponent generators: $G_1$ is called the global generator and operates at low resolution (1024 x 512) to transfer styles. $G_2$ is the local enhancer and operates at high resolution (2048 x 1024) to deal with higher resolution.\n\nThe architecture for each network is adapted from [Perceptual Losses for Real-Time Style Transfer and Super-Resolution](https://arxiv.org/abs/1603.08155) (Johnson et al. 2016) and is comprised of\n\n\\begin{align*}\n G = \\left[G^{(F)}, G^{(R)}, G^{(B)}\\right],\n\\end{align*}\n\nwhere $G^{(F)}$ is a frontend of convolutional blocks (downsampling), $G^{(R)}$ is a set of residual blocks, and $G^{(B)}$ is a backend of transposed convolutional blocks (upsampling). This is just a type of encoder-decoder generator that you learned about with Pix2Pix!\n\n$G_1$ is trained first on low-resolution images. 
Then, $G_2$ is added to the pre-trained $G_1$ and both are trained jointly on high-resolution images. Specifically, $G_2^{(F)}$ encodes a high-resolution image, $G_1$ encodes a downsampled, low-resolution image, and the outputs from both are summed and passed sequentially to $G_2^{(R)}$ and $G_2^{(B)}$. This pre-training and fine-tuning scheme works well because the model is able to learn accurate coarser representations before using them to touch up its refined representations, since learning high-fidelity representations is generally a pretty hard task.\n\n> ![Pix2PixHD Generator](https://github.com/https-deeplearning-ai/GANs-Public/blob/master/Pix2PixHD-Generator.png?raw=true)\n*Pix2PixHD Generator, taken from Figure 3 of [High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs](https://arxiv.org/abs/1711.11585) (Wang et al. 2018). Following our notation, $G = \\left[G_2^{(F)}, G_1^{(F)}, G_1^{(R)}, G_1^{(B)}, G_2^{(R)}, G_2^{(B)}\\right]$ from left to right.*", "_____no_output_____" ], [ "### Global Subgenerator ($G_1$)\n\nLet's first start by building the global generator ($G_1$). Even though the global generator is nested inside the local enhancer, you'll still need a separate module for training $G_1$ on its own first.", "_____no_output_____" ] ], [ [ "class GlobalGenerator(nn.Module):\n '''\n GlobalGenerator Class:\n Implements the global subgenerator (G1) for transferring styles at lower resolutions.\n Values:\n in_channels: the number of input channels, a scalar\n out_channels: the number of output channels, a scalar\n base_channels: the number of channels in first convolutional layer, a scalar\n fb_blocks: the number of frontend / backend blocks, a scalar\n res_blocks: the number of residual blocks, a scalar\n '''\n\n def __init__(self, in_channels=3, out_channels=3,\n base_channels=64, fb_blocks=3, res_blocks=9):\n super().__init__()\n\n # Initial convolutional layer\n g1 = [\n nn.ReflectionPad2d(3),\n nn.Conv2d(in_channels, base_channels, kernel_size=7, padding=0),\n nn.InstanceNorm2d(base_channels, affine=False),\n nn.ReLU(inplace=True),\n ]\n\n channels = base_channels\n # Frontend blocks\n for _ in range(fb_blocks):\n g1 += [\n nn.Conv2d(channels, 2 * channels, kernel_size=3, stride=2, padding=1),\n nn.InstanceNorm2d(2 * channels, affine=False),\n nn.ReLU(inplace=True),\n ]\n channels *= 2\n\n # Residual blocks\n for _ in range(res_blocks):\n g1 += [ResidualBlock(channels)]\n\n # Backend blocks\n for _ in range(fb_blocks):\n g1 += [\n nn.ConvTranspose2d(channels, channels // 2, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.InstanceNorm2d(channels // 2, affine=False),\n nn.ReLU(inplace=True),\n ]\n channels //= 2\n\n # Output convolutional layer as its own nn.Sequential since it will be omitted in second training phase\n self.out_layers = nn.Sequential(\n nn.ReflectionPad2d(3),\n nn.Conv2d(base_channels, out_channels, kernel_size=7, padding=0),\n nn.Tanh(),\n )\n\n self.g1 = nn.Sequential(*g1)\n\n def forward(self, x):\n x = self.g1(x)\n x = self.out_layers(x)\n return x", "_____no_output_____" ] ], [ [ "### Local Enhancer Subgenerator ($G_2$)\n\nAnd now onto the local enhancer ($G_2$)! Recall that the local enhancer uses (a pretrained) $G_1$ as part of its architecture. Following our earlier notation, recall that the residual connections from the last layers of $G_2^{(F)}$ and $G_1^{(B)}$ are added together and passed through $G_2^{(R)}$ and $G_2^{(B)}$ to synthesize a high-resolution image. 
Because of this, you should reuse the $G_1$ implementation so that the weights are consistent for the second training phase.", "_____no_output_____" ] ], [ [ "class LocalEnhancer(nn.Module):\n '''\n LocalEnhancer Class: \n Implements the local enhancer subgenerator (G2) for handling larger scale images.\n Values:\n in_channels: the number of input channels, a scalar\n out_channels: the number of output channels, a scalar\n base_channels: the number of channels in first convolutional layer, a scalar\n global_fb_blocks: the number of global generator frontend / backend blocks, a scalar\n global_res_blocks: the number of global generator residual blocks, a scalar\n local_res_blocks: the number of local enhancer residual blocks, a scalar\n '''\n\n def __init__(self, in_channels, out_channels, base_channels=32, global_fb_blocks=3, global_res_blocks=9, local_res_blocks=3):\n super().__init__()\n\n global_base_channels = 2 * base_channels\n\n # Downsampling layer for high-res -> low-res input to g1\n self.downsample = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)\n\n # Initialize global generator without its output layers\n self.g1 = GlobalGenerator(\n in_channels, out_channels, base_channels=global_base_channels, fb_blocks=global_fb_blocks, res_blocks=global_res_blocks,\n ).g1\n\n self.g2 = nn.ModuleList()\n\n # Initialize local frontend block\n self.g2.append(\n nn.Sequential(\n # Initial convolutional layer\n nn.ReflectionPad2d(3),\n nn.Conv2d(in_channels, base_channels, kernel_size=7, padding=0), \n nn.InstanceNorm2d(base_channels, affine=False),\n nn.ReLU(inplace=True),\n\n # Frontend block\n nn.Conv2d(base_channels, 2 * base_channels, kernel_size=3, stride=2, padding=1), \n nn.InstanceNorm2d(2 * base_channels, affine=False),\n nn.ReLU(inplace=True),\n )\n )\n\n # Initialize local residual and backend blocks\n self.g2.append(\n nn.Sequential(\n # Residual blocks\n *[ResidualBlock(2 * base_channels) for _ in range(local_res_blocks)],\n\n # Backend blocks\n nn.ConvTranspose2d(2 * base_channels, base_channels, kernel_size=3, stride=2, padding=1, output_padding=1), \n nn.InstanceNorm2d(base_channels, affine=False),\n nn.ReLU(inplace=True),\n\n # Output convolutional layer\n nn.ReflectionPad2d(3),\n nn.Conv2d(base_channels, out_channels, kernel_size=7, padding=0),\n nn.Tanh(),\n )\n )\n\n def forward(self, x):\n # Get output from g1_B\n x_g1 = self.downsample(x)\n x_g1 = self.g1(x_g1)\n\n # Get output from g2_F\n x_g2 = self.g2[0](x)\n\n # Get final output from g2_B\n return self.g2[1](x_g1 + x_g2)", "_____no_output_____" ] ], [ [ "And voilà! You now have modules for both the global subgenerator and local enhancer subgenerator!", "_____no_output_____" ], [ "## Multiscale Discriminator: Discriminating at different scales too!\n\nPix2PixHD uses 3 separate subcomponents (subdiscriminators $D_1$, $D_2$, and $D_3$) to generate predictions. They all have the same architectures but $D_2$ and $D_3$ operate on inputs downsampled by 2x and 4x, respectively. 
The GAN objective is now modified as\n\n\\begin{align*}\n \\min_G \\max_{D_1,D_2,D_3}\\sum_{k=1,2,3}\\mathcal{L}_{\\text{GAN}}(G, D_k)\n\\end{align*}\n\nEach subdiscriminator is a PatchGAN, which you should be familiar with from Pix2Pix!\n\nLet's first implement a single PatchGAN - this implementation will be slightly different than the one you saw in Pix2Pix since the intermediate feature maps will be needed for computing loss.", "_____no_output_____" ] ], [ [ "class Discriminator(nn.Module):\n '''\n Discriminator Class\n Implements the discriminator class for a subdiscriminator, \n which can be used for all the different scales, just with different argument values.\n Values:\n in_channels: the number of channels in input, a scalar\n base_channels: the number of channels in first convolutional layer, a scalar\n n_layers: the number of convolutional layers, a scalar\n '''\n\n def __init__(self, in_channels=3, base_channels=64, n_layers=3):\n super().__init__()\n\n # Use nn.ModuleList so we can output intermediate values for loss.\n self.layers = nn.ModuleList()\n\n # Initial convolutional layer\n self.layers.append(\n nn.Sequential(\n nn.Conv2d(in_channels, base_channels, kernel_size=4, stride=2, padding=2),\n nn.LeakyReLU(0.2, inplace=True),\n )\n )\n\n # Downsampling convolutional layers\n channels = base_channels\n for _ in range(1, n_layers):\n prev_channels = channels\n channels = min(2 * channels, 512)\n self.layers.append(\n nn.Sequential(\n nn.Conv2d(prev_channels, channels, kernel_size=4, stride=2, padding=2),\n nn.InstanceNorm2d(channels, affine=False),\n nn.LeakyReLU(0.2, inplace=True),\n )\n )\n\n # Output convolutional layer\n prev_channels = channels\n channels = min(2 * channels, 512)\n self.layers.append(\n nn.Sequential(\n nn.Conv2d(prev_channels, channels, kernel_size=4, stride=1, padding=2),\n nn.InstanceNorm2d(channels, affine=False),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(channels, 1, kernel_size=4, stride=1, padding=2),\n )\n )\n\n def forward(self, x):\n outputs = [] # for feature matching loss\n for layer in self.layers:\n x = layer(x)\n outputs.append(x)\n\n return outputs", "_____no_output_____" ], [ "model = Discriminator()", "_____no_output_____" ], [ "p = model(torch.rand(1, 3, 256, 256))", "_____no_output_____" ], [ "for pi in p:\n print(pi.shape)", "torch.Size([1, 64, 129, 129])\ntorch.Size([1, 128, 65, 65])\ntorch.Size([1, 256, 33, 33])\ntorch.Size([1, 1, 35, 35])\n" ] ], [ [ "Now you're ready to implement the multiscale discriminator in full! 
This puts together the different subdiscriminator scales.", "_____no_output_____" ] ], [ [ "class MultiscaleDiscriminator(nn.Module):\n '''\n MultiscaleDiscriminator Class\n Values:\n in_channels: number of input channels to each discriminator, a scalar\n base_channels: number of channels in first convolutional layer, a scalar\n n_layers: number of downsampling layers in each discriminator, a scalar\n n_discriminators: number of discriminators at different scales, a scalar\n '''\n\n def __init__(self, in_channels, base_channels=64, n_layers=3, n_discriminators=3):\n super().__init__()\n\n # Initialize all discriminators\n self.discriminators = nn.ModuleList()\n for _ in range(n_discriminators):\n self.discriminators.append(\n Discriminator(in_channels, base_channels=base_channels, n_layers=n_layers)\n )\n\n # Downsampling layer to pass inputs between discriminators at different scales\n self.downsample = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)\n\n def forward(self, x):\n outputs = []\n\n for i, discriminator in enumerate(self.discriminators):\n # Downsample input for subsequent discriminators\n if i != 0:\n x = self.downsample(x)\n\n outputs.append(discriminator(x))\n\n # Return list of multiscale discriminator outputs\n return outputs\n\n @property\n def n_discriminators(self):\n return len(self.discriminators)", "_____no_output_____" ] ], [ [ "## Instance Boundary Map: Learning boundaries between instances\n\nHere's a new method that adds additional information as conditional input!\n\nThe authors observed that previous approaches have typically taken in a label map (aka. segmentation map) that labels all the pixels to be of a certain class (i.e. car) but doesn't differentiate between two instances of the same class (i.e. two cars in the image). This is the difference between *semantic label maps*, which have class labels but not instance labels, and *instance label maps*, which represent unique instances with unique numbers.\n\nThe authors found that the most important information in the instance lelab map is actually the boundaries between instances (i.e. the outline of each car). You can create boundary maps by mapping each pixel maps to a 1 if it's a different instance from its 4 neighbors, and 0 otherwise.\n\nTo include this information, the authors concatenate the boundary map with the semantic label map as input. From the figure below, you can see that including both as input results in much sharper generated images (right) than only inputting the semantic label map (left).\n\n> ![Semantic label map input vs instance boundary map input](https://github.com/https-deeplearning-ai/GANs-Public/blob/master/Pix2PixHD-Instance-Map.png?raw=true)\n![Semantic label map vs instance boundary map](https://github.com/https-deeplearning-ai/GANs-Public/blob/master/Pix2PixHD-Instance-Map-2.png?raw=true)\n*Semantic label map input (top left) and its blurry output between instances (bottom left) vs. instance boundary map (top right) and the much clearer output between instances from inputting both the semantic label map and the instance boundary map (bottom right). Taken from Figures 4 and 5 of [High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs](https://arxiv.org/abs/1711.11585) (Wang et al. 2018).*", "_____no_output_____" ], [ "## Instance-level Feature Encoder: Adding controllable diversity\n\nAs you already know, the task of generation has more than one possible realistic output. 
For example, an object of class `road` could be concrete, cobblestone, dirt, etc. To learn this diversity, the authors introduce an encoder $E$, which takes the original image as input and outputs a feature map (like the feature extractor from Course 2, Week 1). They apply *instance-wise averaging*, averaging the feature vectors across all occurrences of each instance (so that every pixel corresponding to the same instance has the same feature vector). They then concatenate this instance-level feature embedding with the semantic label and instance boundary maps as input to the generator.\n\nWhat's cool is that the encoder $E$ is trained jointly with $G_1$. One huge backprop! When training $G_2$, $E$ is fed a downsampled image and the corresponding output is upsampled to pass into $G_2$.\n\nTo allow for control over different features (e.g. concrete, cobblestone, and dirt) for inference, the authors first use K-means clustering to cluster all the feature vectors for each object class in the training set. You can think of this as a dictionary, mapping each class label to a set of feature vectors (so $K$ centroids, each representing different clusters of features). Now during inference, you can perform a random lookup from this dictionary for each class (e.g. road) in the semantic label map to generate one type of feature (e.g. dirt). To provide greater control, you can select among different feature types for each class to generate diverse feature types and, as a result, multi-modal outputs from the same input. \n\nHigher values of $K$ increase diversity and potentially decrease fidelity. You've seen this tradeoff between diversity and fidelity before with the truncation trick, and this is just another way to trade-off between them.\n", "_____no_output_____" ] ], [ [ "class Encoder(nn.Module):\n '''\n Encoder Class\n Values:\n in_channels: number of input channels to each discriminator, a scalar\n out_channels: number of channels in output feature map, a scalar\n base_channels: number of channels in first convolutional layer, a scalar\n n_layers: number of downsampling layers, a scalar\n '''\n\n def __init__(self, in_channels, out_channels, base_channels=16, n_layers=4):\n super().__init__()\n\n self.out_channels = out_channels\n channels = base_channels\n\n layers = [\n nn.ReflectionPad2d(3),\n nn.Conv2d(in_channels, base_channels, kernel_size=7, padding=0), \n nn.InstanceNorm2d(base_channels),\n nn.ReLU(inplace=True),\n ]\n\n # Downsampling layers\n for i in range(n_layers):\n layers += [\n nn.Conv2d(channels, 2 * channels, kernel_size=3, stride=2, padding=1),\n nn.InstanceNorm2d(2 * channels),\n nn.ReLU(inplace=True),\n ]\n channels *= 2\n \n # Upsampling layers\n for i in range(n_layers):\n layers += [\n nn.ConvTranspose2d(channels, channels // 2, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.InstanceNorm2d(channels // 2),\n nn.ReLU(inplace=True),\n ]\n channels //= 2\n\n layers += [\n nn.ReflectionPad2d(3),\n nn.Conv2d(base_channels, out_channels, kernel_size=7, padding=0),\n nn.Tanh(),\n ]\n\n self.layers = nn.Sequential(*layers)\n\n def instancewise_average_pooling(self, x, inst):\n '''\n Applies instance-wise average pooling.\n\n Given a feature map of size (b, c, h, w), the mean is computed for each b, c\n across all h, w of the same instance\n '''\n x_mean = torch.zeros_like(x)\n classes = torch.unique(inst, return_inverse=False, return_counts=False) # gather all unique classes present\n\n for i in classes:\n for b in range(x.size(0)):\n indices = 
torch.nonzero(inst[b:b+1] == i, as_tuple=False) # get indices of all positions equal to class i\n for j in range(self.out_channels):\n x_ins = x[indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]]\n mean_feat = torch.mean(x_ins).expand_as(x_ins)\n x_mean[indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]] = mean_feat\n\n return x_mean\n\n def forward(self, x, inst):\n x = self.layers(x)\n x = self.instancewise_average_pooling(x, inst)\n return x", "_____no_output_____" ] ], [ [ "## Additional Loss Functions\n\nIn addition to the architectural and feature-map enhancements, the authors also incorporate a feature matching loss based on the discriminator. Essentially, they output intermediate feature maps at different resolutions from the discriminator and try to minimize the difference between the real and fake image features.\n\nThe authors found this to stabilize training. In this case, this forces the generator to produce natural statistics at multiple scales. This feature-matching loss is similar to StyleGAN's perceptual loss. For some semantic label map $s$ and corresponding image $x$,\n\n\\begin{align*}\n \\mathcal{L}_{\\text{FM}} = \\mathbb{E}_{s,x}\\left[\\sum_{i=1}^T\\dfrac{1}{N_i}\\left|\\left|D^{(i)}_k(s, x) - D^{(i)}_k(s, G(s))\\right|\\right|_1\\right]\n\\end{align*}\n\nwhere $T$ is the total number of layers, $N_i$ is the number of elements at layer $i$, and $D^{(i)}_k$ denotes the $i$th layer in discriminator $k$.\n\nThe authors also report minor improvements in performance when adding perceptual loss, formulated as\n\n\\begin{align*}\n \\mathcal{L}_{\\text{VGG}} = \\mathbb{E}_{s,x}\\left[\\sum_{i=1}^N\\dfrac{1}{M_i}\\left|\\left|F^i(x) - F^i(G(s))\\right|\\right|_1\\right]\n\\end{align*}\n\nwhere $F^i$ denotes the $i$th layer with $M_i$ elements of the VGG19 network. 
`torchvision` provides a pretrained VGG19 network, so you'll just need a simple wrapper for it to get the intermediate outputs.\n\nThe overall loss looks like this:\n\n\\begin{align*}\n \\mathcal{L} = \\mathcal{L}_{\\text{GAN}} + \\lambda_1\\mathcal{L}_{\\text{FM}} + \\lambda_2\\mathcal{L}_{\\text{VGG}}\n\\end{align*}\n\nwhere $\\lambda_1 = \\lambda_2 = 10$.", "_____no_output_____" ] ], [ [ "import torchvision.models as models\n\nclass VGG19(nn.Module):\n '''\n VGG19 Class\n Wrapper for pretrained torchvision.models.vgg19 to output intermediate feature maps\n '''\n\n def __init__(self):\n super().__init__()\n vgg_features = models.vgg19(pretrained=True).features\n\n self.f1 = nn.Sequential(*[vgg_features[x] for x in range(2)])\n self.f2 = nn.Sequential(*[vgg_features[x] for x in range(2, 7)])\n self.f3 = nn.Sequential(*[vgg_features[x] for x in range(7, 12)])\n self.f4 = nn.Sequential(*[vgg_features[x] for x in range(12, 21)])\n self.f5 = nn.Sequential(*[vgg_features[x] for x in range(21, 30)])\n\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n h1 = self.f1(x)\n h2 = self.f2(h1)\n h3 = self.f3(h2)\n h4 = self.f4(h3)\n h5 = self.f5(h4)\n return [h1, h2, h3, h4, h5]\n\nclass Loss(nn.Module):\n '''\n Loss Class\n Implements composite loss for GauGAN\n Values:\n lambda1: weight for feature matching loss, a float\n lambda2: weight for vgg perceptual loss, a float\n device: 'cuda' or 'cpu' for hardware to use\n norm_weight_to_one: whether to normalize weights to (0, 1], a bool\n '''\n\n def __init__(self, lambda1=10., lambda2=10., device='cuda', norm_weight_to_one=True):\n super().__init__()\n self.vgg = VGG19().to(device)\n self.vgg_weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]\n\n lambda0 = 1.0\n # Keep ratio of composite loss, but scale down max to 1.0\n scale = max(lambda0, lambda1, lambda2) if norm_weight_to_one else 1.0\n\n self.lambda0 = lambda0 / scale\n self.lambda1 = lambda1 / scale\n self.lambda2 = lambda2 / scale\n\n def adv_loss(self, discriminator_preds, is_real):\n '''\n Computes adversarial loss from nested list of fakes outputs from discriminator.\n '''\n target = torch.ones_like if is_real else torch.zeros_like\n\n adv_loss = 0.0\n for preds in discriminator_preds:\n pred = preds[-1]\n adv_loss += F.mse_loss(pred, target(pred))\n return adv_loss\n\n def fm_loss(self, real_preds, fake_preds):\n '''\n Computes feature matching loss from nested lists of fake and real outputs from discriminator.\n '''\n fm_loss = 0.0\n for real_features, fake_features in zip(real_preds, fake_preds):\n for real_feature, fake_feature in zip(real_features, fake_features):\n fm_loss += F.l1_loss(real_feature.detach(), fake_feature)\n return fm_loss\n\n def vgg_loss(self, x_real, x_fake):\n '''\n Computes perceptual loss with VGG network from real and fake images.\n '''\n vgg_real = self.vgg(x_real)\n vgg_fake = self.vgg(x_fake)\n\n vgg_loss = 0.0\n for real, fake, weight in zip(vgg_real, vgg_fake, self.vgg_weights):\n vgg_loss += weight * F.l1_loss(real.detach(), fake)\n return vgg_loss\n\n def forward(self, x_real, label_map, instance_map, boundary_map, encoder, generator, discriminator):\n '''\n Function that computes the forward pass and total loss for generator and discriminator.\n '''\n feature_map = encoder(x_real, instance_map)\n x_fake = generator(torch.cat((label_map, boundary_map, feature_map), dim=1))\n\n # Get necessary outputs for loss/backprop for both generator and discriminator\n fake_preds_for_g = discriminator(torch.cat((label_map, 
boundary_map, x_fake), dim=1))\n fake_preds_for_d = discriminator(torch.cat((label_map, boundary_map, x_fake.detach()), dim=1))\n real_preds_for_d = discriminator(torch.cat((label_map, boundary_map, x_real.detach()), dim=1))\n\n g_loss = (\n self.lambda0 * self.adv_loss(fake_preds_for_g, True) + \\\n self.lambda1 * self.fm_loss(real_preds_for_d, fake_preds_for_g) / discriminator.n_discriminators + \\\n self.lambda2 * self.vgg_loss(x_fake, x_real)\n )\n d_loss = 0.5 * (\n self.adv_loss(real_preds_for_d, True) + \\\n self.adv_loss(fake_preds_for_d, False)\n )\n\n return g_loss, d_loss, x_fake.detach()", "_____no_output_____" ] ], [ [ "## Training Pix2PixHD\n\nYou now have the Pix2PixHD model coded up! All you have to do now is prepare your dataset. Pix2PixHD is trained on the Cityscapes dataset, which unfortunately requires registration. You'll have to download the dataset and put it in your `data` folder to initialize the dataset code below.\n\nSpecifically, you should download the `gtFine_trainvaltest` and `leftImg8bit_trainvaltest` and specify the corresponding data splits into the dataloader.", "_____no_output_____" ] ], [ [ "import os\n\nimport numpy as np\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\ndef scale_width(img, target_width, method):\n '''\n Function that scales an image to target_width while retaining aspect ratio.\n '''\n w, h = img.size\n if w == target_width: return img\n target_height = target_width * h // w\n return img.resize((target_width, target_height), method)\n\nclass CityscapesDataset(torch.utils.data.Dataset):\n '''\n CityscapesDataset Class\n Values:\n paths: (a list of) paths to load examples from, a list or string\n target_width: the size of image widths for resizing, a scalar\n n_classes: the number of object classes, a scalar\n '''\n\n def __init__(self, paths, target_width=1024, n_classes=35):\n super().__init__()\n\n self.n_classes = n_classes\n\n # Collect list of examples\n self.examples = {}\n if type(paths) == str:\n self.load_examples_from_dir(paths)\n elif type(paths) == list:\n for path in paths:\n self.load_examples_from_dir(path)\n else:\n raise ValueError('`paths` should be a single path or list of paths')\n\n self.examples = list(self.examples.values())\n assert all(len(example) == 3 for example in self.examples)\n\n # Initialize transforms for the real color image\n self.img_transforms = transforms.Compose([\n transforms.Lambda(lambda img: scale_width(img, target_width, Image.BICUBIC)),\n transforms.Lambda(lambda img: np.array(img)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n # Initialize transforms for semantic label and instance maps\n self.map_transforms = transforms.Compose([\n transforms.Lambda(lambda img: scale_width(img, target_width, Image.NEAREST)),\n transforms.Lambda(lambda img: np.array(img)),\n transforms.ToTensor(),\n ])\n\n def load_examples_from_dir(self, abs_path):\n '''\n Given a folder of examples, this function returns a list of paired examples.\n '''\n assert os.path.isdir(abs_path)\n\n img_suffix = '_leftImg8bit.png'\n label_suffix = '_gtFine_labelIds.png'\n inst_suffix = '_gtFine_instanceIds.png'\n\n for root, _, files in os.walk(abs_path):\n for f in files:\n if f.endswith(img_suffix):\n prefix = f[:-len(img_suffix)]\n attr = 'orig_img'\n elif f.endswith(label_suffix):\n prefix = f[:-len(label_suffix)]\n attr = 'label_map'\n elif f.endswith(inst_suffix):\n prefix = f[:-len(inst_suffix)]\n attr = 'inst_map'\n else:\n continue\n\n if prefix not in 
self.examples.keys():\n self.examples[prefix] = {}\n self.examples[prefix][attr] = root + '/' + f\n\n def __getitem__(self, idx):\n example = self.examples[idx]\n\n # Load image and maps\n img = Image.open(example['orig_img']).convert('RGB') # color image: (3, 512, 1024)\n inst = Image.open(example['inst_map']) # instance map: (512, 1024)\n label = Image.open(example['label_map']) # semantic label map: (512, 1024)\n\n # Apply corresponding transforms\n img = self.img_transforms(img)\n inst = self.map_transforms(inst)\n label = self.map_transforms(label).long() * 255\n\n # Convert labels to one-hot vectors\n label = torch.zeros(self.n_classes, img.shape[1], img.shape[2]).scatter_(0, label, 1.0).to(img.dtype)\n\n # Convert instance map to instance boundary map\n bound = torch.ByteTensor(inst.shape).zero_()\n bound[:, :, 1:] = bound[:, :, 1:] | (inst[:, :, 1:] != inst[:, :, :-1])\n bound[:, :, :-1] = bound[:, :, :-1] | (inst[:, :, 1:] != inst[:, :, :-1])\n bound[:, 1:, :] = bound[:, 1:, :] | (inst[:, 1:, :] != inst[:, :-1, :])\n bound[:, :-1, :] = bound[:, :-1, :] | (inst[:, 1:, :] != inst[:, :-1, :])\n bound = bound.to(img.dtype)\n\n return (img, label, inst, bound)\n\n def __len__(self):\n return len(self.examples)\n\n @staticmethod\n def collate_fn(batch):\n imgs, labels, insts, bounds = [], [], [], []\n for (x, l, i, b) in batch:\n imgs.append(x)\n labels.append(l)\n insts.append(i)\n bounds.append(b)\n return (\n torch.stack(imgs, dim=0),\n torch.stack(labels, dim=0),\n torch.stack(insts, dim=0),\n torch.stack(bounds, dim=0),\n )", "_____no_output_____" ], [ "from tqdm import tqdm\nfrom torch.utils.data import DataLoader\n\nn_classes = 35 # total number of object classes\nrgb_channels = n_features = 3\ndevice = 'cuda'\ntrain_dir = ['cityscapes_data/train']\nepochs = 200 # total number of train epochs\ndecay_after = 100 # number of epochs with constant lr\nlr = 0.0002\nbetas = (0.5, 0.999)\n\ndef lr_lambda(epoch):\n ''' Function for scheduling learning '''\n return 1. if epoch < decay_after else 1 - float(epoch - decay_after) / (epochs - decay_after)\n\ndef weights_init(m):\n ''' Function for initializing all model weights '''\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n nn.init.normal_(m.weight, 0., 0.02)\n\nloss_fn = Loss(device=device)\n\ndataloader2 = DataLoader(\n CityscapesDataset(train_dir, target_width=2048, n_classes=n_classes),\n collate_fn=CityscapesDataset.collate_fn, batch_size=1, shuffle=True, drop_last=False, pin_memory=True,\n)", "_____no_output_____" ] ], [ [ "Now initialize everything you'll need for training. Don't be worried if there looks like a lot of random code, it's all stuff you've seen before!", "_____no_output_____" ] ], [ [ "from tqdm import tqdm\nfrom torch.utils.data import DataLoader\n\nn_classes = 35 # total number of object classes\nrgb_channels = n_features = 3\ndevice = 'cuda'\ntrain_dir = ['data']\nepochs = 200 # total number of train epochs\ndecay_after = 100 # number of epochs with constant lr\nlr = 0.0002\nbetas = (0.5, 0.999)\n\ndef lr_lambda(epoch):\n ''' Function for scheduling learning '''\n return 1. 
if epoch < decay_after else 1 - float(epoch - decay_after) / (epochs - decay_after)\n\ndef weights_init(m):\n ''' Function for initializing all model weights '''\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n nn.init.normal_(m.weight, 0., 0.02)\n\nloss_fn = Loss(device=device)\n\n## Phase 1: Low Resolution (1024 x 512)\ndataloader1 = DataLoader(\n CityscapesDataset(train_dir, target_width=1024, n_classes=n_classes),\n collate_fn=CityscapesDataset.collate_fn, batch_size=1, shuffle=True, drop_last=False, pin_memory=True,\n)\nencoder = Encoder(rgb_channels, n_features).to(device).apply(weights_init)\ngenerator1 = GlobalGenerator(n_classes + n_features + 1, rgb_channels).to(device).apply(weights_init)\ndiscriminator1 = MultiscaleDiscriminator(n_classes + 1 + rgb_channels, n_discriminators=2).to(device).apply(weights_init)\n\ng1_optimizer = torch.optim.Adam(list(generator1.parameters()) + list(encoder.parameters()), lr=lr, betas=betas)\nd1_optimizer = torch.optim.Adam(list(discriminator1.parameters()), lr=lr, betas=betas)\ng1_scheduler = torch.optim.lr_scheduler.LambdaLR(g1_optimizer, lr_lambda)\nd1_scheduler = torch.optim.lr_scheduler.LambdaLR(d1_optimizer, lr_lambda)\n\n\n## Phase 2: High Resolution (2048 x 1024)\ndataloader2 = DataLoader(\n CityscapesDataset(train_dir, target_width=2048, n_classes=n_classes),\n collate_fn=CityscapesDataset.collate_fn, batch_size=1, shuffle=True, drop_last=False, pin_memory=True,\n)\ngenerator2 = LocalEnhancer(n_classes + n_features + 1, rgb_channels).to(device).apply(weights_init)\ndiscriminator2 = MultiscaleDiscriminator(n_classes + 1 + rgb_channels).to(device).apply(weights_init)\n\ng2_optimizer = torch.optim.Adam(list(generator2.parameters()) + list(encoder.parameters()), lr=lr, betas=betas)\nd2_optimizer = torch.optim.Adam(list(discriminator2.parameters()), lr=lr, betas=betas)\ng2_scheduler = torch.optim.lr_scheduler.LambdaLR(g2_optimizer, lr_lambda)\nd2_scheduler = torch.optim.lr_scheduler.LambdaLR(d2_optimizer, lr_lambda)", "_____no_output_____" ] ], [ [ "And now the training loop, which is pretty much the same between the two phases:", "_____no_output_____" ] ], [ [ "from torchvision.utils import make_grid\nimport matplotlib.pyplot as plt\n\n# Parse torch version for autocast\n# ######################################################\nversion = torch.__version__\nversion = tuple(int(n) for n in version.split('.')[:-1])\nhas_autocast = version >= (1, 6)\n# ######################################################\n\ndef show_tensor_images(image_tensor):\n '''\n Function for visualizing images: Given a tensor of images, number of images, and\n size per image, plots and prints the images in an uniform grid.\n '''\n image_tensor = (image_tensor + 1) / 2\n image_unflat = image_tensor.detach().cpu()\n image_grid = make_grid(image_unflat[:1], nrow=1)\n plt.imshow(image_grid.permute(1, 2, 0).squeeze())\n plt.show()\n\ndef train(dataloader, models, optimizers, schedulers, device):\n encoder, generator, discriminator = models\n g_optimizer, d_optimizer = optimizers\n g_scheduler, d_scheduler = schedulers\n\n cur_step = 0\n display_step = 100\n\n mean_g_loss = 0.0\n mean_d_loss = 0.0\n\n for epoch in range(epochs):\n # Training epoch\n for (x_real, labels, insts, bounds) in tqdm(dataloader, position=0):\n x_real = x_real.to(device)\n labels = labels.to(device)\n insts = insts.to(device)\n bounds = bounds.to(device)\n\n # Enable autocast to FP16 tensors (new feature since torch==1.6.0)\n # If you're running older versions of torch, comment this 
out\n # and use NVIDIA apex for mixed/half precision training\n if has_autocast:\n with torch.cuda.amp.autocast(enabled=(device=='cuda')):\n g_loss, d_loss, x_fake = loss_fn(\n x_real, labels, insts, bounds, encoder, generator, discriminator\n )\n else:\n g_loss, d_loss, x_fake = loss_fn(\n x_real, labels, insts, bounds, encoder, generator, discriminator\n )\n\n g_optimizer.zero_grad()\n g_loss.backward()\n g_optimizer.step()\n\n d_optimizer.zero_grad()\n d_loss.backward()\n d_optimizer.step()\n\n mean_g_loss += g_loss.item() / display_step\n mean_d_loss += d_loss.item() / display_step\n\n if cur_step % display_step == 0 and cur_step > 0:\n print('Step {}: Generator loss: {:.5f}, Discriminator loss: {:.5f}'\n .format(cur_step, mean_g_loss, mean_d_loss))\n show_tensor_images(x_fake.to(x_real.dtype))\n show_tensor_images(x_real)\n mean_g_loss = 0.0\n mean_d_loss = 0.0\n cur_step += 1\n\n g_scheduler.step()\n d_scheduler.step()", "_____no_output_____" ] ], [ [ "And now you can train your models! Remember to set the local enhancer subgenerator to the global subgenerator that you train in the first phase.\n\nIn their official repository, the authors don't continue to train the encoder. Instead, they precompute all feature maps upsample them, and concatenate this to the input to the local enhancer subgenerator. (They also leave a re-train option for it). For simplicity, the script below will just downsample and upsample high-resolution inputs.", "_____no_output_____" ] ], [ [ "# Phase 1: Low Resolution\n#######################################################################\ntrain(\n dataloader1,\n [encoder, generator1, discriminator1],\n [g1_optimizer, d1_optimizer],\n [g1_scheduler, d1_scheduler],\n device,\n)\n\n\n# Phase 2: High Resolution\n#######################################################################\n# Update global generator in local enhancer with trained\ngenerator2.g1 = generator1.g1\n\n# Freeze encoder and wrap to support high-resolution inputs/outputs\ndef freeze(encoder):\n encoder.eval()\n for p in encoder.parameters():\n p.requires_grad = False\n\n @torch.jit.script\n def forward(x, inst):\n x = F.interpolate(x, scale_factor=0.5, recompute_scale_factor=True)\n inst = F.interpolate(inst.float(), scale_factor=0.5, recompute_scale_factor=True)\n feat = encoder(x, inst.int())\n return F.interpolate(feat, scale_factor=2.0, recompute_scale_factor=True)\n return forward\n\ntrain(\n dataloader2,\n [freeze(encoder), generator2, discriminator2],\n [g2_optimizer, d2_optimizer],\n [g2_scheduler, d2_scheduler],\n device,\n)", "_____no_output_____" ] ], [ [ "## Inference with Pix2PixHD\n\nRecall that in inference time, the encoder feature maps from training are saved and clustered with K-means by object class. 
Again, you'll have to download the Cityscapes dataset into your `data` folder and then run these functions.", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\n\n# Encode features by class label\nfeatures = {}\nfor (x, _, inst, _) in tqdm(dataloader2):\n x = x.to(device)\n inst = inst.to(device)\n area = inst.size(2) * inst.size(3)\n\n # Get pooled feature map\n with torch.no_grad():\n feature_map = encoder(x, inst)\n\n for i in torch.unique(inst):\n label = i if i < 1000 else i // 1000\n label = int(label.flatten(0).item())\n\n # All indices should have same feature per class from pooling\n idx = torch.nonzero(inst == i, as_tuple=False)\n n_inst = idx.size(0)\n idx = idx[0, :]\n\n # Retrieve corresponding encoded feature\n feature = feature_map[idx[0], :, idx[2], idx[3]].unsqueeze(0)\n\n # Compute rate of feature appearance (in official code, they compute per block)\n block_size = 32\n rate_per_block = 32 * n_inst / area\n rate = torch.ones((1, 1), device=device).to(feature.dtype) * rate_per_block\n\n feature = torch.cat((feature, rate), dim=1)\n if label in features.keys():\n features[label] = torch.cat((features[label], feature), dim=0)\n else:\n features[label] = feature\n\n\n# Cluster features by class label\nk = 10\ncentroids = {}\nfor label in range(n_classes):\n if label not in features.keys():\n continue\n feature = features[label]\n\n # Thresholding by 0.5 isn't mentioned in the paper, but is present in the\n # official code repository, probably so that only frequent features are clustered\n feature = feature[feature[:, -1] > 0.5, :-1].cpu().numpy()\n\n if feature.shape[0]:\n n_clusters = min(feature.shape[0], k)\n kmeans = KMeans(n_clusters=n_clusters).fit(feature)\n centroids[label] = kmeans.cluster_centers_", "100%|██████████| 174/174 [02:07<00:00, 1.36it/s]\n" ] ], [ [ "After getting the encoded feature centroids per class, you can now run inference! Remember that the generator is trained to take in a concatenation of the semantic label map, instance boundary map, and encoded feature map.\n\nCongrats on making it to the end of this complex notebook! Have fun with this powerful model and be responsible of course ;)", "_____no_output_____" ] ], [ [ "def infer(label_map, instance_map, boundary_map):\n # Sample feature vector centroids\n b, _, h, w = label_map.shape\n feature_map = torch.zeros((b, n_features, h, w), device=device).to(label_map.dtype)\n\n for i in torch.unique(instance_map):\n label = i if i < 1000 else i // 1000\n label = int(label.flatten(0).item())\n\n if label in centroids.keys():\n centroid_idx = random.randint(0, centroids[label].shape[0] - 1)\n idx = torch.nonzero(instance_map == int(i), as_tuple=False)\n\n feature = torch.from_numpy(centroids[label][centroid_idx, :]).to(device)\n feature_map[idx[:, 0], :, idx[:, 2], idx[:, 3]] = feature\n\n with torch.no_grad():\n x_fake = generator2(torch.cat((label_map, boundary_map, feature_map), dim=1))\n return x_fake\n\nfor x, labels, insts, bounds in dataloader2:\n x_fake = infer(labels.to(device), insts.to(device), bounds.to(device))\n show_tensor_images(x_fake.to(x.dtype))\n show_tensor_images(x)\n break", "_____no_output_____" ] ] ]
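The instance boundary maps discussed in the record above (and computed with torch tensor comparisons inside `CityscapesDataset.__getitem__`) boil down to a 4-neighbor difference test. Below is a minimal standalone NumPy sketch of that idea; it is not part of the stored notebook, and the instance ids in the toy example are arbitrary placeholders.

```python
import numpy as np

def boundary_map(inst):
    """Return 1 wherever a pixel's instance id differs from any 4-connected neighbor, else 0."""
    bound = np.zeros(inst.shape, dtype=bool)
    dh = inst[:, 1:] != inst[:, :-1]  # horizontal neighbor differences
    dv = inst[1:, :] != inst[:-1, :]  # vertical neighbor differences
    bound[:, 1:] |= dh
    bound[:, :-1] |= dh
    bound[1:, :] |= dv
    bound[:-1, :] |= dv
    return bound.astype(np.float32)

# Two touching instances of the same class: the boundary map still separates them.
inst = np.zeros((6, 6), dtype=np.int64)
inst[1:4, 1:3] = 26001
inst[1:4, 3:5] = 26002
print(boundary_map(inst))
```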
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0aa1c60acbcdb14257d7ce2142447127d264b82
585,154
ipynb
Jupyter Notebook
notebooks/Calibration - Figure.ipynb
XinweiYu/noise2self
04e0379a67e1cb0c807abd3f8d4fd1666db5a793
[ "MIT" ]
null
null
null
notebooks/Calibration - Figure.ipynb
XinweiYu/noise2self
04e0379a67e1cb0c807abd3f8d4fd1666db5a793
[ "MIT" ]
null
null
null
notebooks/Calibration - Figure.ipynb
XinweiYu/noise2self
04e0379a67e1cb0c807abd3f8d4fd1666db5a793
[ "MIT" ]
null
null
null
516.464254
220,136
0.944664
[ [ [ "# Calibrating Traditional Methods", "_____no_output_____" ], [ "In this notebook, we show how to calibrate a $\\mathcal{J}$-invariant denoiser, and compare its performance\nwith the original denoiser.", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "import sys\nsys.path.append(\"..\")", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.morphology import disk\nfrom skimage.filters import gaussian, median\nfrom skimage import data, img_as_float, img_as_ubyte\nfrom skimage.color import gray2rgb\nfrom skimage.util import random_noise", "_____no_output_____" ], [ "from skimage.measure import compare_ssim as ssim\nfrom skimage.measure import compare_psnr as psnr\nfrom skimage.measure import compare_mse as mse\nfrom util import plot_grid, plot_images, expand", "_____no_output_____" ], [ "plt.rc('figure', figsize = (5,5))", "_____no_output_____" ], [ "show = lambda x: plt.imshow(x, cmap=plt.cm.gray)", "_____no_output_____" ], [ "image = data.camera()\nshow(image)", "_____no_output_____" ], [ "np.random.seed(3)\nnoisy_image = img_as_ubyte(random_noise(image, mode = 'gaussian', var=0.01))\nshow(noisy_image)", "/Users/josh/anaconda3/envs/pytorch/lib/python3.6/site-packages/skimage/util/dtype.py:141: UserWarning: Possible precision loss when converting from float64 to uint8\n .format(dtypeobj_in, dtypeobj_out))\n" ] ], [ [ "We begin by comparing an ordinary median filter to a \"donut\" median filter, with the center removed.", "_____no_output_____" ] ], [ [ "def mask_center(x):\n x[len(x)//2,len(x)//2] = 0\n return x\nplot_images([1-disk(4), 1-mask_center(disk(4))])", "_____no_output_____" ], [ "cm = plt.get_cmap(\"tab10\")\norange_regular_disk = (1 - disk(4))[:,:,np.newaxis] + (disk(4))[:,:,np.newaxis]*np.array(cm(1)[:-1])[np.newaxis, np.newaxis]\nblue_donut_disk = (1 - mask_center(disk(4)))[:,:,np.newaxis] + (mask_center(disk(4)))[:,:,np.newaxis]*np.array(cm(0)[:-1])[np.newaxis, np.newaxis]", "_____no_output_____" ], [ "plt.imsave(dir + 'regular_disk.png', expand(orange_regular_disk, 5))\nplt.imsave(dir + 'donut_disk.png', expand(blue_donut_disk, 5))", "_____no_output_____" ], [ "radii = range(1, 7)\nmask_med = np.array([median(noisy_image, mask_center(disk(i))) for i in radii])\nmed = np.array([median(noisy_image, disk(i)) for i in radii])", "_____no_output_____" ], [ "def stats(im_list, noisy_img, img):\n #img is the ground truth\n img = img_as_float(img)\n noisy_img = img_as_float(noisy_img)\n im_list = [img_as_float(x) for x in im_list]\n \n loss = [mse(x, noisy_img) for x in im_list]\n mse_gt = [mse(x, img) for x in im_list]\n psnr_gt = [psnr(x, img) for x in im_list]\n \n return loss, mse_gt, psnr_gt", "_____no_output_____" ], [ "loss_med, mse_med, psnr_med = stats(med, noisy_image, image)\nloss_mask_med, mse_mask_med, psnr_mask_med = stats(mask_med, noisy_image, image)", "_____no_output_____" ], [ "opt = radii[np.argmin(loss_mask_med)]\nprint(opt)", "3\n" ], [ "plt.figure(figsize=(7,5))\n\nplt.plot(radii, loss_mask_med, label = 'masked median -- noisy input', color = 'C0')\nplt.plot(radii, loss_med, label = 'median -- noisy input', color = 'C1')\n\nplt.axvline(radii[np.argmin(loss_mask_med)], color='k', linestyle='--')\nplt.title('Calibrating a Median Filter')\n\nplt.plot(radii, mse_mask_med, label = 'masked median -- ground truth', color = 'C0', linestyle='--')\nplt.plot(radii, mse_med, label = 'median -- ground truth', color = 'C1', 
linestyle='--')\nplt.ylabel('MSE')\nplt.xlabel('Radius of Median Filter')\n\nplt.yticks([0.002, 0.012])\nplt.ylim(0, 0.0143)\nplt.legend(loc='center right')\n\nplt.savefig(dir + 'median_filter.pdf')", "_____no_output_____" ], [ "plt.figure(figsize=(7,5))\n\nplt.plot(radii, loss_mask_med, label = 'masked median -- noisy input', color = 'C0')\nplt.plot(radii, loss_med, label = 'median -- noisy input', color = 'C1')\n\nplt.axvline(radii[np.argmin(loss_mask_med)], color='k', linestyle='--')\n\nplt.plot(radii, mse_mask_med, label = 'masked median -- ground truth', color = 'C0', linestyle='--')\nplt.plot(radii, mse_med, label = 'median -- ground truth', color = 'C1', linestyle='--')\n\nplt.yticks([0.002, 0.012])\nplt.ylim(0, 0.0143)\n\n# Hide the right and top spines\nplt.gca().spines['right'].set_visible(False)\nplt.gca().spines['top'].set_visible(False)\n\nplt.savefig(dir + 'median_filter_bare.pdf')", "_____no_output_____" ], [ "inset_x_min = 100\ninset_x_max = 160\ninset_y_min = 230\ninset_y_max = 290\n\nget_inset = lambda x: x[inset_x_min:inset_x_max, inset_y_min:inset_y_max]\n\nplt.imsave(dir + 'camera_noisy.png', get_inset(noisy_image), cmap = 'Greys_r')\nplt.imsave(dir + 'camera_clean.png', get_inset(image), cmap = 'Greys_r')\nfor i in range(len(mask_med)):\n plt.imsave(dir + 'camera_median_' + str(radii[i]) + '.png', get_inset(mask_med[i]), cmap = 'Greys_r')", "_____no_output_____" ] ], [ [ "# Conversion to J-invariance", "_____no_output_____" ], [ "Let $f$ be a classical denoiser, and consider some partition $\\mathcal{J}$ of the pixels. Let $s(x)$ be the \nfunction replacing each pixel with the average of its neighbors. Then the function $g$ defined by\n\n$g(x)_J := f_\\theta(\\mathbf{1}_{J}\\cdot s(x) + \\mathbf{1}_{J^c}\\cdot x)_J,$\n\nwill be $J$-invariant for each $J \\in \\mathcal{J}$.\n\nBelow, we implement this in a functional way: given a denoiser, the `invariant_denoise` is the appropriate invariant denoiser.", "_____no_output_____" ] ], [ [ "from scipy.signal import convolve2d\ndef interpolate_image(x, conv_filter=None):\n # use the mean of 4-connected neighbor to filter the image.\n if conv_filter is None:\n conv_filter = np.array([[0, 0.25, 0], [0.25, 0, 0.25], [0, 0.25, 0]])\n return convolve2d(x, conv_filter, mode = 'same')\ndef generate_mask(shape, idx, width=3):\n m = np.zeros(shape)\n # get x and y index from a single index.\n phasex = idx % width\n phasey = (idx // width) % width\n \n m[phasex::width, phasey::width] = 1\n return m\ndef invariant_denoise(img, width, denoiser):\n # denoiser is the f shown above, should be a function. 
\n \n # number of all pixels in a block\n n_masks = width*width\n # the interpolation image \n interp = interpolate_image(img)\n # Initialize output image.\n output = np.zeros(img.shape)\n \n for i in range(n_masks):\n # for each i there is a mask for masking every ith pixel in block\n m = generate_mask(img.shape, i, width=width)\n #1𝐽⋅𝑠(𝑥)+1𝐽𝑐⋅𝑥\n input_image = m*interp + (1 - m)*img\n input_image = input_image.astype(img.dtype)\n #𝑓𝜃(1𝐽⋅𝑠(𝑥)+1𝐽𝑐⋅𝑥)𝐽\n output += m*denoiser(input_image)\n return output", "_____no_output_____" ] ], [ [ "## Wavelet", "_____no_output_____" ] ], [ [ "from skimage.restoration import denoise_wavelet", "_____no_output_____" ], [ "sigma_range = np.arange(0.08, 0.3, 0.03)", "_____no_output_____" ], [ "reconstructions = [denoise_wavelet(noisy_image, sigma = sigma, mode='hard', multichannel = False)\n for sigma in sigma_range]", "_____no_output_____" ], [ "reconstructions_mask = [reconstruct(noisy_image, 4, lambda x: \n denoise_wavelet(x, sigma = sigma, mode='hard', multichannel = False))\n for sigma in sigma_range]", "_____no_output_____" ], [ "loss_wavelet, mse_wavelet, psnr_wavelet = stats(reconstructions, noisy_image, image)\nloss_mask_wavelet, mse_mask_wavelet, psnr_mask_wavelet = stats(reconstructions_mask, noisy_image, image)", "_____no_output_____" ], [ "plt.plot(sigma_range, psnr_wavelet)", "_____no_output_____" ], [ "opt = sigma_range[np.argmin(loss_mask_wavelet)]", "_____no_output_____" ], [ "plt.figure(figsize=(7,5))\n\nplt.plot(sigma_range, loss_mask_wavelet, label = 'masked wavelet -- noisy data', color = 'C0')\nplt.plot(sigma_range, loss_wavelet, label = 'wavelet -- noisy data', color = 'C1')\n\nplt.axvline(sigma_range[np.argmin(loss_mask_wavelet)], color='k', linestyle='--')\nplt.title('Calibrating Wavelet Denoiser')\n\nplt.plot(sigma_range, mse_mask_wavelet, label = 'masked wavelet -- ground truth', color = 'C0', linestyle='--')\nplt.plot(sigma_range, mse_wavelet, label = 'wavelet -- ground truth', color = 'C1', linestyle='--')\nplt.ylabel('MSE')\nplt.xlabel('Sigma Threshold')\nplt.yticks([0.002, 0.012])\nplt.ylim(0, 0.0143)\n\nplt.legend(loc='center right')\n\nplt.savefig(dir + 'wavelet_filter.pdf')", "_____no_output_____" ], [ "plt.figure(figsize=(7,5))\n\nplt.plot(sigma_range, loss_mask_wavelet, label = 'masked wavelet -- noisy data', color = 'C0')\nplt.plot(sigma_range, loss_wavelet, label = 'wavelet -- noisy data', color = 'C1')\n\nplt.axvline(sigma_range[np.argmin(loss_mask_wavelet)], color='k', linestyle='--')\n\nplt.plot(sigma_range, mse_mask_wavelet, label = 'masked wavelet -- ground truth', color = 'C0', linestyle='--')\nplt.plot(sigma_range, mse_wavelet, label = 'wavelet -- ground truth', color = 'C1', linestyle='--')\n\nplt.yticks([0.002, 0.012])\nplt.ylim(0, 0.0143)\n\n\n# Hide the right and top spines\nplt.gca().spines['right'].set_visible(False)\nplt.gca().spines['top'].set_visible(False)\n\nplt.savefig(dir + 'wavelet_filter_bare.pdf')", "_____no_output_____" ] ], [ [ "### Basic run of NL-Means", "_____no_output_____" ], [ "## NL-Means", "_____no_output_____" ] ], [ [ "from skimage.restoration import denoise_nl_means, estimate_sigma\n# non-local mean denoise.\nsigma_est = np.mean(estimate_sigma(noisy_image, multichannel=False))\nprint(sigma_est/255)\n\npatch_kw = dict(patch_size=5, # 5x5 patches\n patch_distance=6, # 13x13 search area\n multichannel=True)\n\nh_suggested = 0.8 * sigma_est\n\ndenoise_fast = denoise_nl_means(noisy_image, h=h_suggested, fast_mode=True,\n **patch_kw)\npsnr(denoise_fast.astype(np.uint8), image)", 
"0.0930814453935413\n" ], [ "h_range = sigma_est*np.arange(0.5, 2.0, 0.2)", "_____no_output_____" ], [ "reconstructions_nl = [denoise_nl_means(noisy_image, h=h, fast_mode=True,\n **patch_kw)/255 for h in h_range]\n\nreconstructions_nl_mask = [reconstruct(noisy_image, 4, lambda x: denoise_nl_means(x, h=h, fast_mode=True,\n **patch_kw))/255 for h in h_range]", "_____no_output_____" ], [ "loss_nl, mse_nl, psnr_nl = stats(reconstructions_nl, noisy_image, image)\nloss_mask_nl, mse_mask_nl, psnr_mask_nl = stats(reconstructions_nl_mask, noisy_image, image)", "_____no_output_____" ], [ "plt.plot(h_range, psnr_nl)", "_____no_output_____" ], [ "opt = sigma_range[np.argmin(loss_mask_nl)]", "_____no_output_____" ], [ "plt.figure(figsize=(7,5))\n\nplt.plot(h_range, loss_mask_nl, label = 'masked NL-means -- noisy data', color = 'C0')\nplt.plot(h_range, loss_nl, label = 'NL-means -- noisy data', color = 'C1')\n\nplt.axvline(h_range[np.argmin(loss_mask_nl)], color='k', linestyle='--')\nplt.title('Calibrating NL-means')\n\nplt.plot(h_range, mse_mask_nl, label = 'masked NL-means -- ground truth', color = 'C0', linestyle='--')\nplt.plot(h_range, mse_nl, label = 'NL-means -- ground truth', color = 'C1', linestyle='--')\nplt.ylabel('MSE')\nplt.xlabel('Cut-off Distance')\n\nplt.legend(loc='center right')\nplt.yticks([0.002, 0.012])\nplt.ylim(0, 0.0143)\n\nplt.savefig(dir + 'nl-means_filter.pdf')", "_____no_output_____" ], [ "plt.figure(figsize=(7,5))\n\nplt.plot(h_range, loss_mask_nl, label = 'masked NL-means -- noisy data', color = 'C0')\nplt.plot(h_range, loss_nl, label = 'NL-means -- noisy data', color = 'C1')\n\nplt.axvline(h_range[np.argmin(loss_mask_nl)], color='k', linestyle='--')\n\nplt.plot(h_range, mse_mask_nl, label = 'masked NL-means -- ground truth', color = 'C0', linestyle='--')\nplt.plot(h_range, mse_nl, label = 'NL-means -- ground truth', color = 'C1', linestyle='--')\n\nplt.yticks([0.002, 0.012])\nplt.ylim(0, 0.0143)\n\n\n# Hide the right and top spines\nplt.gca().spines['right'].set_visible(False)\nplt.gca().spines['top'].set_visible(False)\n\nplt.savefig(dir + 'nl-means_filter_bare.pdf')", "_____no_output_____" ] ], [ [ "# Which method is best?", "_____no_output_____" ] ], [ [ "min(loss_mask_nl), min(loss_mask_wavelet), min(loss_mask_med)", "_____no_output_____" ], [ "min(mse_mask_nl), min(mse_mask_wavelet), min(mse_mask_med)", "_____no_output_____" ], [ "max(psnr_mask_nl), max(psnr_mask_wavelet), max(psnr_mask_med)", "_____no_output_____" ], [ "show(get_inset(reconstructions_nl_mask[np.argmin(loss_mask_nl)]))", "_____no_output_____" ], [ "show(get_inset(reconstructions_mask[np.argmin(loss_mask_wavelet)]))", "_____no_output_____" ] ], [ [ "## Shrinkage", "_____no_output_____" ], [ "Given two uncorrelated and unbiased estimators $u$ and $v$ of some quantity $y$, we may form a linear combination:\n \n$\\lambda u + (1 - \\lambda)v.$\n\nThe variance of this estimator is\n\n$\\lambda^2 U + (1 - \\lambda)^2 V,$\n\nwhere $U$ and $V$ are the variances of $u$ and $v$ respectively. This expression is minimized at\n\n$\\lambda = V/(U + V).$\n\nThe variance of the result is $UV/(U+V) = V\\frac{1}{1+V/U}$. When $V$ is the term with lowest variance, here we can lower it by an amount depending on $V/U$. When the variance of $v$ is much lower than that of $u$, we just get $V$ out, but when they are the same our variance is exactly halved. Note that this is monotonic in $V$, so when comparing methods, mixing in the original signal will not change their order. 
In terms of PSNR, the new value is\n\n$10*\log_{10}(\frac{1+V/U}{V}) = \operatorname{PSNR}(V) + 10*\log_{10}(1 + V/U) \approx \operatorname{PSNR}(V) + 10\log_{10}(e) (\frac{V}{U} - \frac{1}{2}(\frac{V}{U})^2) \approx \operatorname{PSNR}(V) + 4.34 \frac{V}{U}$\n\nIf we fix $y$, then $x_j$ and $\mathbb{E} y_j|x_{-j}$ are both independent estimators of $y_j$, so the above reasoning applies. Note that the loss is the variance of $x_j|x_{-j}$, whose two components are the variance of $x_j|y_j$ and the variance of $y_j|x_{-j}$.\n\nIf we know the distribution of the noise, i.e., we know the variance of $x_j|y_j$, then we can compute the variance of $y_j|x_{-j}$ by subtracting it from the variance of $x_j|x_{-j}$, i.e., from the value of the loss. That will provide us with the optimal $\lambda$ to use.", "_____no_output_____" ] ], [ [ "# reminder: log10(x) = ln(x)/ln(10), so 10*log10(e) = 10/ln(10) is approximately 4.34", "_____no_output_____" ], [ "image_float = img_as_float(image)\nnoisy_image_float = img_as_float(noisy_image)", "_____no_output_____" ], [ "noise_var = mse(noisy_image_float, image_float)", "_____no_output_____" ], [ "idx = np.argmin(loss_mask_wavelet)\ny_pred = reconstructions_mask[idx]\ntotal_var = loss_mask_wavelet[idx]", "_____no_output_____" ], [ "idx = np.argmin(loss_mask_nl)\ny_pred = reconstructions_nl_mask[idx]\ntotal_var = loss_mask_nl[idx]", "_____no_output_____" ], [ "idx = np.argmin(loss_mask_med)\ny_pred = img_as_float(med[idx])\ntotal_var = loss_mask_med[idx]", "_____no_output_____" ], [ "for mode, recons, loss in [('median', mask_med, loss_mask_med),\n                           ('wavelet', reconstructions_mask, loss_mask_wavelet),\n                           ('nl_means', reconstructions_nl_mask, loss_mask_nl)]:\n\n    print(\"mode: \", mode)\n    idx = np.argmin(loss)\n    y_pred = img_as_float(recons[idx])\n    total_var = loss[idx]\n\n    lam = noise_var/total_var\n    improved_prediction = lam*y_pred + (1 - lam)*noisy_image_float\n    print(\"Lambda weighting: \", np.round(lam, 2))\n    print(\"Approx change in PSNR: \", np.round(4.34*(total_var - noise_var)/noise_var, 2))\n    print(\"Loss: \", total_var.round(4))\n    print(\"Original PSNR: \", psnr(y_pred, image_float).round(1))\n    print(\"New PSNR: \", psnr(improved_prediction, image_float).round(1))\n    print('---------------')", "_____no_output_____" ], [ "4.34*(total_var - noise_var)/noise_var", "_____no_output_____" ], [ "psnr(noisy_image_float, image_float), psnr(y_pred, image_float), psnr(improved_prediction, image_float)", "_____no_output_____" ], [ "plot_images([get_inset(noisy_image),\n             get_inset(y_pred),\n             get_inset(improved_prediction),\n             get_inset(image)])", "_____no_output_____" ], [ "ssim(noisy_image_float, image_float), ssim(y_pred, image_float), ssim(improved_prediction, image_float)", "_____no_output_____" ] ] ]
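A minimal numerical sketch of the shrinkage rule derived above (the weight $\lambda = V/(U+V)$ and the combined variance $UV/(U+V)$), kept outside the notebook's own cells. The noise levels and the synthetic signal are made-up values used only to check the formula; nothing here comes from the notebook's image data.

```python
import numpy as np

rng = np.random.default_rng(0)

# ground truth and two independent, unbiased noisy estimates of it
y = rng.uniform(size=100_000)
sigma_u, sigma_v = 0.2, 0.1                      # assumed noise levels: U = 0.04, V = 0.01
u = y + sigma_u * rng.normal(size=y.size)
v = y + sigma_v * rng.normal(size=y.size)

U, V = sigma_u ** 2, sigma_v ** 2
lam = V / (U + V)                                # optimal weight on the noisier estimator u
combined = lam * u + (1 - lam) * v

mse = lambda a: np.mean((a - y) ** 2)
print(mse(u), mse(v), mse(combined))             # combined MSE should be near U*V/(U+V) = 0.008
```

The printed values should sit near 0.04, 0.01 and 0.008, matching the claim that mixing in the noisier estimate still lowers the error of the better one.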
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0aa225e1c588ee396cfb5b9a1bf8afaa19bc3da
2,343
ipynb
Jupyter Notebook
MWE/Pathes.ipynb
thorade/jupyterNotebooks
8466465809a406e4729ce3479be629028aa1f0f5
[ "MIT" ]
1
2019-07-08T03:13:40.000Z
2019-07-08T03:13:40.000Z
MWE/Pathes.ipynb
thorade/jupyterNotebooks
8466465809a406e4729ce3479be629028aa1f0f5
[ "MIT" ]
null
null
null
MWE/Pathes.ipynb
thorade/jupyterNotebooks
8466465809a406e4729ce3479be629028aa1f0f5
[ "MIT" ]
1
2019-10-19T01:30:04.000Z
2019-10-19T01:30:04.000Z
20.734513
70
0.514725
[ [ [ "import os,sys\ntry:\n osenv_pypath = os.environ['PYTHONPATH'].split(os.pathsep)\n osenv_path = os.environ['PATH'].split(os.pathsep)\n sys_path = sys.path.split(os.pathsep)\nexcept KeyError:\n osenv_pypath = []\n osenv_path = []\n sys_path = []\n\nprint(osenv_pypath)\nprint(osenv_path)\nprint(sys_path)\n", "_____no_output_____" ], [ "import os\nprint(os.name)\n\nimport platform\nprint(platform.system())\nprint(platform.release())\n\nimport sys\nif sys.platform.lower().startswith('linux'):\n print(sys.platform)\nelif sys.platform.lower().startswith('win'):\n print(sys.platform)\n print(sys.version)\nelse:\n pass", "_____no_output_____" ], [ "import os\nkey = 'Python'\nenvpath = os.getenv('PATH')\nenvpaths = envpath.split(';')\nprint(envpath)\n# path_Python = next((s for s in envpaths if key in s), None)\n# print(path_Python)", "_____no_output_____" ], [ "import os, sys\nprint(sys.version)\nprint(sys.executable)\nprint(os.__file__)\nprint(sys.path)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d0aa22d02f4624613069046ddcc82e6bc9a561ae
8,534
ipynb
Jupyter Notebook
stock_predictor.ipynb
pathompong-y/stock_predictor
28502ffaad9b8b3c2e7db9b712587a31f37544a9
[ "MIT" ]
2
2020-05-16T10:52:08.000Z
2021-06-11T16:53:14.000Z
stock_predictor.ipynb
pathompong-y/stock_predictor
28502ffaad9b8b3c2e7db9b712587a31f37544a9
[ "MIT" ]
null
null
null
stock_predictor.ipynb
pathompong-y/stock_predictor
28502ffaad9b8b3c2e7db9b712587a31f37544a9
[ "MIT" ]
3
2020-06-26T23:33:57.000Z
2021-04-02T10:26:03.000Z
45.636364
390
0.575932
[ [ [ "#Stock Price Predictor\n\nThis is a Jupyter notebook that you can use to get prediction of adjusted close stock price per the specified day range after the last day from the training data set. The prediction is made by training the machine learning model with historical trade of the stock data. This is the result of study from the following notebook - https://github.com/pathompong-y/stock_predictor. \n\nTo use this notebook, please follow this setup instruction.", "_____no_output_____" ], [ "##Setup Instructions\n\n1. Download `stock_predictor.ipynb` and `stock_predictor.py` from https://github.com/pathompong-y/stock_predictor.\n2. Go to https://colab.research.google.com and go to file and upload new notebook. Upload stock_predictor.ipynb.\n3. Upload `stock_predictor.py` to Files panel by drag and drop from your local computer to the root/outmost level folder.\n4. Follow how to use to do train the model and do prediction.", "_____no_output_____" ], [ "##How to use\n\nProvide input into the code cell below per this instruction.\n\n1. At `stock_list`, Provide the list of stock symbol separated by space. Make sure that the symbol is searchable on Yahoo Finance - https://finance.yahoo.com/.\n2. At `training_date_start_date` and `training_data_end_date`, specify the start date and end date for historical data of the stock to train the model. The date format is DD/MM/YYYY.\n\n3. Push \"Play\" button at the cell upper left corner (or alt+enter / cmd+enter). Please wait until you see \"Completed\" message. For one stock, it could take up to 15 minutes.", "_____no_output_____" ] ], [ [ "stock_list = 'ASK.BK GOOGL' \ntraining_data_start_date = '08/05/2000'\ntraining_data_end_date = '13/05/2020' \n\n# ------ DO NOT CHANGE CODE BELOW THIS LINE --------\n\n!pip install yfinance\nimport yfinance as yf\nimport os,sys\nsys.path.append(os.path.abspath(\"/content/stock_predictor.py\"))\nfrom stock_predictor import *\ntrain_model(stock_list,training_data_start_date,training_data_end_date)", "Collecting yfinance\n Downloading https://files.pythonhosted.org/packages/c2/31/8b374a12b90def92a4e27d0fc595fc43635f395984e36a075244d98bd265/yfinance-0.1.54.tar.gz\nRequirement already satisfied: pandas>=0.24 in /usr/local/lib/python3.6/dist-packages (from yfinance) (1.0.3)\nRequirement already satisfied: numpy>=1.15 in /usr/local/lib/python3.6/dist-packages (from yfinance) (1.18.4)\nRequirement already satisfied: requests>=2.20 in /usr/local/lib/python3.6/dist-packages (from yfinance) (2.23.0)\nRequirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.6/dist-packages (from yfinance) (0.0.9)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24->yfinance) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24->yfinance) (2018.9)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (2020.4.5.1)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (2.9)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (1.24.3)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from 
python-dateutil>=2.6.1->pandas>=0.24->yfinance) (1.12.0)\nBuilding wheels for collected packages: yfinance\n Building wheel for yfinance (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for yfinance: filename=yfinance-0.1.54-py2.py3-none-any.whl size=22409 sha256=46496be0c7ad98bc777515cec38d9321435301ade2d3256ba54bdfbdba03e2a8\n Stored in directory: /root/.cache/pip/wheels/f9/e3/5b/ec24dd2984b12d61e0abf26289746c2436a0e7844f26f2515c\nSuccessfully built yfinance\nInstalling collected packages: yfinance\nSuccessfully installed yfinance-0.1.54\n" ] ], [ [ "##How to use (cont.)\n\n4. You can query for the predicted stock price by adding the list of stock symbol in `query_list`. The symbol must be subset of `stock_list` that you provided in step 1.\n\n5. `prediction_range` is the day range of price prediction after `end_date` in step 2. For example, if `end_date` is 15/05/2020 and `prediction_range` is 5. You will get the prediction for 5 days after 15/05/2020.\n\n6. Push \"Play\" button at the cell upper left corner (or alt+enter / cmd+enter). You will get the predicted price (Adjusted Close) with mean squared error rate of prediction.", "_____no_output_____" ] ], [ [ "query_list = 'ASK.BK GOOGL'\nprediction_range = 5\n\n# ------ DO NOT CHANGE CODE BELOW THIS LINE --------\n\nquery_price(query_list,prediction_range)", "ASK.BK price prediction for 5 days : [16.706566 16.835562 16.86699 16.988611 16.975838]\nMean square error = [2.76239613] %\nGOOGL price prediction for 5 days : [1341.0127 1340.9868 1347.199 1344.8762 1349.1017]\nMean square error = [268.90700931] %\n" ] ] ]
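For anyone who wants to eyeball the raw inputs before running `train_model`, the short sketch below pulls the same kind of historical data directly with `yfinance`. The ticker, date range and column are illustrative assumptions; this is not code from `stock_predictor.py`.

```python
import yfinance as yf

# illustrative ticker and date range, mirroring the training window used above
history = yf.download("GOOGL", start="2000-05-08", end="2020-05-13")

# the notebook predicts the adjusted close, so that is the column worth sanity-checking
adj_close = history["Adj Close"].dropna()
print(adj_close.tail())
```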
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0aa2ebce545b2f3eb16c154b3e928018f67966a
20,173
ipynb
Jupyter Notebook
Bronze/python/Python12_Basics_Loops.ipynb
SevdanurGENC/Quantum-Programming-With-Qiskit
61896b1ea135498b0b8c7b3c270547d382851de1
[ "MIT" ]
2
2022-03-30T04:56:20.000Z
2022-03-30T04:56:34.000Z
Bronze/python/Python12_Basics_Loops.ipynb
SevdanurGENC/Quantum-Programming-With-Qiskit
61896b1ea135498b0b8c7b3c270547d382851de1
[ "MIT" ]
null
null
null
Bronze/python/Python12_Basics_Loops.ipynb
SevdanurGENC/Quantum-Programming-With-Qiskit
61896b1ea135498b0b8c7b3c270547d382851de1
[ "MIT" ]
1
2021-09-17T18:04:05.000Z
2021-09-17T18:04:05.000Z
27.041555
309
0.501809
[ [ [ "<a href=\"https://qworld.net\" target=\"_blank\" align=\"left\"><img src=\"../qworld/images/header.jpg\" align=\"left\"></a>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\stateplus}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\stateminus}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\I}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & 1} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $\n$ \\newcommand{\\pstate}[1]{ \\lceil \\mspace{-1mu} #1 \\mspace{-1.5mu} \\rfloor } $\n$ \\newcommand{\\greenbit}[1] {\\mathbf{{\\color{green}#1}}} $\n$ \\newcommand{\\bluebit}[1] {\\mathbf{{\\color{blue}#1}}} $\n$ \\newcommand{\\redbit}[1] {\\mathbf{{\\color{red}#1}}} $\n$ \\newcommand{\\brownbit}[1] {\\mathbf{{\\color{brown}#1}}} $\n$ \\newcommand{\\blackbit}[1] {\\mathbf{{\\color{black}#1}}} $", "_____no_output_____" ], [ "<font style=\"font-size:28px;\" align=\"left\"><b> Basics of Python: Loops </b></font>\n<br>\n_prepared by Abuzer Yakaryilmaz_\n<br><br>", "_____no_output_____" ], [ "We review using loops in Python here. 
\n\nRun each cell and check the results.", "_____no_output_____" ], [ "<h3> For-loop </h3>", "_____no_output_____" ] ], [ [ "# let's print all numbers between 0 and 9\nfor i in range(10): print(i)\n# range(n) represents the list of all numbers from 0 to n-1\n# i is the variable to take the values in the range(n) iteratively: 0,1,...,9 in our example", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n" ], [ "# let's write the same code in two lines\nfor i in range(10): # do not forget to use colon\n print(i)\n # the second line is indented\n # this means that the command in the second line will be executed inside the for-loop\n # any other code executed inside the for-loop must be intented in the same way\n #my_code_inside_for-loop_2 will come here\n #my_code_inside_for-loop_3 will come here\n #my_code_inside_for-loop_4 will come here\n# now I am out of the scope of for-loop\n#my_code_outside_for-loop_1 will come here\n#my_code_outside_for-loop_2 will come here", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n" ], [ "# let's calculate the summation 1+2+...+10 by using a for-loop\n\n# we use variable total for the total summation\ntotal = 0 \nfor i in range(11): # do not forget to use colon\n total = total + i # the value of total is increased by i in each iteration\n # alternatively, the same assignment can shortly be written as total += i similarly to the languages C, C++, Java, etc.\n# now I am out of the scope of for-loop\n# let's print the final value of total\nprint(total)", "55\n" ], [ "# let's calculate the summation 10+12+14+...+44\n# we create a list having all numbers in the summation\n# for this purpose, this time we will use three parameters in range\ntotal = 0\nfor j in range(10,45,2): # the range is defined between 10 and 44, and the value of j will be increased by 2 after each iteration\n total += j # let's use the shortened version of total = total + j this time\nprint(total)", "486\n" ], [ "# let's calculate the summation 1+2+4+8+16+...+256\n# remark that 256 = 2*2*...*2 (8 times)\ntotal = 0\ncurrent_number = 1 # this value will be multiplied by 2 after each iteration\nfor k in range(9):\n total = total + current_number # current_number is 1 at the beginning, and its value will be doubled after each iteration\n current_number = 2 * current_number # let's double the value of the current_number for the next iteration\n # short version of the same assignment: current_number *= 2 as in the languages C, C++, Java, etc.\n# now I am out of the scope of for-loop\n# let's print the latest value of total\nprint(total)", "511\n" ], [ "# instead of range, we may also directly use a list if it is short\nfor i in [1,10,100,1000,10000]:\n print(i)", "1\n10\n100\n1000\n10000\n" ], [ "# instead of [...], we may also use (...)\n# but this time it is a tuple, not a list (keep in your mind that the values in a tuple cannot be changed)\nfor i in (1,10,100,1000,10000):\n print(i)", "_____no_output_____" ], [ "# let's create a range between 10 and 91 that contains the multiples of 7\nfor j in range(14,92,7): \n # 14 is the first multiple of 7 greater than or equal to 10; so we should start with 14\n # 91 should be in the range, and so we end the range with 92\n print(j)", "14\n21\n28\n35\n42\n49\n56\n63\n70\n77\n84\n91\n" ], [ "# let's create a range between 11 and 22\nfor i in range(11,23):\n print(i)", "11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n" ], [ "# we can also use variables in range\nn = 5\nfor j in range(n,2*n): \n print(j) # we will print all numbers in {n,n+1,n+2,...,2n-1}", "5\n6\n7\n8\n9\n" ], [ "# we can use a 
list of strings\nfor name in (\"Asja\",\"Balvis\",\"Fyodor\"):\n print(\"Hello\",name,\":-)\")", "Hello Asja :-)\nHello Balvis :-)\nHello Fyodor :-)\n" ], [ "# any range indeed returns a list\nL1 = list(range(10))\nprint(L1)\n\nL2 = list(range(55,200,11))\nprint(L2)", "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n[55, 66, 77, 88, 99, 110, 121, 132, 143, 154, 165, 176, 187, 198]\n" ] ], [ [ "<h3> Task 1 </h3>\n\nCalculate the value of summation $ 3+6+9+\\cdots+51 $, and then print the result.\n\nYour result should be 459.", "_____no_output_____" ] ], [ [ "#\n# your solution is here\n#\n", "_____no_output_____" ] ], [ [ "<a href=\"Python12_Basics_Loops_Solutions.ipynb#task1\">click for our solution</a>", "_____no_output_____" ], [ "<h3> Task 2 </h3>\n\n$ 3^k $ means $ 3 \\cdot 3 \\cdot \\cdots \\cdot 3 $ ($ k $ times) for $ k \\geq 2 $. \n\nMoreover, $ 3^0 $ is 1 and $ 3^1 = 3 $.\n\nCalculate the value of summation $ 3^0 + 3^1 + 3^2 + \\cdots + 3^8 $, and then print the result.\n\nYour result should be 9841.", "_____no_output_____" ] ], [ [ "#\n# your solution is here\n#\n", "_____no_output_____" ] ], [ [ "<a href=\"Python12_Basics_Loops_Solutions.ipynb#task2\">click for our solution</a>", "_____no_output_____" ], [ "<h3> While-loop </h3>", "_____no_output_____" ] ], [ [ "# let's calculate the summation 1+2+4+8+...+256 by using a while-loop\ntotal = 0\ni = 1\n\n#while condition(s):\n# your_code1\n# your_code2\n# your_code3\nwhile i < 257: # this loop iterates as long as i is less than 257\n total = total + i\n i = i * 2 # i is doubled in each iteration, and so soon it will be greater than 256\n \nprint(total)\n# we do the same summation by using for-loop above", "511\n" ], [ "L = [0,1,2,3,4,5,11] # this is a list containing 7 integer values\ni = 0\nwhile i in L: # this loop will be iterated as long as i is in L\n print(i)\n i = i + 1 # the value of i iteratively increased, and so soon it will hit a value not in the list L\n \n# the loop is terminated after i is set to 6, because 6 is not in L", "_____no_output_____" ], [ "# let's use negation in the condition of while-loop\nL = [10] # this list has a single element\ni = 0\nwhile i not in L: # this loop will be iterated as long as i is not equal to 10\n print(i)\n i = i+1 # the value of i will hit 10 after ten iterations\n ", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n" ], [ "# let's rewrite the same loop by using a direct inequality\ni = 0\nwhile i != 10: # \"!=\" is used for operator \"not equal to\" \n print(i) \n i=i+1", "_____no_output_____" ], [ "# let's rewrite the same loop by using negation of equality\ni = 0\nwhile not (i == 10): # \"==\" is used for operator \"equal to\" \n print(i) \n i=i+1\n \n# while-loop seems having more fun :-)\n# but we should be more careful when writing the condition(s)!", "_____no_output_____" ] ], [ [ "Consider the summation $ S(n) = 1+ 2+ 3 + \\cdots + n $ for some natural number $ n $.\n\nLet's find the minimum value of $ n $ such that $ S(n) \\geq 1000 $.\n\nWhile-loop works very well for this task.\n<ul>\n <li>We can iteratively increase $ n $ and update the value of $ S(n) $.</li>\n <li>The loop iterates as long as $S(n)$ is less than 1000.</li>\n <li>Once it hits 1000 or a greater number, the loop will be terminated.</li>\n</ul>", "_____no_output_____" ] ], [ [ "# summation and n are zeros at the beginning\nS = 0\nn = 0\nwhile S < 1000: # this loop will stop after S exceeds 999 (S = 1000 or S > 1000)\n n = n +1\n S = S + n\n# let's print n and S\nprint(\"n =\",n,\" S =\",S)", "n = 45 S = 1035\n" ] ], [ [ "<h3> Task 3 
</h3>\n\nConsider the summation $ T(n) = 1 + \dfrac{1}{2} + \dfrac{1}{4}+ \dfrac{1}{8} + \cdots + \dfrac{1}{2^n} $ for some natural number $ n $. \n\nRemark that $ T(0) = \dfrac{1}{2^0} = \dfrac{1}{1} = 1 $.\n\nThis summation can be arbitrarily close to $2$. \n\nFind the minimum value of $ n $ such that $ T(n) $ is close to $2$ by $ 0.01 $, i.e., $ 2 - T(n) < 0.01 $.\n\nIn other words, we find the minimum value of $n$ such that $ T(n) > 1.99 $.\n\nThe operator for \"less than or equal to\" in Python is \"$ <= $\".", "_____no_output_____" ] ], [ [ "# three examples for the operator \"less than or equal to\"\n#print (4 <= 5)\n#print (5 <= 5)\n#print (6 <= 5)\n# you may uncomment the above three lines and see the results by running this cell\n\n#\n# your solution is here\n#\n", "_____no_output_____" ] ], [ [ "<a href=\"Python12_Basics_Loops_Solutions.ipynb#task3\">click for our solution</a>", "_____no_output_____" ], [ "<h3> Task 4 </h3>\n\nRandomly pick number(s) between 0 and 9 until hitting 3, and then print the number of attempt(s).\n\nWe can use the <i>randrange</i> function from the <i>random</i> module for randomly picking a number in the given range.", "_____no_output_____" ] ], [ [ "# this is the code for including function randrange into our program \nfrom random import randrange\n# randrange(n) picks a number from the list [0,1,2,...,n-1] randomly\n#r = randrange(100)\n#print(r)\n\n#\n# your solution is here\n#\n", "_____no_output_____" ] ], [ [ "<a href=\"Python12_Basics_Loops_Solutions.ipynb#task4\">click for our solution</a>", "_____no_output_____" ], [ "<h3> Task 5 </h3>\n\nThis task is challenging. \n\nIt is designed for the usage of double nested loops: one loop inside of the other loop.\n\nIn the fourth task above, the expected number of attempt(s) to hit number 3 is 10. \n\nDo a series of experiments by using your solution for Task 4.\n\nExperiment 1: Execute your solution 20 times, and then calculate the average attempts.\n\nExperiment 2: Execute your solution 200 times, and then calculate the average attempts.\n\nExperiment 3: Execute your solution 2000 times, and then calculate the average attempts.\n\nExperiment 4: Execute your solution 20000 times, and then calculate the average attempts.\n\nExperiment 5: Execute your solution 200000 times, and then calculate the average attempts.\n\n<i>Your experimental average should get closer to 10 when the number of executions is increased.</i>\n\nRemark that all five experiments may also be automatically done by using triple loops.", "_____no_output_____" ], [ "<a href=\"Python12_Basics_Loops_Solutions.ipynb#task5\">click for our solution</a>", "_____no_output_____" ] ], [ [ "# here is a schematic example for double nested loops\n#for i in range(10):\n#    your_code1\n#    your_code2\n#    while j != 7:\n#        your_code_3\n#        your_code_4\n\n#\n# your solution is here\n#\n", "_____no_output_____" ] ] ]
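One possible way to approach Tasks 4 and 5 above (a sketch, not the official solution linked in the notebook): the Task 4 experiment is wrapped in a function, and an outer loop repeats it for each experiment size and averages the attempt counts.

```python
from random import randrange

def attempts_until_three():
    # Task 4: draw numbers between 0 and 9 until hitting 3, return the number of attempts
    attempts = 0
    while True:
        attempts = attempts + 1
        if randrange(10) == 3:
            return attempts

# Task 5: repeat the experiment with increasing numbers of executions
for executions in (20, 200, 2000, 20000, 200000):
    total = 0
    for _ in range(executions):
        total = total + attempts_until_three()
    print(executions, "executions -> average attempts =", total / executions)
```

With more executions the printed averages should drift toward the expected value of 10.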
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
d0aa31b56def6095608dee7ca9e22c755c34a4c3
25,324
ipynb
Jupyter Notebook
tdx/test-tdx.ipynb
dizzy21c/easyqtrs
4704674d2175d40afdc306afd8a002a486c83220
[ "MIT" ]
2
2021-12-30T13:43:13.000Z
2022-01-23T13:39:54.000Z
tdx/test-tdx.ipynb
dizzy21c/easyqtrs
4704674d2175d40afdc306afd8a002a486c83220
[ "MIT" ]
null
null
null
tdx/test-tdx.ipynb
dizzy21c/easyqtrs
4704674d2175d40afdc306afd8a002a486c83220
[ "MIT" ]
4
2021-10-22T01:44:47.000Z
2022-01-05T05:49:20.000Z
26.940426
145
0.413797
[ [ [ "import os\nimport struct\nimport pandas as pd\nimport numpy as np\nimport talib as tdx", "_____no_output_____" ], [ "def readTdxLdayFile(fname=\"data/sh000001.day\"):\n dataSet=[]\n with open(fname,'rb') as fl:\n buffer=fl.read() #读取数据到缓存\n size=len(buffer) \n rowSize=32 #通信达day数据,每32个字节一组数据\n code=os.path.basename(fname).replace('.day','')\n for i in range(0,size,rowSize): #步长为32遍历buffer\n row=list( struct.unpack('IIIIIfII',buffer[i:i+rowSize]) )\n row[1]=row[1]/100\n row[2]=row[2]/100\n row[3]=row[3]/100\n row[4]=row[4]/100\n row.pop() #移除最后无意义字段\n row.insert(0,code)\n dataSet.append(row) \n\n data=pd.DataFrame(data=dataSet,columns=['code','tradeDate','open','high','low','close','amount','vol'])\n data=data.set_index(['tradeDate'])\n return code, data\n", "_____no_output_____" ], [ "def select1(code, data):\n # 连续三日缩量\n cn = data.close.iloc[-1]\n# df=pd.concat([tdx.MA(data.close, x) for x in (5,10,20,30,60,90,120,250,500,750,1000,1500,2000,2500,) ], axis = 1).dropna()[-1:]\n df=pd.concat([tdx.MA(data.close, x) for x in (5,10,20,30,60,90,120,250,500,750,1000,1500,2000,2500,) ], axis = 1)[-1:]\n df.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250', u'm500', u'm750', u'm1000', u'm1500', u'm2000', u'm2500'] \n df_c2 = df.m5 > df.m10\n df_c1 = cn > df.m5\n df_c = cn > df.m5\n df_h = df.apply(lambda x:cn > x.max() , axis = 1 )\n# df_l = df.apply(lambda x:x.min() >= cl, axis = 1 )\n \n df['dfh'] = df_h\n df['dfc2'] = df_c2\n df['dfc1'] = df_c1\n df['code'] =code\n# out=df.iloc[-1].apply(lambda x: True if x>cl and x < ch else False)\n df=df.reset_index('tradeDate')\n df=df.set_index(['code','tradeDate'])\n return df\n", "_____no_output_____" ], [ "from threading import Thread, current_thread, Lock\nimport multiprocessing #import Pool, cpu_count, Queue", "_____no_output_____" ], [ "def asyncCalc(fname, queue):\n code, df = readTdxLdayFile(fname)\n queue.put(select1(code, df))\n \n", "_____no_output_____" ], [ "def readPath(path):\n files = os.listdir(path)\n # codes=[]\n q = multiprocessing.Queue()\n jobs = []\n # dataSet=[]multiprocessing\n pool_size = multiprocessing.cpu_count()\n pool = multiprocessing.Pool(pool_size)\n output=pd.DataFrame()\n for i in range(0,len(files)):\n fname = os.path.join(path,files[i])\n if os.path.isdir(fname):\n continue\n pool.apply_async(asyncCalc, args=(fname))\n p = multiprocessing.Process(target=asyncCalc, args=(fname, q))\n jobs.append(p)\n p.start()\n \n for p in jobs:\n p.join()\n\n for j in jobs:\n t = q.get()\n if t is not None:\n output=output.append(t)\n return output\n", "_____no_output_____" ], [ "output=readPath('/tmp/easyquant/tdx/data') #读取目录下面的所有文件", "_____no_output_____" ], [ "output", "_____no_output_____" ], [ "code, data = readTdxLdayFile('/tmp/easyquant/tdx/data/sh000001.day')", "_____no_output_____" ], [ "select1(code,data)", "_____no_output_____" ], [ "code", "_____no_output_____" ], [ "data=df", "_____no_output_____" ], [ "cn = data.close.iloc[-1]", "_____no_output_____" ], [ "cn=cn+1000", "_____no_output_____" ], [ "df=pd.concat([tdx.MA(data.close, x) for x in (5,10,20,30,60,90,120,250,500,750,1000,21500,20000,25000,) ], axis = 1)[-1:]", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250', u'm500', u'm750', u'm1000', u'm1500', u'm2000', u'm2500']", "_____no_output_____" ], [ "df_c = df.m5 > df.m10\ndf_c1 = cn > df.m5\ndf_h = df.apply(lambda x:cn > x.max() , axis = 1 )", "_____no_output_____" ], [ "df_h", "_____no_output_____" ], [ 
"df_h", "_____no_output_____" ], [ "da=data_df.reset_index('tradeDate')", "_____no_output_____" ], [ "df_c1", "_____no_output_____" ], [ "import datetime", "_____no_output_____" ], [ "pd.to_datetime(da.tradeDate)", "_____no_output_____" ], [ "# data_df.to_csv('test.csv')", "_____no_output_____" ], [ "data_df.index[,-1:-1]", "_____no_output_____" ], [ "def select1(code,data):\n # 连续三日缩量\n ch= data.close.iloc[-1] * 1.1\n cl= data.close.iloc[-1] * 0.9\n# ch= data.close * 1.1\n# cl = data.close * 0.9\n \n df=pd.concat([tdx.MA(data.close, x) for x in (5,10,20,30,60,90,120,250) ], axis = 1).dropna()[-1:]\n df.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250'] \n df_h = df.apply(lambda x:x.max() <= ch, axis = 1 )\n df_l = df.apply(lambda x:x.min() >= cl, axis = 1 )\n \n df['dfh'] = df_h\n df['dfl'] = df_l\n df['code'] =code\n# out=df.iloc[-1].apply(lambda x: True if x>cl and x < ch else False)\n df=df.reset_index('tradeDate')\n df=df.set_index(['code','tradeDate'])\n return df\n", "_____no_output_____" ], [ "bbb=select1('sh000001',data_df.loc['sh000001',])", "_____no_output_____" ], [ "bbb", "_____no_output_____" ], [ "bbb=bbb.set_index(['code','tradeDate'])", "_____no_output_____" ], [ "data=bbb.set_index(['code','tradeDate'])", "_____no_output_____" ], [ "output=None\nfor code in codes:\n aaa=data_df.loc[code,]\n out=select1(code, aaa)\n if output is None:\n output = out\n else:\n# print(code)\n output=output.append(out)", "_____no_output_____" ], [ "output", "_____no_output_____" ], [ "output.query('dfh==True and dfl==True').to_csv('out1.csv')", "_____no_output_____" ], [ "bb=select1('000001',aaa)", "_____no_output_____" ], [ "type(bb)", "_____no_output_____" ], [ "import talib as tdx", "_____no_output_____" ], [ "aaa=pd.read_csv('test.csv')", "_____no_output_____" ], [ "aaa.set_index('vol').sort_index()", "_____no_output_____" ], [ "df=readTdxLdayFile()\ndf['mon'] = df.tradeDate.apply(lambda x : str(x)[0:6])\ndf=df.set_index(['tradeDate'])", "_____no_output_____" ], [ "dfmax=df.groupby(['mon']).apply(lambda x: x[x.high ==x.high.max()])\ndfmax.drop_duplicates(subset=['high','mon'],keep='first',inplace=True)\ndfmin=df.groupby(['mon']).apply(lambda x: x[x.low ==x.low.min()])\ndfmin.drop_duplicates(subset=['low','mon'],keep='first',inplace=True)", "_____no_output_____" ], [ "dfmax.to_csv('max.csv')", "_____no_output_____" ], [ "dfmin.to_csv('min.csv')", "_____no_output_____" ], [ "dfmax", "_____no_output_____" ], [ "for x in dfmax.index:\n print(df.loc[x[1]])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0aa398181436ab89895a37c92fdcf6beeb54bd1
6,650
ipynb
Jupyter Notebook
training/test_dac.ipynb
heymesut/SJTU_microe
7a862d03b4d8fe4c8608173a16082f44001f3f13
[ "BSD-3-Clause" ]
1
2022-03-08T16:20:51.000Z
2022-03-08T16:20:51.000Z
training/test_dac.ipynb
heymesut/SJTU_microe
7a862d03b4d8fe4c8608173a16082f44001f3f13
[ "BSD-3-Clause" ]
null
null
null
training/test_dac.ipynb
heymesut/SJTU_microe
7a862d03b4d8fe4c8608173a16082f44001f3f13
[ "BSD-3-Clause" ]
1
2022-03-21T08:09:27.000Z
2022-03-21T08:09:27.000Z
39.583333
156
0.544211
[ [ [ "import argparse\n\nimport torch.distributed as dist\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\n\nimport test # import test.py to get mAP after each epoch\nfrom models import *\nfrom utils.datasets import *\nfrom utils.utils import *\n\nfrom mymodel import *\n\n# Hyperparameters (results68: 59.9 [email protected] yolov3-spp-416) https://github.com/ultralytics/yolov3/issues/310\n\nhyp = {'giou': 3.54, # giou loss gain\n 'cls': 37.4, # cls loss gain\n 'cls_pw': 1.0, # cls BCELoss positive_weight\n 'obj': 64.3, # obj loss gain (*=img_size/320 if img_size != 320)\n 'obj_pw': 1.0, # obj BCELoss positive_weight\n 'iou_t': 0.225, # iou training threshold\n 'lr0': 0.01, # initial learning rate (SGD=5E-3, Adam=5E-4)\n 'lrf': -4., # final LambdaLR learning rate = lr0 * (10 ** lrf)\n 'momentum': 0.937, # SGD momentum\n 'weight_decay': 0.000484, # optimizer weight decay\n 'fl_gamma': 0.5, # focal loss gamma\n 'hsv_h': 0.0138, # image HSV-Hue augmentation (fraction)\n 'hsv_s': 0.678, # image HSV-Saturation augmentation (fraction)\n 'hsv_v': 0.36, # image HSV-Value augmentation (fraction)\n 'degrees': 1.98, # image rotation (+/- deg)\n 'translate': 0.05, # image translation (+/- fraction)\n 'scale': 0.05, # image scale (+/- gain)\n 'shear': 0.641} # image shear (+/- deg)\n", "_____no_output_____" ], [ "parser = argparse.ArgumentParser()\n\nparser.add_argument('--batch-size', type=int, default=16) # effective bs = batch_size * accumulate = 16 * 4 = 64\nparser.add_argument('--accumulate', type=int, default=4, help='batches to accumulate before optimizing')\nparser.add_argument('--cfg', type=str, default='cfg/yolov3-tiny-1cls_1.cfg', help='*.cfg path')\nparser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data path')\nparser.add_argument('--img-size', nargs='+', type=int, default=[320], help='train and test image-sizes')\nparser.add_argument('--rect', action='store_true', help='rectangular training')\nparser.add_argument('--cache-images', action='store_true', help='cache images for faster training')\nparser.add_argument('--weights', type=str, default='/home/denggc/DAC2021/dgc/April/ultra_bypass/weights/test_best.pt', help='initial weights path')\nparser.add_argument('--arc', type=str, default='default', help='yolo architecture') # default, uCE, uBCE\nparser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')\nparser.add_argument('--device', default='1', help='device id (i.e. 
0 or 0,1 or cpu)')\nparser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')\nparser.add_argument('--var', type=float, help='debug variable')\nopt = parser.parse_known_args()[0]\nprint(opt)\nprint(opt.weights)\ndevice = torch_utils.select_device(opt.device, batch_size=opt.batch_size)\nprint(device)\n\n\nimg_size, img_size_test = opt.img_size if len(opt.img_size) == 2 else opt.img_size * 2 # train, test sizes\nbatch_size = opt.batch_size\naccumulate = opt.accumulate # effective bs = batch_size * accumulate = 16 * 4 = 64\nweights = opt.weights # initial training weights\n\n\ntest_path = '../DAC-SDC2021/dataset/sample'\nnc = 1 ", "_____no_output_____" ], [ "model = UltraNet_Bypass().to(device)\nmodel.hyp = hyp\nmodel.nc = 1\nmodel.arc = 'default'", "_____no_output_____" ], [ "if weights.endswith('.pt'): # pytorch format\n # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.\n print(\"load weights...\")\n model.load_state_dict(torch.load(weights, map_location=device)['model'])", "_____no_output_____" ], [ "batch_size = min(batch_size, 1)\nnw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers\ndataset = LoadImagesAndLabels(test_path, img_size_test, batch_size * 2,\n hyp=hyp,\n rect=False,\n cache_images=opt.cache_images,\n single_cls=opt.single_cls)\ntestloader = torch.utils.data.DataLoader(dataset,\n batch_size=batch_size * 2,\n num_workers=nw,\n pin_memory=True,\n collate_fn=dataset.collate_fn)", "_____no_output_____" ], [ "results = test.test(opt.cfg,\n opt.data,\n batch_size=batch_size * 2,\n img_size=img_size_test,\n model=model,\n conf_thres=0.001, # 0.001 if opt.evolve or (final_epoch and is_coco) else 0.01,\n iou_thres=0.6,\n save_json=False,\n single_cls=opt.single_cls,\n dataloader=testloader)\nprint(results)", "_____no_output_____" ] ] ]
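The `--accumulate` argument above relies on gradient accumulation to reach the effective batch size noted in the comments (`batch_size * accumulate`). The loop below is a generic PyTorch sketch of that idea with a placeholder model, optimizer and data; it is not code from this repository.

```python
import torch

accumulate = 4                                   # as in the notebook: effective bs = 16 * 4 = 64
model = torch.nn.Linear(10, 1)                   # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

optimizer.zero_grad()
for i in range(64):
    x, target = torch.randn(16, 10), torch.randn(16, 1)   # placeholder mini-batch
    loss = torch.nn.functional.mse_loss(model(x), target)
    (loss / accumulate).backward()               # scale so the accumulated gradient is an average
    if (i + 1) % accumulate == 0:                # optimizer steps once every `accumulate` mini-batches
        optimizer.step()
        optimizer.zero_grad()
```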
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d0aa4e4d797bd3792b989d8355ff3d585e856626
30,636
ipynb
Jupyter Notebook
old/.ipynb_checkpoints/Histogram2-checkpoint.ipynb
albarrom/GII_O_MA_21.05
c3b8563f0ed20f4ef7e045ce08add84bf91595ed
[ "BSD-3-Clause" ]
null
null
null
old/.ipynb_checkpoints/Histogram2-checkpoint.ipynb
albarrom/GII_O_MA_21.05
c3b8563f0ed20f4ef7e045ce08add84bf91595ed
[ "BSD-3-Clause" ]
16
2022-03-14T22:23:24.000Z
2022-03-31T17:14:45.000Z
old/Histogram2.ipynb
albarrom/GII_O_MA_21.05
c3b8563f0ed20f4ef7e045ce08add84bf91595ed
[ "BSD-3-Clause" ]
null
null
null
41.852459
142
0.37012
[ [ [ "# [Histogram](https://plotly.com/python/histograms/)\n\n## 1. importar las librerías + csv con los datos de la encuesta. ", "_____no_output_____" ] ], [ [ "# importar librerias\n\nimport pandas as pd\nimport plotly.express as px \nfrom dash import Dash, dcc, html, Input, Output\n\n\n#crear un dataframe con toda la informacion de la encuesta\ndf_csv = pd.read_csv ('../data/survey_results_public2021.csv', index_col = [0]) # El indice sera la columna con el ID de la respuesta\ndf_csv #mostrar df ()", "_____no_output_____" ] ], [ [ "## 2. Preprocesar datos.\n\nTratar las columnas/conjunto de datos para comenzar a crear los gráficos. En este caso Age1stcode", "_____no_output_____" ] ], [ [ "df_csv['Age1stCode'].value_counts() ", "_____no_output_____" ] ], [ [ "Para lidiar con rangos de edades, algunos de los cuales tienen texto, se va a calcular una nueva columna con la media de todos ellos. \n", "_____no_output_____" ] ], [ [ "#se hace una copia del df.\ndf= df_csv.copy()\n\n#normalizar todos los datos.\n\ndf = df[df['Age1stCode'].notna()] #eliminar los nulos\n\n\ndf.loc[df[\"Age1stCode\"] == \"Younger than 5 years\", \"Age1stCode\"] = \"04 - 04 years\" #ya hay un 05 anyos en el df. \ndf.loc[df[\"Age1stCode\"] == \"Older than 64 years\", \"Age1stCode\"] = \"65 - 65 years\"\ndf.loc[df[\"Age1stCode\"] == \"5 - 10 years\", \"Age1stCode\"] = \"05 - 10 years\"\n\n#primero se seleccionan los digitos del string (la columna del df es string) y el resultado se convierte a entero\ndf['min'] = df.Age1stCode.astype(str).str[:2].astype(int) #la edad minima del rango es el primer numero\ndf['max'] = df.Age1stCode.astype(str).str[5:7].astype(int) # el maximo es el segundo numero\n\n#una vez ya se tiene la edad minima y la maxima, se calcula la media de ambas columnas.\ndf['media'] = df[['min', 'max']].mean(axis=1)\n", "_____no_output_____" ] ], [ [ "## 3. Grafico. \n\nEn este caso, un diagrama de barras.", "_____no_output_____" ] ], [ [ "app = Dash(__name__)\nserver = app.server #heroku\napp.layout = html.Div([\n\n html.H1(\"Tipo de desarrollador\", style={'text-align': 'center'}), #cabecero h1. Header\n \n #primera mini prueba con un menu desplegable.\n dcc.Dropdown(id=\"select_opt\", \n options=[ #el usuario va a ver las label.\n {\"label\": \"#\", \"value\": \"numero\"},\n {\"label\": \"%\", \"value\": \"porcentaje\"}],\n multi=False,\n value=\"numero\",\n style={'width': \"40%\"}\n ),\n\n dcc.Graph(id='my_survey', figure={}) # graph container\n\n])", "_____no_output_____" ], [ "@app.callback(\n Output(component_id='my_survey', component_property='figure'),\n Input(component_id='select_opt', component_property='value'))\ndef update_graph(option_slctd):\n #filtered_df = df[df.year == selected_year]\n fig = px.histogram(df, x=\"media\",\n title='Histograma de edad',\n labels={'media':'media', 'count':'total'}, # can specify one label per df column\n opacity=0.8,\n color_discrete_sequence=['indianred'] # color of histogram bars\n )\n # no implementado la opcion con el porcentaje\n \n return fig", "_____no_output_____" ] ], [ [ "## 4. run server", "_____no_output_____" ] ], [ [ "app.run_server(debug=True, use_reloader=False)", "Dash is running on http://127.0.0.1:8050/\n\n * Serving Flask app \"__main__\" (lazy loading)\n * Environment: production\n\u001b[31m WARNING: This is a development server. Do not use it in a production deployment.\u001b[0m\n\u001b[2m Use a production WSGI server instead.\u001b[0m\n * Debug mode: on\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d0aa4f926ff856508e0ea6fb8bf3d272ccf5ee78
72,388
ipynb
Jupyter Notebook
_notebooks/2021-02-21-portfolio_analysis.ipynb
jinchao-chen/blog_posts
08907d3f3db3a4e0cd0793a008effa077fee9f33
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-02-21-portfolio_analysis.ipynb
jinchao-chen/blog_posts
08907d3f3db3a4e0cd0793a008effa077fee9f33
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-02-21-portfolio_analysis.ipynb
jinchao-chen/blog_posts
08907d3f3db3a4e0cd0793a008effa077fee9f33
[ "Apache-2.0" ]
null
null
null
100.121715
27,640
0.665262
[ [ [ "# How to Build a Personalized Trading Dashboard? \n> A personalized dashboard to viusalize trading actions and stock development\n\n- toc: false\n- badges: true\n- comments: true\n- categories: [data analysis, trading, jupternotebook]", "_____no_output_____" ] ], [ [ "# hide \nfrom datetime import datetime, timedelta\n\nimport altair as alt\nimport numpy as np\nimport pandas as pd\nimport panel as pn\nimport yfinance as yf\nfrom altair import datum\nfrom utility import *\n\nalt.renderers.enable(\"default\")\npn.extension(\"vega\")\n\ncopy_to_post = 1", "_____no_output_____" ] ], [ [ "**Update 11-May**: The [web app](https://share.streamlit.io/jinchao-chen/portfolio-dashboard/streamlit/web_app.py) is now deployed on streamlit. Please check it out and let me know how it works. ", "_____no_output_____" ], [ "TL;NR: if you are mainly interested in the codes, here is the link to [GitHub](https://github.com/jinchao-chen/portfolio-dashboard)", "_____no_output_____" ], [ "# Motivation\n\nYesterday I came across an article [The Boredom Economy](https://www.nytimes.com/2021/02/20/business/gamestop-investing-economy.html). Sydney Ember explained the GameStop phenomenon as investors' reaction to the boredom experienced during the pandemic. \n\nBeing one *amateur day traders* new to the market, I fully concur with the explanation Sydney put forth. I noticed myself spending hours daily *analyzing* the market and *trading* frequently for profits, as an escape from boredom. Resultantly, I I generated an amount of data in 2020 that could pertentially be used for a study on my 'trading style'. \n\nWith this in mind, I decided started a project on analysing my trading activities. To start with, I create a dashboard to visualize the activities, to understand how and when I tend to buy/sell a stock. ", "_____no_output_____" ], [ "# Preparation\n\nMy primary trading platform is [Trading212](https://www.trading212.com). The platform recently included a [new feature](https://community.trading212.com/t/new-feature-export-your-investing-history/35612) that allows exporting transaction history in csv format. The exported data is clean neatly structured, which is ready for analysis. \n\nFor the tools, I noticed a repo on GitHub [panel-altair-dashboard](https://github.com/bendoesdata/panel-altair-dashboard) that creates a simple, yet powerful, visualization tool (dashboard) in roughly 25 lines of codes. It is achieved by with Panel and Altair. \n\nTo achieve visualizing my trading activities, I include the following features: \n\n- mark transaction actions (sell or buy), in the stock time history\n- plot the stock historical data using candlestick\n\n", "_____no_output_____" ], [ "# Time to visualize the time series!\n\nBelow is a screenshot of the dashboard. In it, the transaction data (buy or sell)is visualzied along with the market data. There is an option to visualize the market data in either line plot or candlestick, depending on whether you are interested in long term trends or the variations within the day. For a demo, please refer to [binder](https://mybinder.org/v2/gh/jinchao-chen/portfolio-dashboard/HEAD). \n\nIt is still at the very early stage of development. In the future, I would like to add the following, \n\n- provide a summary of my portfolio \n- normalize the stock price for the selected during \n\nAnd more features will be inlcuded, if I find anything interesting! 
\n", "_____no_output_____" ], [ "![chart](./figs/trading_dashboard.png)", "_____no_output_____" ] ], [ [ "# hide\nfln = \"dummy_transactions.csv\"\ntransactions = read_transactions(fln)", "_____no_output_____" ], [ "# hide\ntitle = '# Trading Dashboard'\nsubtitle = 'A personalized visualization tool for **Trading 212** trading activities and market data'\n\ncompanies = transactions[\"Ticker\"].dropna().unique().tolist()\nticker = pn.widgets.Select(name=\"Company\", options=companies)\nstyle = pn.widgets.Select(name=\"Plot Style\", options=[\"Candelstick\", \"Line\"])\n\n# this creates the date range slider\ndate_range_slider = pn.widgets.DateRangeSlider(\n name=\"Date Range\",\n start=datetime(2020, 1, 1),\n end=datetime.today(),\n value=(datetime(2020, 1, 1), datetime.today()),\n)\n\n# tell Panel what your plot \"depends\" on.\[email protected](ticker.param.value, date_range_slider.param.value, style.param.value)\ndef get_plots(ticker, date_range, style): # start function\n\n # filter based on ticker\n subset = transactions[transactions[\"Ticker\"] == ticker]\n\n start_date = date_range_slider.value[\n 0\n ] # store the first date range slider value in a var\n end_date = date_range_slider.value[1] # store the end date in a var\n\n ts = read_ticker_ts(ticker=ticker, start=start_date, end=end_date)\n\n if style == \"Candelstick\":\n chart = plot_transactions(subset, ts)\n else:\n chart = plot_transactions_2(subset, ts)\n\n return chart\n\ndashboard = pn.Row(\n pn.Column(title, subtitle, ticker, style, date_range_slider),\n get_plots, # draw chart function!\n)", "_____no_output_____" ], [ "# hide\ndashboard.servable()", "_____no_output_____" ], [ "# hide \nfrom shutil import copyfile\n\nif copy_to_post:\n src = \"transation_view_altair.ipynb\"\n dst = \"../blog_posts/_notebooks/2021-02-21-portfolio_analysis.ipynb\"\n copyfile(src, dst)\n\n print(\"copied\")", "copied\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
d0aa514f476286fe490cc8c5408061820234afee
6,916
ipynb
Jupyter Notebook
Code/Spark/example2/6_dataframe_quiz_solution.ipynb
dSalazar10/Course-Exploring_NoSQL_Databases
58f8f4ae8097ca53c1a46c68d63b065cac78b075
[ "MIT" ]
null
null
null
Code/Spark/example2/6_dataframe_quiz_solution.ipynb
dSalazar10/Course-Exploring_NoSQL_Databases
58f8f4ae8097ca53c1a46c68d63b065cac78b075
[ "MIT" ]
null
null
null
Code/Spark/example2/6_dataframe_quiz_solution.ipynb
dSalazar10/Course-Exploring_NoSQL_Databases
58f8f4ae8097ca53c1a46c68d63b065cac78b075
[ "MIT" ]
null
null
null
28.460905
730
0.530364
[ [ [ "# Answer Key to the Data Wrangling with DataFrames Coding Quiz\n\nHelpful resources:\nhttp://spark.apache.org/docs/latest/api/python/pyspark.sql.html", "_____no_output_____" ] ], [ [ "from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import isnan, count, when, col, desc, udf, col, sort_array, asc, avg\nfrom pyspark.sql.functions import sum as Fsum\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.types import IntegerType", "_____no_output_____" ], [ "# 1) import any other libraries you might need\n# 2) instantiate a Spark session \n# 3) read in the data set located at the path \"data/sparkify_log_small.json\"\n# 4) write code to answer the quiz questions \n\nspark = SparkSession \\\n .builder \\\n .appName(\"Data Frames practice\") \\\n .getOrCreate()\n\ndf = spark.read.json(\"data/sparkify_log_small.json\")", "_____no_output_____" ] ], [ [ "# Question 1\n\nWhich page did user id \"\" (empty string) NOT visit?", "_____no_output_____" ] ], [ [ "df.printSchema()", "root\n |-- artist: string (nullable = true)\n |-- auth: string (nullable = true)\n |-- firstName: string (nullable = true)\n |-- gender: string (nullable = true)\n |-- itemInSession: long (nullable = true)\n |-- lastName: string (nullable = true)\n |-- length: double (nullable = true)\n |-- level: string (nullable = true)\n |-- location: string (nullable = true)\n |-- method: string (nullable = true)\n |-- page: string (nullable = true)\n |-- registration: long (nullable = true)\n |-- sessionId: long (nullable = true)\n |-- song: string (nullable = true)\n |-- status: long (nullable = true)\n |-- ts: long (nullable = true)\n |-- userAgent: string (nullable = true)\n |-- userId: string (nullable = true)\n\n" ], [ "# filter for users with blank user id\nblank_pages = df.filter(df.userId == '') \\\n .select(col('page') \\\n .alias('blank_pages')) \\\n .dropDuplicates()\n\n# get a list of possible pages that could be visited\nall_pages = df.select('page').dropDuplicates()\n\n# find values in all_pages that are not in blank_pages\n# these are the pages that the blank user did not go to\nfor row in set(all_pages.collect()) - set(blank_pages.collect()):\n print(row.page)", "Submit Upgrade\nDowngrade\nLogout\nUpgrade\nSettings\nNextSong\nError\nSave Settings\nSubmit Downgrade\n" ] ], [ [ "# Question 2 - Reflect\n\nWhat type of user does the empty string user id most likely refer to?\n", "_____no_output_____" ], [ "Perhaps it represents users who have not signed up yet or who are signed out and are about to log in.", "_____no_output_____" ], [ "# Question 3\n\nHow many female users do we have in the data set?", "_____no_output_____" ] ], [ [ "df.filter(df.gender == 'F') \\\n .select('userId', 'gender') \\\n .dropDuplicates() \\\n .count()", "_____no_output_____" ] ], [ [ "# Question 4\n\nHow many songs were played from the most played artist?", "_____no_output_____" ] ], [ [ "df.filter(df.page == 'NextSong') \\\n .select('Artist') \\\n .groupBy('Artist') \\\n .agg({'Artist':'count'}) \\\n .withColumnRenamed('count(Artist)', 'Artistcount') \\\n .sort(desc('Artistcount')) \\\n .show(1)", "+--------+-----------+\n| Artist|Artistcount|\n+--------+-----------+\n|Coldplay| 83|\n+--------+-----------+\nonly showing top 1 row\n\n" ] ], [ [ "# Question 5 (challenge)\n\nHow many songs do users listen to on average between visiting our home page? 
Please round your answer to the closest integer.\n\n", "_____no_output_____" ] ], [ [ "# TODO: filter out 0 sum and max sum to get more exact answer\n\nfunction = udf(lambda ishome : int(ishome == 'Home'), IntegerType())\n\nuser_window = Window \\\n .partitionBy('userID') \\\n .orderBy(desc('ts')) \\\n .rangeBetween(Window.unboundedPreceding, 0)\n\ncusum = df.filter((df.page == 'NextSong') | (df.page == 'Home')) \\\n .select('userID', 'page', 'ts') \\\n .withColumn('homevisit', function(col('page'))) \\\n .withColumn('period', Fsum('homevisit').over(user_window))\n\ncusum.filter((cusum.page == 'NextSong')) \\\n .groupBy('userID', 'period') \\\n .agg({'period':'count'}) \\\n .agg({'count(period)':'avg'}).show()", "+------------------+\n|avg(count(period))|\n+------------------+\n| 6.898347107438017|\n+------------------+\n\n" ] ] ]
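The Question 5 trick above (turn `Home` visits into a 0/1 flag, then take a cumulative sum over a descending-timestamp window so every song is labelled with its listening "period") is easier to see on a toy log. The five-row dataset below is made up purely for illustration.

```python
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.window import Window

spark = SparkSession.builder.appName("window_demo").getOrCreate()

# made-up log for one user: Home, two songs, Home, one song
log = spark.createDataFrame(
    [("u1", "Home", 1), ("u1", "NextSong", 2), ("u1", "NextSong", 3),
     ("u1", "Home", 4), ("u1", "NextSong", 5)],
    ["userID", "page", "ts"])

w = Window.partitionBy("userID").orderBy(F.desc("ts")) \
          .rangeBetween(Window.unboundedPreceding, 0)

segmented = log.withColumn("homevisit", (F.col("page") == "Home").cast("int")) \
               .withColumn("period", F.sum("homevisit").over(w))

# counting songs per (user, period) and averaging those counts reproduces the Question 5 logic
segmented.filter("page = 'NextSong'").groupBy("userID", "period").count().show()
```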
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0aa5ed3c2fe7d3bb1087c2b25fcbf4a2b48e9f0
20,554
ipynb
Jupyter Notebook
EDA/EDA-1.ipynb
ekdnam/NLP-Summit-Hackathon
fed66f0b1d3b0b4d1fecac643d1741628c8972c8
[ "Apache-2.0" ]
null
null
null
EDA/EDA-1.ipynb
ekdnam/NLP-Summit-Hackathon
fed66f0b1d3b0b4d1fecac643d1741628c8972c8
[ "Apache-2.0" ]
null
null
null
EDA/EDA-1.ipynb
ekdnam/NLP-Summit-Hackathon
fed66f0b1d3b0b4d1fecac643d1741628c8972c8
[ "Apache-2.0" ]
null
null
null
36.443262
108
0.43033
[ [ [ "# Exploratory Data Analysis", "_____no_output_____" ], [ "## Importing the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport re\nimport nltk\n\nprint(pd.__version__)\nprint(np.__version__)\nprint(nltk.__version__)\nprint(matplotlib.__version__)", "1.0.5\n1.16.6\n3.5\n3.2.2\n" ], [ "path_to_input = \"../input/\"", "_____no_output_____" ], [ "# drop first column containing sr nos\n\ndf = pd.read_csv(path_to_input + \"mtsamples.csv\", index_col = 0)\n\nprint(df)", " description \\\n0 A 23-year-old white female presents with comp... \n1 Consult for laparoscopic gastric bypass. \n2 Consult for laparoscopic gastric bypass. \n3 2-D M-Mode. Doppler. \n4 2-D Echocardiogram \n... ... \n4994 Patient having severe sinusitis about two to ... \n4995 This is a 14-month-old baby boy Caucasian who... \n4996 A female for a complete physical and follow u... \n4997 Mother states he has been wheezing and coughing. \n4998 Acute allergic reaction, etiology uncertain, ... \n\n medical_specialty sample_name \\\n0 Allergy / Immunology Allergic Rhinitis \n1 Bariatrics Laparoscopic Gastric Bypass Consult - 2 \n2 Bariatrics Laparoscopic Gastric Bypass Consult - 1 \n3 Cardiovascular / Pulmonary 2-D Echocardiogram - 1 \n4 Cardiovascular / Pulmonary 2-D Echocardiogram - 2 \n... ... ... \n4994 Allergy / Immunology Chronic Sinusitis \n4995 Allergy / Immunology Kawasaki Disease - Discharge Summary \n4996 Allergy / Immunology Followup on Asthma \n4997 Allergy / Immunology Asthma in a 5-year-old \n4998 Allergy / Immunology Allergy Evaluation Consult \n\n transcription \\\n0 SUBJECTIVE:, This 23-year-old white female pr... \n1 PAST MEDICAL HISTORY:, He has difficulty climb... \n2 HISTORY OF PRESENT ILLNESS: , I have seen ABC ... \n3 2-D M-MODE: , ,1. Left atrial enlargement wit... \n4 1. The left ventricular cavity size and wall ... \n... ... \n4994 HISTORY:, I had the pleasure of meeting and e... \n4995 ADMITTING DIAGNOSIS: , Kawasaki disease.,DISCH... \n4996 SUBJECTIVE: , This is a 42-year-old white fema... \n4997 CHIEF COMPLAINT: , This 5-year-old male presen... \n4998 HISTORY: , A 34-year-old male presents today s... \n\n keywords \n0 allergy / immunology, allergic rhinitis, aller... \n1 bariatrics, laparoscopic gastric bypass, weigh... \n2 bariatrics, laparoscopic gastric bypass, heart... \n3 cardiovascular / pulmonary, 2-d m-mode, dopple... \n4 cardiovascular / pulmonary, 2-d, doppler, echo... \n... ... \n4994 NaN \n4995 allergy / immunology, mucous membranes, conjun... \n4996 NaN \n4997 NaN \n4998 NaN \n\n[4999 rows x 5 columns]\n" ], [ "print(df.shape)\ndf.head()", "(4999, 5)\n" ] ], [ [ "There are 4999 records in the dataset", "_____no_output_____" ] ], [ [ "df_v1 = df[['medical_specialty', 'sample_name', 'transcription', 'keywords']]", "_____no_output_____" ], [ "df_v1.head()", "_____no_output_____" ], [ "print(df_v1['medical_specialty'].value_counts())\nprint(\"Unique records in medical_specialty :\" + str(df_v1['medical_specialty'].value_counts().size))", " Surgery 1103\n Consult - History and Phy. 
516\n Cardiovascular / Pulmonary 372\n Orthopedic 355\n Radiology 273\n General Medicine 259\n Gastroenterology 230\n Neurology 223\n SOAP / Chart / Progress Notes 166\n Obstetrics / Gynecology 160\n Urology 158\n Discharge Summary 108\n ENT - Otolaryngology 98\n Neurosurgery 94\n Hematology - Oncology 90\n Ophthalmology 83\n Nephrology 81\n Emergency Room Reports 75\n Pediatrics - Neonatal 70\n Pain Management 62\n Psychiatry / Psychology 53\n Office Notes 51\n Podiatry 47\n Dermatology 29\n Cosmetic / Plastic Surgery 27\n Dentistry 27\n Letters 23\n Physical Medicine - Rehab 21\n Sleep Medicine 20\n Endocrinology 19\n Bariatrics 18\n IME-QME-Work Comp etc. 16\n Chiropractic 14\n Diets and Nutritions 10\n Rheumatology 10\n Speech - Language 9\n Autopsy 8\n Lab Medicine - Pathology 8\n Allergy / Immunology 7\n Hospice - Palliative Care 6\nName: medical_specialty, dtype: int64\nUnique records in medical_specialty :40\n" ], [ "df_v1['sample_name'].value_counts()", "_____no_output_____" ] ], [ [ "Dropping rows having na values", "_____no_output_____" ] ], [ [ "df_v1 = df_v1.dropna()\nprint(df_v1.shape[0])", "3898\n" ] ], [ [ "Thus we can see that the size of the dataset has decreased by 1101 rows", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0aa639280ac1cb7eff0e7b4e2f60f0024d4fe90
55,267
ipynb
Jupyter Notebook
machine-learning/Python/test.ipynb
amirhkiani77/Risk-Management
f6a868200fb90bdae21c14381084f8f01f6faf6c
[ "MIT" ]
null
null
null
machine-learning/Python/test.ipynb
amirhkiani77/Risk-Management
f6a868200fb90bdae21c14381084f8f01f6faf6c
[ "MIT" ]
null
null
null
machine-learning/Python/test.ipynb
amirhkiani77/Risk-Management
f6a868200fb90bdae21c14381084f8f01f6faf6c
[ "MIT" ]
null
null
null
270.916667
23,887
0.727758
[ [ [ "from sklearn.datasets import make_classification\nX, y = make_classification(n_features=2, n_redundant=0, n_informative=2, random_state=3)\nprint(X)\nprint(y)", "[[ 1.29619246 -1.19338286]\n [ 0.76455807 -2.44539487]\n [-0.56135339 1.44773498]\n [ 0.09018381 -0.21892726]\n [ 3.20977007 -1.03881206]\n [ 0.80001485 -1.66246266]\n [ 0.88468364 -0.41310735]\n [ 1.44727831 0.57410216]\n [ 0.17890668 1.6394023 ]\n [-0.58100008 0.33584532]\n [-0.40007267 2.04119894]\n [ 0.86736895 -2.33598454]\n [ 2.57067557 -0.2757088 ]\n [-0.50604244 0.36615269]\n [ 0.58326712 1.22009748]\n [ 2.35993321 -0.12424544]\n [-0.25023328 0.0491287 ]\n [ 1.54520785 0.55053486]\n [-0.12132899 -0.58306026]\n [ 1.93823769 0.08503077]\n [ 0.26842648 -0.03321614]\n [ 1.28259341 -0.6052909 ]\n [-1.24399007 0.44172354]\n [-0.33252253 -0.4085363 ]\n [-0.25906554 2.1460918 ]\n [-2.04183382 1.62989998]\n [ 0.75356541 0.74284632]\n [ 0.74494799 -1.69867877]\n [-2.08503765 1.69398556]\n [ 0.64755294 1.35713573]\n [ 1.36023958 -0.42680268]\n [-1.20002643 1.52879135]\n [-1.16926167 -2.94592209]\n [ 1.50713573 0.49784377]\n [-0.86717188 0.44292896]\n [-0.65100851 -0.18139162]\n [-0.61231169 1.9581328 ]\n [ 0.76943194 -1.27209495]\n [ 1.42047213 0.52835339]\n [ 1.15720441 0.62857476]\n [ 1.2110195 -0.99340173]\n [ 1.08891926 1.01427378]\n [-1.21637928 -0.67049437]\n [ 0.7540882 -0.59292029]\n [ 0.65104016 -0.87540635]\n [ 0.61228622 -0.66760619]\n [-1.15322318 -0.55930594]\n [ 0.9751491 -0.07935889]\n [-0.68752925 2.70557335]\n [ 1.45467028 0.5305809 ]\n [-1.02934082 1.61882875]\n [ 0.73121797 0.78491622]\n [-0.76836066 2.64536187]\n [ 1.36803133 -0.42683556]\n [ 1.5712194 -0.56634981]\n [-1.17635927 -0.4082274 ]\n [ 0.04109935 1.09921185]\n [ 1.02875005 1.09087416]\n [-2.66474084 2.49277994]\n [ 0.66721754 -1.10970556]\n [ 1.41977144 -1.15516349]\n [ 0.72549452 -0.50767362]\n [-0.87117928 -0.54828912]\n [ 1.4248824 0.71838145]\n [ 0.7897617 0.31888107]\n [-2.55673111 0.48095735]\n [-2.06574644 1.18794747]\n [-2.16875069 -1.45409905]\n [-1.82761265 1.00873328]\n [-0.92431911 -3.11057891]\n [ 0.64138689 -0.83568997]\n [ 0.60949189 1.31324877]\n [ 0.87593712 -0.74419114]\n [-0.71866586 -1.81113273]\n [-3.10109522 1.89328788]\n [-1.86364246 0.58521398]\n [-2.37591355 -1.96095268]\n [ 1.7770297 0.32045557]\n [-2.42774861 -0.53244395]\n [ 2.80187717 -0.52159417]\n [ 0.94455323 -0.82339639]\n [-1.36513923 0.70919674]\n [ 1.0867211 -0.7351246 ]\n [ 1.36152909 0.5689553 ]\n [ 0.92367965 -0.61803853]\n [-0.21235292 -0.34674362]\n [-1.39689542 3.18322911]\n [ 0.51446525 -0.26083015]\n [ 1.74504674 0.24178588]\n [ 1.1187257 0.37451255]\n [ 0.68360034 1.05726979]\n [-0.02464677 0.17999889]\n [-1.09661596 0.63259348]\n [-2.59347523 -2.28949135]\n [-0.278784 0.14422342]\n [ 0.56662388 -1.17736331]\n [-1.68605938 -1.1761977 ]\n [-0.17340918 1.97480402]\n [ 1.50088126 -1.69179892]\n [ 0.85639208 1.09821007]]\n[0 0 1 0 1 0 1 1 1 1 1 0 1 0 1 1 1 1 0 1 1 0 1 0 1 1 1 0 1 1 0 0 0 1 1 0 1\n 0 1 1 0 1 0 0 0 0 0 0 1 1 1 1 1 0 0 0 1 1 1 0 0 0 0 1 0 1 1 0 1 0 0 1 0 1\n 1 0 0 1 0 1 0 0 0 1 0 1 1 0 1 0 1 0 0 0 0 0 0 1 0 1]\n" ], [ "from matplotlib import pyplot as plt\nplt.scatter(X[y==0][:, 0], X[y==0][:, 1], s=100, edgecolors='k')\nplt.scatter(X[y==1][:, 0], X[y==1][:, 1], s=120, edgecolors='k', marker='^')\nplt.show()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import MLPClassifier\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=3)\nmlp = 
MLPClassifier(max_iter=1000)\nmlp.fit(X_train, y_train)", "_____no_output_____" ], [ "mlp.score(X_test, y_test)", "_____no_output_____" ], [ "from sklearn.datasets import load_digits\nX, y = load_digits(n_class=2, return_X_y=True)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nplt.matshow(X[260].reshape(8,8), cmap=plt.cm.gray)\nplt.xticks(())\nplt.yticks(()) ", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=3)\nmlp = MLPClassifier(max_iter=1000)\nmlp.fit(X_train, y_train)", "_____no_output_____" ], [ "plt.matshow(X[240].reshape(8,8), cmap=plt.cm.gray)\nmlp.predict([X[240]])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0aa7544eb7d0c404cdd8b6fe9075eef49a322ff
142,073
ipynb
Jupyter Notebook
nbs/05_analysis.ipynb
royshadmon/pybats
1dd8c2042adb0f4a057b36215a6ba30234e23c3a
[ "Apache-2.0" ]
29
2019-11-11T14:49:11.000Z
2022-03-11T13:23:57.000Z
nbs/05_analysis.ipynb
royshadmon/pybats
1dd8c2042adb0f4a057b36215a6ba30234e23c3a
[ "Apache-2.0" ]
7
2020-08-04T15:08:54.000Z
2022-02-26T10:10:16.000Z
nbs/05_analysis.ipynb
royshadmon/pybats
1dd8c2042adb0f4a057b36215a6ba30234e23c3a
[ "Apache-2.0" ]
12
2019-11-20T15:21:48.000Z
2022-02-16T23:18:44.000Z
110.048799
68,156
0.776573
[ [ [ "#hide\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "# default_exp analysis", "_____no_output_____" ] ], [ [ "# Analysis\n\n\n> The analysis functions help a modeler quickly run a full time series analysis.", "_____no_output_____" ], [ "An analysis consists of:\n1. Initializing a DGLM, using `define_dglm`.\n2. Updating the model coefficients at each time step, using `dglm.update`.\n3. Forecasting at each time step between `forecast_start` and `forecast_end`, using `dglm.forecast_marginal` or `dglm.forecast_path`.\n4. Returning the desired output, specified in the argument `ret`. The default is to return the model and forecast samples.\n\nThe analysis starts by defining a new DGLM with `define_dglm`. The default number of observations to use is set at `prior_length=20`. Any arguments that are used to define a model in `define_dglm` can be passed into analysis as keyword arguments. Alternatively, you may define the model beforehand, and pass the pre-initialized DGLM into analysis as the argument `model_prior`.\n\nOnce the model has been initialized, the analysis loop begins. If $\\text{forecast_start} \\leq t \\leq \\text{forecast_end}$, then the model will forecast ahead. The forecast horizon k must be specified. The default is to simulate `nsamps=500` times from the forecast distribution using `forecast_marginal`, from $1$ to `k` steps into the future. To simulate from the joint forecast distribution over the next `k` steps, set the flag `forecast_path=True`. Note that all forecasts are *out-of-sample*, i.e. they are made before the model has seen the observation. This is to ensure than the forecast accuracy is a more fair representation of future model performance.\n\nAfter the forecast has been made, the model sees the observation $y_t$, and updates the state vector accordingly. \n\nThe analysis ends after seeing the last observation in `Y`. The output is a list specified by the argument `ret`, which may contain:\n- `mod`: The final model\n- `forecast`: The forecast samples, stored in a 3-dimensional array with axes *nsamps* $\\times$ *forecast length* $\\times$ *k*\n- `model_coef`: A time series of the state vector mean vector and variance matrix\n\nPlease note that `analysis` is used on a historic dataset that already exists. This means that a typical sequence of events is to run an analysis on the data you current have, and return the model and forecast samples. The forecast samples are used to evaluate the past forecast performance. Then you can use `dglm.forecast_marginal` and `dglm.forecast_path` to forecast into the future.", "_____no_output_____" ] ], [ [ "#hide\n#exporti\nimport numpy as np\nimport pandas as pd\n\nfrom pybats.define_models import define_dglm, define_dcmm, define_dbcm, define_dlmm\nfrom pybats.shared import define_holiday_regressors\nfrom collections.abc import Iterable", "_____no_output_____" ] ], [ [ "## Analysis for a DGLM", "_____no_output_____" ] ], [ [ "#export\ndef analysis(Y, X=None, k=1, forecast_start=0, forecast_end=0,\n nsamps=500, family = 'normal', n = None,\n model_prior = None, prior_length=20, ntrend=1,\n dates = None, holidays = [],\n seasPeriods = [], seasHarmComponents = [],\n latent_factor = None, new_latent_factors = None,\n ret=['model', 'forecast'],\n mean_only = False, forecast_path = False,\n **kwargs):\n \"\"\"\n This is a helpful function to run a standard analysis. The function will:\n 1. Automatically initialize a DGLM\n 2. Run sequential updating\n 3. 
Forecast at each specified time step\n \"\"\"\n\n # Add the holiday indicator variables to the regression matrix\n nhol = len(holidays)\n X = define_holiday_regressors(X, dates, holidays)\n\n # Check if it's a latent factor DGLM\n if latent_factor is not None:\n is_lf = True\n nlf = latent_factor.p\n else:\n is_lf = False\n nlf = 0\n\n if model_prior is None:\n mod = define_dglm(Y, X, family=family, n=n, prior_length=prior_length, ntrend=ntrend, nhol=nhol, nlf=nlf,\n seasPeriods=seasPeriods, seasHarmComponents=seasHarmComponents,\n **kwargs)\n else:\n mod = model_prior\n\n\n # Convert dates into row numbers\n if dates is not None:\n dates = pd.Series(dates)\n if type(forecast_start) == type(dates.iloc[0]):\n forecast_start = np.where(dates == forecast_start)[0][0]\n if type(forecast_end) == type(dates.iloc[0]):\n forecast_end = np.where(dates == forecast_end)[0][0]\n\n # Define the run length\n T = len(Y) + 1\n\n if ret.__contains__('model_coef'):\n m = np.zeros([T-1, mod.a.shape[0]])\n C = np.zeros([T-1, mod.a.shape[0], mod.a.shape[0]])\n if family == 'normal':\n n = np.zeros(T)\n s = np.zeros(T)\n\n if new_latent_factors is not None:\n if not ret.__contains__('new_latent_factors'):\n ret.append('new_latent_factors')\n\n if not isinstance(new_latent_factors, Iterable):\n new_latent_factors = [new_latent_factors]\n\n tmp = []\n for lf in new_latent_factors:\n tmp.append(lf.copy())\n new_latent_factors = tmp\n\n # Create dummy variable if there are no regression covariates\n if X is None:\n X = np.array([None]*(T+k)).reshape(-1,1)\n else:\n if len(X.shape) == 1:\n X = X.reshape(-1,1)\n\n # Initialize updating + forecasting\n horizons = np.arange(1, k + 1)\n\n if mean_only:\n forecast = np.zeros([1, forecast_end - forecast_start + 1, k])\n else:\n forecast = np.zeros([nsamps, forecast_end - forecast_start + 1, k])\n\n for t in range(prior_length, T):\n\n if forecast_start <= t <= forecast_end:\n if t == forecast_start:\n print('beginning forecasting')\n\n if ret.__contains__('forecast'):\n if is_lf:\n if forecast_path:\n pm, ps, pp = latent_factor.get_lf_forecast(dates.iloc[t])\n forecast[:, t - forecast_start, :] = mod.forecast_path_lf_copula(k=k, X=X[t + horizons - 1, :],\n nsamps=nsamps,\n phi_mu=pm, phi_sigma=ps, phi_psi=pp)\n else:\n pm, ps = latent_factor.get_lf_forecast(dates.iloc[t])\n pp = None # Not including path dependency in latent factor\n\n forecast[:, t - forecast_start, :] = np.array(list(map(\n lambda k, x, pm, ps:\n mod.forecast_marginal_lf_analytic(k=k, X=x, phi_mu=pm, phi_sigma=ps, nsamps=nsamps, mean_only=mean_only),\n horizons, X[t + horizons - 1, :], pm, ps))).squeeze().T.reshape(-1, k)#.reshape(-1, 1)\n else:\n if forecast_path:\n forecast[:, t - forecast_start, :] = mod.forecast_path(k=k, X = X[t + horizons - 1, :], nsamps=nsamps)\n else:\n if family == \"binomial\":\n forecast[:, t - forecast_start, :] = np.array(list(map(\n lambda k, n, x:\n mod.forecast_marginal(k=k, n=n, X=x, nsamps=nsamps, mean_only=mean_only),\n horizons, n[t + horizons - 1], X[t + horizons - 1, :]))).squeeze().T.reshape(-1, k) # .reshape(-1, 1)\n else:\n # Get the forecast samples for all the items over the 1:k step ahead marginal forecast distributions\n forecast[:, t - forecast_start, :] = np.array(list(map(\n lambda k, x:\n mod.forecast_marginal(k=k, X=x, nsamps=nsamps, mean_only=mean_only),\n horizons, X[t + horizons - 1, :]))).squeeze().T.reshape(-1, k)#.reshape(-1, 1)\n\n if ret.__contains__('new_latent_factors'):\n for lf in new_latent_factors:\n lf.generate_lf_forecast(date=dates[t], 
mod=mod, X=X[t + horizons - 1],\n k=k, nsamps=nsamps, horizons=horizons)\n\n # Now observe the true y value, and update:\n if t < len(Y):\n if is_lf:\n pm, ps = latent_factor.get_lf(dates.iloc[t])\n mod.update_lf_analytic(y=Y[t], X=X[t],\n phi_mu=pm, phi_sigma=ps)\n else:\n if family == \"binomial\":\n mod.update(y=Y[t], X=X[t], n=n[t])\n else:\n mod.update(y=Y[t], X=X[t])\n\n if ret.__contains__('model_coef'):\n m[t,:] = mod.m.reshape(-1)\n C[t,:,:] = mod.C\n if family == 'normal':\n n[t] = mod.n / mod.delVar\n s[t] = mod.s\n\n if ret.__contains__('new_latent_factors'):\n for lf in new_latent_factors:\n lf.generate_lf(date=dates[t], mod=mod, Y=Y[t], X=X[t], k=k, nsamps=nsamps)\n\n out = []\n for obj in ret:\n if obj == 'forecast': out.append(forecast)\n if obj == 'model': out.append(mod)\n if obj == 'model_coef':\n mod_coef = {'m':m, 'C':C}\n if family == 'normal':\n mod_coef.update({'n':n, 's':s})\n\n out.append(mod_coef)\n if obj == 'new_latent_factors':\n #for lf in new_latent_factors:\n # lf.append_lf()\n # lf.append_lf_forecast()\n if len(new_latent_factors) == 1:\n out.append(new_latent_factors[0])\n else:\n out.append(new_latent_factors)\n\n if len(out) == 1:\n return out[0]\n else:\n return out", "_____no_output_____" ] ], [ [ "This function is core to the PyBATS package, because it allows a modeler to easily run a full time series analysis in one step. Below is a quick example of analysis of quarterly inflation in the US using a normal DLM. We'll start by loading in the data:", "_____no_output_____" ] ], [ [ "from pybats.shared import load_us_inflation\nfrom pybats.analysis import analysis\nimport pandas as pd\nfrom pybats.plot import plot_data_forecast\nfrom pybats.point_forecast import median\nimport matplotlib.pyplot as plt\nfrom pybats.loss_functions import MAPE\n\ndata = load_us_inflation()\npd.concat([data.head(3), data.tail(3)])", "_____no_output_____" ] ], [ [ "And then running an analysis. We're going to use the previous (lag-1) value of inflation as a predictor.", "_____no_output_____" ] ], [ [ "forecast_start = '1990-Q1'\nforecast_end = '2014-Q3'\nX = data.Inflation.values[:-1]\n\nmod, samples = analysis(Y = data.Inflation.values[1:], X=X, family=\"normal\",\n k = 1, prior_length = 12,\n forecast_start = forecast_start, forecast_end = forecast_end,\n dates=data.Date,\n ntrend = 2, deltrend=.99,\n seasPeriods=[4], seasHarmComponents=[[1,2]], delseas=.99,\n nsamps = 5000)", "beginning forecasting\n" ] ], [ [ "A couple of things to note here:\n- `forecast_start` and `forecast_end` were specified as elements in the `dates` vector. You can also specify forecast_start and forecast_end by row numbers in `Y`, and avoid providing the `dates` argument.\n\n- `ntrend=2` creates a model with an intercept and a local slope term, and `deltrend=.98` discounts the impact of older observations on the trend component by $2\\%$ at each time step.\n\n- The seasonal component was set as `seasPeriods=[4]`, because we think the seasonal effect has a cycle of length $4$ in this quarterly inflation data.\n\nLet's examine the output. Here is the mean and standard deviation of the state vector (aka the coefficients) after the model has seen the last observation in `Y`:", "_____no_output_____" ] ], [ [ "mod.get_coef()", "_____no_output_____" ] ], [ [ "It's clear that the lag-1 regression term is dominant, with a mean of $0.92$. The only other large coefficient is the intercept, with a mean of $0.10$.\n\nThe seasonal coefficients turned out to be very small. 
Most likely this is because the publicly available dataset for US inflation is pre-adjusted for seasonality.\n\nThe forecast samples are stored in a 3-dimensional array, with axes *nsamps* $\\times$ *forecast length* $\\times$ *k*:\n- **nsamps** is the number of samples drawn from the forecast distribution\n- **forecast length** is the number of time steps between `forecast_start` and `forecast_end`\n- **k** is the forecast horizon, or the number of steps that were forecast ahead\n\nWe can plot the forecasts using `plot_data_forecast`. We'll plot the 1-quarter ahead forecasts, using the median as our point estimate.", "_____no_output_____" ] ], [ [ "forecast = median(samples)\n\n# Plot the 1-quarter ahead forecast\nh = 1\nstart = data[data.Date == forecast_start].index[0] + h\nend = data[data.Date == forecast_end].index[0] + h + 1\n\nfig, ax = plt.subplots(figsize=(12, 6))\nplot_data_forecast(fig, ax, y = data[start:end].Inflation.values,\n f = forecast[:,h-1],\n samples = samples[:,:,h-1],\n dates = pd.to_datetime(data[start:end].Date.values),\n xlabel='Time', ylabel='Quarterly US Inflation', title='1-Quarter Ahead Forecasts');", "_____no_output_____" ] ], [ [ "We can see that the forecasts are quite good, and nearly all of the observations fall within the $95\\%$ credible interval.\n\nThere's also a clear pattern - the forecasts look as if they're shifted forward from the data by 1 step. This is because the lag-1 predictor is very strong, with a coefficient mean of $0.91$. The model is primarily using the previous month's value as its forecast, with some small modifications. Having the previous value as our best forecast is common in many time series.\n\nWe can put a number on the quality of the forecast by using a loss function, the Mean Absolute Percent Error (MAPE). We see that on average, our forecasts of quarterly inflation have an error of under $15\\%$.", "_____no_output_____" ] ], [ [ "MAPE(data[start:end].Inflation.values, forecast[:,0]).round(1)", "_____no_output_____" ], [ "assert(MAPE(data[start:end].Inflation.values, forecast[:,0]).round(0) <= 15)", "_____no_output_____" ] ], [ [ "Finally, we can use the returned model to forecast $1-$step ahead to Q1 2015, which is past the end of the dataset. We need the `X` value to forecast into the future. 
Luckily, in this model the predictor `X` is simply the previous value of Inflation from Q4 2014.", "_____no_output_____" ] ], [ [ "x_future = data.Inflation.iloc[-1]\none_step_forecast_samples = mod.forecast_marginal(k=1,\n X=x_future,\n nsamps=1000000)", "_____no_output_____" ] ], [ [ "From here, we can find the mean and standard deviation of the forecast for next quarter's inflation:", "_____no_output_____" ] ], [ [ "print('Mean: ' + str(np.mean(one_step_forecast_samples).round(2)))\nprint('Std Dev: ' + str(np.std(one_step_forecast_samples).round(2)))", "Mean: 1.21\nStd Dev: 0.29\n" ] ], [ [ "We can also plot the full forecast distribution for Q1 2015:", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(10,6))\nax.hist(one_step_forecast_samples.reshape(-1),\n bins=200, alpha=0.3, color='b', density=True,\n label='Forecast Distribution');\nax.vlines(x=np.mean(one_step_forecast_samples),\n ymin=0, ymax=ax.get_ylim()[1],\n label='Forecast Mean');\nax.set_title('1-Step Ahead Forecast Distribution for Q1 2015 Inflation');\nax.set_ylabel('Forecast Density')\nax.set_xlabel('Q1 2015 Inflation')\nax.legend();", "_____no_output_____" ] ], [ [ "## Analysis for a DCMM", "_____no_output_____" ] ], [ [ "#export\ndef analysis_dcmm(Y, X=None, k=1, forecast_start=0, forecast_end=0,\n nsamps=500, rho=.6,\n model_prior=None, prior_length=20, ntrend=1,\n dates=None, holidays=[],\n seasPeriods=[], seasHarmComponents=[], \n latent_factor=None, new_latent_factors=None,\n mean_only=False, \n ret=['model', 'forecast'],\n **kwargs):\n \"\"\"\n This is a helpful function to run a standard analysis using a DCMM.\n \"\"\"\n\n if latent_factor is not None:\n is_lf = True\n # Note: This assumes that the bernoulli & poisson components have the same number of latent factor components\n if isinstance(latent_factor, (list, tuple)):\n nlf = latent_factor[0].p\n else:\n nlf = latent_factor.p\n else:\n is_lf = False\n nlf = 0\n\n # Convert dates into row numbers\n if dates is not None:\n dates = pd.Series(dates)\n # dates = pd.to_datetime(dates, format='%y/%m/%d')\n if type(forecast_start) == type(dates.iloc[0]):\n forecast_start = np.where(dates == forecast_start)[0][0]\n if type(forecast_end) == type(dates.iloc[0]):\n forecast_end = np.where(dates == forecast_end)[0][0]\n\n # Add the holiday indicator variables to the regression matrix\n nhol = len(holidays)\n if nhol > 0:\n X = define_holiday_regressors(X, dates, holidays)\n\n # Initialize the DCMM\n if model_prior is None:\n mod = define_dcmm(Y, X, prior_length = prior_length, seasPeriods = seasPeriods, seasHarmComponents = seasHarmComponents,\n ntrend=ntrend, nlf = nlf, rho = rho, nhol = nhol, **kwargs)\n else:\n mod = model_prior\n\n if ret.__contains__('new_latent_factors'):\n if not isinstance(new_latent_factors, Iterable):\n new_latent_factors = [new_latent_factors]\n\n tmp = []\n for sig in new_latent_factors:\n tmp.append(sig.copy())\n new_latent_factors = tmp\n \n T = len(Y) + 1 # np.min([len(Y), forecast_end]) + 1\n nu = 9\n \n if X is None:\n X = np.array([None]*(T+k)).reshape(-1,1)\n else:\n if len(X.shape) == 1:\n X = X.reshape(-1,1)\n\n # Initialize updating + forecasting\n horizons = np.arange(1,k+1)\n\n if mean_only:\n forecast = np.zeros([1, forecast_end - forecast_start + 1, k])\n else:\n forecast = np.zeros([nsamps, forecast_end - forecast_start + 1, k])\n\n # Run updating + forecasting\n for t in range(prior_length, T):\n # if t % 100 == 0:\n # print(t)\n if ret.__contains__('forecast'):\n if t >= forecast_start and t <= forecast_end:\n 
if t == forecast_start:\n print('beginning forecasting')\n\n # Get the forecast samples for all the items over the 1:k step ahead path\n if is_lf:\n if isinstance(latent_factor, (list, tuple)):\n pm_bern, ps_bern = latent_factor[0].get_lf_forecast(dates.iloc[t])\n pm_pois, ps_pois = latent_factor[1].get_lf_forecast(dates.iloc[t])\n pm = (pm_bern, pm_pois)\n ps = (ps_bern, ps_pois)\n else:\n pm, ps = latent_factor.get_lf_forecast(dates.iloc[t])\n\n pp = None # Not including the path dependency of the latent factor\n\n if mean_only:\n forecast[:, t - forecast_start, :] = np.array(list(map(\n lambda k, x, pm, ps: mod.forecast_marginal_lf_analytic(\n k=k, X=(x, x), phi_mu=(pm, pm), phi_sigma=(ps, ps), nsamps=nsamps, mean_only=mean_only),\n horizons, X[t + horizons - 1, :], pm, ps))).reshape(1, -1)\n else:\n forecast[:, t - forecast_start, :] = mod.forecast_path_lf_copula(\n k=k, X=(X[t + horizons - 1, :], X[t + horizons - 1, :]),\n phi_mu=(pm, pm), phi_sigma=(ps, ps), phi_psi=(pp, pp), nsamps=nsamps, t_dist=True, nu=nu)\n else:\n if mean_only:\n forecast[:, t - forecast_start, :] = np.array(list(map(\n lambda k, x: mod.forecast_marginal(\n k=k, X=(x, x), nsamps=nsamps, mean_only=mean_only),\n horizons, X[t + horizons - 1, :]))).reshape(1,-1)\n else:\n forecast[:, t - forecast_start, :] = mod.forecast_path_copula(\n k=k, X=(X[t + horizons - 1, :], X[t + horizons - 1, :]), nsamps=nsamps, t_dist=True, nu=nu)\n\n if ret.__contains__('new_latent_factors'):\n if t >= forecast_start and t <= forecast_end:\n for lf in new_latent_factors:\n lf.generate_lf_forecast(date=dates.iloc[t], mod=mod, X=X[t + horizons - 1, :],\n k=k, nsamps=nsamps, horizons=horizons)\n\n # Update the DCMM\n if t < len(Y):\n if is_lf:\n if isinstance(latent_factor, (list, tuple)):\n pm_bern, ps_bern = latent_factor[0].get_lf(dates.iloc[t])\n pm_pois, ps_pois = latent_factor[1].get_lf(dates.iloc[t])\n pm = (pm_bern, pm_pois)\n ps = (ps_bern, ps_pois)\n else:\n pm, ps = latent_factor.get_lf(dates.iloc[t])\n\n mod.update_lf_analytic(y=Y[t], X=(X[t], X[t]),\n phi_mu=(pm, pm), phi_sigma=(ps, ps))\n else:\n mod.update(y = Y[t], X=(X[t], X[t]))\n\n if ret.__contains__('new_latent_factors'):\n for lf in new_latent_factors:\n lf.generate_lf(date=dates.iloc[t], mod=mod, X=X[t + horizons - 1, :],\n k=k, nsamps=nsamps, horizons=horizons)\n\n out = []\n for obj in ret:\n if obj == 'forecast': out.append(forecast)\n if obj == 'model': out.append(mod)\n if obj == 'new_latent_factors':\n #for lf in new_latent_factors:\n # lf.append_lf()\n # lf.append_lf_forecast()\n if len(new_latent_factors) == 1:\n out.append(new_latent_factors[0])\n else:\n out.append(new_latent_factors)\n\n if len(out) == 1:\n return out[0]\n else:\n return out", "_____no_output_____" ] ], [ [ "`analysis_dcmm` works identically to the standard `analysis`, but is specialized for a DCMM.\n\nThe observations must be integer counts, which are modeled as a combination of a Poisson and Bernoulli DGLM. 
Typically a DCMM is equally good as a Poisson DGLM for modeling series with consistently large integers, while being significantly better at modeling series with many zeros.\n\nNote that by default, all simulated forecasts made with `analysis_dcmm` are *path* forecasts, meaning that they account for the dependence across forecast horizons.", "_____no_output_____" ], [ "## Analysis for a DBCM", "_____no_output_____" ] ], [ [ "#export\ndef analysis_dbcm(Y_transaction, X_transaction, Y_cascade, X_cascade, excess,\n k, forecast_start, forecast_end, nsamps = 500, rho = .6,\n model_prior=None, prior_length=20, ntrend=1,\n dates=None, holidays = [],\n latent_factor = None, new_latent_factors = None,\n seasPeriods = [], seasHarmComponents = [],\n mean_only=False, \n ret=['model', 'forecast'],\n **kwargs):\n \"\"\"\n This is a helpful function to run a standard analysis using a DBCM.\n \"\"\"\n\n if latent_factor is not None:\n is_lf = True\n # Note: This assumes that the bernoulli & poisson components have the same number of latent factor components\n if isinstance(latent_factor, (list, tuple)):\n nlf = latent_factor[0].p\n else:\n nlf = latent_factor.p\n else:\n is_lf = False\n nlf = 0\n\n # Convert dates into row numbers\n if dates is not None:\n dates = pd.Series(dates)\n # dates = pd.to_datetime(dates, format='%y/%m/%d')\n if type(forecast_start) == type(dates.iloc[0]):\n forecast_start = np.where(dates == forecast_start)[0][0]\n if type(forecast_end) == type(dates.iloc[0]):\n forecast_end = np.where(dates == forecast_end)[0][0]\n\n # Add the holiday indicator variables to the regression matrix\n nhol = len(holidays)\n if nhol > 0:\n X_transaction = define_holiday_regressors(X_transaction, dates, holidays)\n\n\n if model_prior is None:\n mod = define_dbcm(Y_transaction, X_transaction, Y_cascade, X_cascade,\n excess_values = excess, prior_length = prior_length,\n seasPeriods = seasPeriods, seasHarmComponents=seasHarmComponents,\n nlf = nlf, rho = rho, nhol=nhol, **kwargs)\n else:\n mod = model_prior\n\n if ret.__contains__('new_latent_factors'):\n if not isinstance(new_latent_factors, Iterable):\n new_latent_factors = [new_latent_factors]\n\n tmp = []\n for sig in new_latent_factors:\n tmp.append(sig.copy())\n new_latent_factors = tmp\n\n # Initialize updating + forecasting\n horizons = np.arange(1,k+1)\n\n if mean_only:\n forecast = np.zeros([1, forecast_end - forecast_start + 1, k])\n else:\n forecast = np.zeros([nsamps, forecast_end - forecast_start + 1, k])\n\n T = len(Y_transaction) + 1 #np.min([len(Y_transaction)- k, forecast_end]) + 1\n nu = 9\n\n # Run updating + forecasting\n for t in range(prior_length, T):\n # if t % 100 == 0:\n # print(t)\n # print(mod.dcmm.pois_mod.param1)\n # print(mod.dcmm.pois_mod.param2)\n if ret.__contains__('forecast'):\n if t >= forecast_start and t <= forecast_end:\n if t == forecast_start:\n print('beginning forecasting')\n\n # Get the forecast samples for all the items over the 1:k step ahead path\n if is_lf:\n if isinstance(latent_factor, (list, tuple)):\n pm_bern, ps_bern = latent_factor[0].get_lf_forecast(dates.iloc[t])\n pm_pois, ps_pois = latent_factor[1].get_lf_forecast(dates.iloc[t])\n pm = (pm_bern, pm_pois)\n ps = (ps_bern, ps_pois)\n pp = None # Not including path dependency in latent factor\n else:\n if latent_factor.forecast_path:\n pm, ps, pp = latent_factor.get_lf_forecast(dates.iloc[t])\n else:\n pm, ps = latent_factor.get_lf_forecast(dates.iloc[t])\n pp = None\n\n if mean_only:\n forecast[:, t - forecast_start, :] = np.array(list(map(\n 
lambda k, x_trans, x_cascade, pm, ps: mod.forecast_marginal_lf_analytic(\n k=k, X_transaction=x_trans, X_cascade=x_cascade,\n phi_mu=pm, phi_sigma=ps, nsamps=nsamps, mean_only=mean_only),\n horizons, X_transaction[t + horizons - 1, :], X_cascade[t + horizons - 1, :], pm, ps))).reshape(1, -1)\n else:\n forecast[:, t - forecast_start, :] = mod.forecast_path_lf_copula(\n k=k, X_transaction=X_transaction[t + horizons - 1, :], X_cascade=X_cascade[t + horizons - 1, :],\n phi_mu=pm, phi_sigma=ps, phi_psi=pp, nsamps=nsamps, t_dist=True, nu=nu)\n else:\n if mean_only:\n forecast[:, t - forecast_start, :] = np.array(list(map(\n lambda k, x_trans, x_cascade: mod.forecast_marginal(\n k=k, X_transaction=x_trans, X_cascade=x_cascade, nsamps=nsamps, mean_only=mean_only),\n horizons, X_transaction[t + horizons - 1, :], X_cascade[t + horizons - 1, :]))).reshape(1,-1)\n else:\n forecast[:, t - forecast_start, :] = mod.forecast_path_copula(\n k=k, X_transaction=X_transaction[t + horizons - 1, :], X_cascade=X_cascade[t + horizons - 1, :],\n nsamps=nsamps, t_dist=True, nu=nu)\n\n if ret.__contains__('new_latent_factors'):\n if t >= forecast_start and t <= forecast_end:\n for lf in new_latent_factors:\n lf.generate_lf_forecast(date=dates.iloc[t], mod=mod, X_transaction=X_transaction[t + horizons - 1, :],\n X_cascade = X_cascade[t + horizons - 1, :],\n k=k, nsamps=nsamps, horizons=horizons)\n # Update the DBCM\n if t < len(Y_transaction):\n if is_lf:\n if isinstance(latent_factor, (list, tuple)):\n pm_bern, ps_bern = latent_factor[0].get_lf(dates.iloc[t])\n pm_pois, ps_pois = latent_factor[1].get_lf(dates.iloc[t])\n pm = (pm_bern, pm_pois)\n ps = (ps_bern, ps_pois)\n else:\n pm, ps = latent_factor.get_lf(dates.iloc[t])\n\n mod.update_lf_analytic(y_transaction=Y_transaction[t], X_transaction=X_transaction[t, :],\n y_cascade=Y_cascade[t,:], X_cascade=X_cascade[t, :],\n phi_mu=pm, phi_sigma=ps, excess=excess[t])\n else:\n mod.update(y_transaction=Y_transaction[t], X_transaction=X_transaction[t, :],\n y_cascade=Y_cascade[t,:], X_cascade=X_cascade[t, :], excess=excess[t])\n\n if ret.__contains__('new_latent_factors'):\n for lf in new_latent_factors:\n lf.generate_lf(date=dates.iloc[t], mod=mod, X_transaction=X_transaction[t + horizons - 1, :],\n X_cascade = X_cascade[t + horizons - 1, :],\n k=k, nsamps=nsamps, horizons=horizons)\n\n out = []\n for obj in ret:\n if obj == 'forecast': out.append(forecast)\n if obj == 'model': out.append(mod)\n if obj == 'new_latent_factors':\n #for lf in new_latent_factors:\n # lf.append_lf()\n # lf.append_lf_forecast()\n if len(new_latent_factors) == 1:\n out.append(new_latent_factors[0])\n else:\n out.append(new_latent_factors)\n\n if len(out) == 1:\n return out[0]\n else:\n return out", "_____no_output_____" ] ], [ [ "`analysis_dbcm` works identically to the standard `analysis`, but is specialized for a DBCM.\n\nSeparate data must be specified for the DCMM on transactions, `y_transaction` and `X_transaction`, the binomial cascade,`y_cascade`, `X_cascade`, and any excess counts, `excess`.\n\nNote that by default, all simulated forecasts made with `analysis_dbcm` are *path* forecasts, meaning that they account for the dependence across forecast horizons.", "_____no_output_____" ], [ "## Analysis for a DLMM", "_____no_output_____" ] ], [ [ "#export\ndef analysis_dlmm(Y, X, k=1, forecast_start=0, forecast_end=0,\n nsamps=500, rho=.6,\n model_prior=None, prior_length=20, ntrend=1,\n dates=None, holidays=[],\n seasPeriods=[], seasHarmComponents=[], \n latent_factor=None, 
new_latent_factors=None,\n mean_only=False, \n ret=['model', 'forecast'],\n **kwargs):\n \"\"\"\n This is a helpful function to run a standard analysis using a DLMM.\n \"\"\"\n\n if latent_factor is not None:\n is_lf = True\n # Note: This assumes that the bernoulli & poisson components have the same number of latent factor components\n if isinstance(latent_factor, (list, tuple)):\n nlf = latent_factor[0].p\n else:\n nlf = latent_factor.p\n else:\n is_lf = False\n nlf = 0\n\n # Convert dates into row numbers\n if dates is not None:\n dates = pd.Series(dates)\n # dates = pd.to_datetime(dates, format='%y/%m/%d')\n if type(forecast_start) == type(dates.iloc[0]):\n forecast_start = np.where(dates == forecast_start)[0][0]\n if type(forecast_end) == type(dates.iloc[0]):\n forecast_end = np.where(dates == forecast_end)[0][0]\n\n # Add the holiday indicator variables to the regression matrix\n nhol = len(holidays)\n if nhol > 0:\n X = define_holiday_regressors(X, dates, holidays)\n\n # Initialize the DCMM\n if model_prior is None:\n mod = define_dlmm(Y, X, prior_length = prior_length, seasPeriods = seasPeriods, seasHarmComponents = seasHarmComponents,\n ntrend=ntrend, nlf = nlf, rho = rho, nhol = nhol, **kwargs)\n else:\n mod = model_prior\n\n if ret.__contains__('new_latent_factors'):\n if not isinstance(new_latent_factors, Iterable):\n new_latent_factors = [new_latent_factors]\n\n tmp = []\n for sig in new_latent_factors:\n tmp.append(sig.copy())\n new_latent_factors = tmp\n \n if ret.__contains__('model_coef'): ## Return normal dlm params \n m = np.zeros([T, mod.dlm_mod.a.shape[0]])\n C = np.zeros([T, mod.dlm_mod.a.shape[0], mod.dlm_mod.a.shape[0]])\n a = np.zeros([T, mod.dlm_mod.a.shape[0]])\n R = np.zeros([T, mod.dlm_mod.a.shape[0], mod.dlm_mod.a.shape[0]])\n n = np.zeros(T)\n s = np.zeros(T)\n\n # Initialize updating + forecasting\n horizons = np.arange(1,k+1)\n\n if mean_only:\n forecast = np.zeros([1, forecast_end - forecast_start + 1, k])\n else:\n forecast = np.zeros([nsamps, forecast_end - forecast_start + 1, k])\n\n T = len(Y) + 1\n nu = 9\n \n # Run updating + forecasting\n for t in range(prior_length, T):\n # if t % 100 == 0:\n # print(t)\n if ret.__contains__('forecast'):\n if t >= forecast_start and t <= forecast_end:\n if t == forecast_start:\n print('beginning forecasting')\n\n # Get the forecast samples for all the items over the 1:k step ahead path\n if is_lf:\n if isinstance(latent_factor, (list, tuple)):\n pm_bern, ps_bern = latent_factor[0].get_lf_forecast(dates.iloc[t])\n pm_dlm, ps_dlm = latent_factor[1].get_lf_forecast(dates.iloc[t])\n pm = (pm_bern, pm_dlm)\n ps = (ps_bern, ps_dlm)\n else:\n pm, ps = latent_factor.get_lf_forecast(dates.iloc[t])\n\n pp = None # Not including the path dependency of the latent factor\n\n if mean_only:\n forecast[:, t - forecast_start, :] = np.array(list(map(\n lambda k, x, pm, ps: mod.forecast_marginal_lf_analytic(\n k=k, X=(x, x), phi_mu=(pm, pm), phi_sigma=(ps, ps), nsamps=nsamps, mean_only=mean_only),\n horizons, X[t + horizons - 1, :], pm, ps))).reshape(1, -1)\n else:\n forecast[:, t - forecast_start, :] = np.array(list(map(\n lambda k, x, pm, ps: mod.forecast_marginal_lf_analytic(\n k=k, X=(x, x), phi_mu=(pm, pm), phi_sigma=(ps, ps), nsamps=nsamps, mean_only=mean_only),\n horizons, X[t + horizons - 1, :], pm, ps))).squeeze().T.reshape(-1, k)\n \n else:\n if mean_only:\n forecast[:, t - forecast_start, :] = np.array(list(map(\n lambda k, x: mod.forecast_marginal(\n k=k, X=(x, x), nsamps=nsamps, mean_only=mean_only),\n horizons, X[t + 
horizons - 1, :]))).reshape(1,-1)\n else:\n forecast[:, t - forecast_start, :] = mod.forecast_path_copula(\n k=k, X=(X[t + horizons - 1, :], X[t + horizons - 1, :]), nsamps=nsamps, t_dist=True, nu=nu)\n\n if ret.__contains__('new_latent_factors'):\n if t >= forecast_start and t <= forecast_end:\n for lf in new_latent_factors:\n lf.generate_lf_forecast(date=dates.iloc[t], mod=mod, X=X[t + horizons - 1, :],\n k=k, nsamps=nsamps, horizons=horizons)\n\n # Update the DLMM\n if t < len(Y):\n if is_lf:\n if isinstance(latent_factor, (list, tuple)):\n pm_bern, ps_bern = latent_factor[0].get_lf(dates.iloc[t])\n pm_dlm, ps_dlm = latent_factor[1].get_lf(dates.iloc[t])\n pm = (pm_bern, pm_dlm)\n ps = (ps_bern, ps_dlm)\n else:\n pm, ps = latent_factor.get_lf(dates.iloc[t])\n\n mod.update_lf_analytic(y=Y[t], X=(X[t], X[t]),\n phi_mu=(pm, pm), phi_sigma=(ps, ps))\n else:\n mod.update(y = Y[t], X=(X[t], X[t]))\n\n if ret.__contains__('new_latent_factors'):\n for lf in new_latent_factors:\n lf.generate_lf(date=dates.iloc[t], mod=mod, X=X[t + horizons - 1, :],\n k=k, nsamps=nsamps, horizons=horizons)\n \n # Store the dlm coefficients \n if ret.__contains__('model_coef'):\n m[t,:] = mod.dlm.m.reshape(-1)\n C[t,:,:] = mod.dlm.C\n a[t,:] = mod.dlm.a.reshape(-1)\n R[t,:,:] = mod.dlm.R\n n[t] = mod.dlm.n / mod.dlm.delVar\n s[t] = mod.dlm.s\n\n out = []\n for obj in ret:\n if obj == 'forecast': out.append(forecast)\n if obj == 'model': out.append(mod)\n if obj == 'model_coef':\n mod_coef = {'m':m, 'C':C, 'a':a, 'R':R, 'n':n, 's':s}\n out.append(mod_coef)\n if obj == 'new_latent_factors':\n #for lf in new_latent_factors:\n # lf.append_lf()\n # lf.append_lf_forecast()\n if len(new_latent_factors) == 1:\n out.append(new_latent_factors[0])\n else:\n out.append(new_latent_factors)\n\n if len(out) == 1:\n return out[0]\n else:\n return out", "_____no_output_____" ] ], [ [ "`analysis_dlmm` works identically to the standard `analysis`, but is specialized for a DLMM. `analysis_dlmm` returns the model coefficients for the Normal DLM portion of the model only.\n\nThe observations are continuous and are modeled as a combination of a Bernoulli DGLM and a Normal DLM. \n\nNote that by default, all simulated forecasts made with `analysis_dlmm` are *path* forecasts, meaning that they account for the dependence across forecast horizons. The exception is for latent factor DLMMs, which default to marginal forecasting.", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.export import notebook2script\nnotebook2script()", "Converted 00_dglm.ipynb.\nConverted 01_update.ipynb.\nConverted 02_forecast.ipynb.\nConverted 03_define_models.ipynb.\nConverted 04_seasonal.ipynb.\nConverted 05_analysis.ipynb.\nConverted 06_conjugates.ipynb.\nConverted 07_point_forecast.ipynb.\nConverted 08_loss_functions.ipynb.\nConverted 09_plot.ipynb.\nConverted 10_shared.ipynb.\nConverted 11_dcmm.ipynb.\nConverted 12_dbcm.ipynb.\nConverted 13_latent_factor.ipynb.\nConverted 14_latent_factor_fxns.ipynb.\nConverted 15_dlmm.ipynb.\nConverted index.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0aa7ead6aaba396a4979346c3dd472bad408921
847,458
ipynb
Jupyter Notebook
classification/ClassificationSVM.ipynb
tonio73/data-science
e8a365751e7f6956d515f85df4818562b8da3b63
[ "MIT" ]
16
2019-07-25T09:44:23.000Z
2022-03-15T07:14:27.000Z
classification/ClassificationSVM.ipynb
tonio73/data-science
e8a365751e7f6956d515f85df4818562b8da3b63
[ "MIT" ]
null
null
null
classification/ClassificationSVM.ipynb
tonio73/data-science
e8a365751e7f6956d515f85df4818562b8da3b63
[ "MIT" ]
3
2019-12-09T16:00:02.000Z
2021-12-22T13:22:25.000Z
560.488095
75,876
0.942404
[ [ [ "# Support Vector Machines\n\nSupport Vector Machines (SVM) are an extension of the linear methods that attempt to separate classes with hyperplans.\n\nThese extensions come in three steps:\n1. When classes are linearly separable, maximize the margin between the two classes\n2. When classes are not linearly separable, maximize the margin but allow some samples within the margin. That is the soft margin\n3. The \"Kernel trick\" to extend the separation to non linear frontieres\n\nThe boost in performance of the Kernel trick has made the SVM the best classification method of the 2000's until the deep neural nets.\n\n### Learning goals\n\n- Understand and implement SVM concepts stated above\n- Reminder to the Lagrange multiplier and optimization theory\n- Deal with a general purpose solver with constraints\n- Apply SVM to a non linear problem (XOR) with a non linear kernel (G-RBF)\n\n### References\n\n- [1] [The Elements of Statistical Learning](https://web.stanford.edu/~hastie/ElemStatLearn/) - Trevor Hastie, Robert Tibshirani, Jerome Friedman, Springer\n- [2] Convex Optimization - Stephen Boyd, Lieven Vandenberghe, Cambridge University Press\n- [3] [Pattern Recognition and Machine Learning - Ch 7 demo](https://github.com/yiboyang/PRMLPY/blob/master/ch7/svm.py) - Christopher M Bishop, Github", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as pltcolors\nfrom sklearn import linear_model, svm, discriminant_analysis, metrics\nfrom scipy import optimize\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "## Helpers", "_____no_output_____" ] ], [ [ "def plotLine(ax, xRange, w, x0, label, color='grey', linestyle='-', alpha=1.):\n \"\"\" Plot a (separating) line given the normal vector (weights) and point of intercept \"\"\"\n if type(x0) == int or type(x0) == float or type(x0) == np.float64:\n x0 = [0, -x0 / w[1]]\n yy = -(w[0] / w[1]) * (xRange - x0[0]) + x0[1]\n ax.plot(xRange, yy, color=color, label=label, linestyle=linestyle)\n \ndef plotSvm(X, y, support=None, w=None, intercept=0., label='Data', separatorLabel='Separator', \n ax=None, bound=[[-1., 1.], [-1., 1.]]):\n \"\"\" Plot the SVM separation, and margin \"\"\"\n if ax is None:\n fig, ax = plt.subplots(1)\n \n im = ax.scatter(X[:,0], X[:,1], c=y, cmap=cmap, alpha=0.5, label=label)\n if support is not None:\n ax.scatter(support[:,0], support[:,1], label='Support', s=80, facecolors='none', \n edgecolors='y', color='y')\n print(\"Number of support vectors = %d\" % (len(support)))\n if w is not None:\n xx = np.array(bound[0])\n plotLine(ax, xx, w, intercept, separatorLabel)\n # Plot margin\n if support is not None:\n signedDist = np.matmul(support, w)\n margin = np.max(signedDist) - np.min(signedDist) * np.sqrt(np.dot(w, w))\n supportMaxNeg = support[np.argmin(signedDist)]\n plotLine(ax, xx, w, supportMaxNeg, 'Margin -', linestyle='-.', alpha=0.8)\n supportMaxPos = support[np.argmax(signedDist)]\n plotLine(ax, xx, w, supportMaxPos, 'Margin +', linestyle='--', alpha=0.8)\n ax.set_title('Margin = %.3f' % (margin))\n ax.legend(loc='upper left')\n ax.grid()\n ax.set_xlim(bound[0])\n ax.set_ylim(bound[1])\n cb = plt.colorbar(im, ax=ax)\n loc = np.arange(-1,1,1)\n cb.set_ticks(loc)\n cb.set_ticklabels(['-1','1'])", "_____no_output_____" ] ], [ [ "## The data model\n\nLet's use a simple model with two Gaussians that are faraway in order to be separable", "_____no_output_____" ] ], [ [ "colors = ['blue','red']\ncmap = pltcolors.ListedColormap(colors)\nnFeatures = 2\nN = 100", 
"_____no_output_____" ], [ "def generateBatchBipolar(n, mu=0.5, sigma=0.2):\n \"\"\" Two gaussian clouds on each side of the origin \"\"\"\n X = np.random.normal(mu, sigma, (n, 2))\n yB = np.random.uniform(0, 1, n) > 0.5\n # y is in {-1, 1}\n y = 2. * yB - 1\n X *= y[:, np.newaxis]\n X -= X.mean(axis=0)\n return X, y", "_____no_output_____" ] ], [ [ "# 1. Maximum margin separator", "_____no_output_____" ], [ "The following explanation is about the binary classification but generalizes to more classes.\n\nLet $X$ be the matrix of $n$ samples of the $p$ features. We want to separate the two classes of $y$ with an hyperplan (a straight line in 2D, that is $p=2$). The separation equation is:\n\n$$ w^T x + b = 0, w \\in \\mathbb{R}^{p}, x \\in \\mathbb{R}^{p}, b \\in \\mathbb{R} $$\n\nGiven $x_0$ a point on the hyperplan, the signed distance of any point $x$ to the hyperplan is :\n$$ \\frac{w}{\\Vert w \\Vert} (x - x_0) = \\frac{1}{\\Vert w \\Vert} (w^T x + b) $$\n\nIf $y$, such that $y \\in \\{-1, 1\\}$, is the corresponding label of $x$, the (unsigned) distance is : \n$$ \\frac{y}{\\Vert w \\Vert} (w^T x + b) $$\n\nThis is the update quantity used by the Rosenblatt Perceptron.", "_____no_output_____" ], [ "The __Maximum margin separator__ is aiming at maximizing $M$ such that : \n$$ \\underset{w, b}{\\max} M $$\n__Subject to :__\n- $y_i(x_i^T w + b) \\ge M, i = 1..n$\n- $\\Vert w \\Vert = 1$\n\n$x_i$ and $y_i$ are samples of $x$ and $y$, a row of the matrix $X$ and the vector $y$.\n\nHowever, we may change the condition on the norm of $w$ such that : $\\Vert w \\Vert = \\frac 1M$\n\nLeading to the equivalent statement of the maximum margin classifier : \n$$ \\min_{w, b} \\frac 12 \\Vert w \\Vert^2 $$\n__Subject to : $y_i(x_i^T w + b) \\ge 1, i = 1..n$__ \n\nFor more details, see [1, chap 4.5]", "_____no_output_____" ], [ "The corresponding Lagrange primal problem is :\n\n$$\\mathcal{L}_p(w, b, \\alpha) = \\frac 12 \\Vert w \\Vert^2 - \\sum_{i=0}^n \\alpha_i (y_i(x_i^T w + b) - 1)$$\n\n__Subject to:__\n- $\\alpha_i \\ge 0, i\\in 1..n$\n\nThis shall be __minimized__ on $w$ and $b$, using the corresponding partial derivates equal to 0, we get : \n$$\\begin{align}\n\\sum_{i=0}^n \\alpha_i y_i x_i &= w \\\\\n\\sum_{i=0}^n \\alpha_i y_i &= 0\n\\end{align}$$\n\nFrom $\\mathcal{L}_p$, we get the (Wolfe) dual : \n$$\\begin{align}\n\\mathcal{L}_d (\\alpha)\n&= \\sum_{i=0}^n \\alpha_i - \\frac 12 \\sum_{i=0}^n \\sum_{k=0}^n \\alpha_i \\alpha_k y_i y_k x_i^T x_k \\\\\n&= \\sum_{i=0}^n \\alpha_i - \\frac 12 \\sum_{i=0}^n \\sum_{k=0}^n \\langle \\alpha_i y_i x_i, \\alpha_k y_k x_k \\rangle \\\\\n\\end{align}$$\n\n__Subject to :__\n- $\\alpha_i \\ge 0, i\\in 1..n$\n- $\\sum_{i=0}^n \\alpha_i y_i = 0$\n\nWhich is a concave problem that is __maximized__ using a solver.\n\nStrong duality requires (KKT) [2, chap. 5.5]: \n- $\\alpha_i (y_i(x_i^T w + b) - 1) = 0, \\forall i \\in 1..n$\n\nImplying that : \n- If $\\alpha_i > 0$, then $y_i(x_i^T w + b) = 1$, meaning that $x_i$ is on one of the two hyperplans located at the margin distance from the separating hyperplan. $x_i$ is said to be a support vector\n- If $y_i(x_i^T w + b) > 1$, the distance of $x_i$ to the hyperplan is larger than the margin.", "_____no_output_____" ], [ "### Train data\n\nTo demonstrate the maximum margin classifier, a dataset with separable classes is required. 
Let's use a mixture of two gaussian distributed classes with mean and variance such that the two classes are separated.", "_____no_output_____" ] ], [ [ "xTrain0, yTrain0 = generateBatchBipolar(N, sigma=0.2)\nplotSvm(xTrain0, yTrain0)", "_____no_output_____" ] ], [ [ "## Implementation of the Maximum margin separator", "_____no_output_____" ], [ "$$\\mathcal{L}_d = \\sum_{i=0}^n \\alpha_i - \\frac 12 \\sum_{i=0}^n \\sum_{k=0}^n \\alpha_i \\alpha_k y_i y_k x_i^T x_k $$\n\n__Subject to :__\n- $\\sum_{i=0}^n \\alpha_i y_i = \\langle \\alpha, y \\rangle = 0$\n- $\\alpha_i \\ge 0, i\\in 1..n$\n\nThe classifier is built on the scipy.optimize.minimum solver. The implementation is correct but inefficient as it is not taking into account for the sparsity of the $\\alpha$ vector.", "_____no_output_____" ] ], [ [ "class MaxMarginClassifier:\n \n def __init__(self):\n self.alpha = None\n self.w = None\n self.supportVectors = None\n \n def fit(self, X, y):\n N = len(y)\n # Gram matrix of (X.y)\n Xy = X * y[:, np.newaxis]\n GramXy = np.matmul(Xy, Xy.T)\n\n # Lagrange dual problem\n def Ld0(G, alpha):\n return alpha.sum() - 0.5 * alpha.dot(alpha.dot(G))\n\n # Partial derivate of Ld on alpha\n def Ld0dAlpha(G, alpha):\n return np.ones_like(alpha) - alpha.dot(G)\n\n # Constraints on alpha of the shape :\n # - d - C*alpha = 0\n # - b - A*alpha >= 0\n A = -np.eye(N)\n b = np.zeros(N)\n constraints = ({'type': 'eq', 'fun': lambda a: np.dot(a, y), 'jac': lambda a: y},\n {'type': 'ineq', 'fun': lambda a: b - np.dot(A, a), 'jac': lambda a: -A})\n\n # Maximize by minimizing the opposite\n optRes = optimize.minimize(fun=lambda a: -Ld0(GramXy, a),\n x0=np.ones(N), \n method='SLSQP', \n jac=lambda a: -Ld0dAlpha(GramXy, a), \n constraints=constraints)\n self.alpha = optRes.x\n self.w = np.sum((self.alpha[:, np.newaxis] * Xy), axis=0) \n epsilon = 1e-6\n self.supportVectors = X[self.alpha > epsilon]\n # Any support vector is at a distance of 1 to the separation plan\n # => use support vector #0 to compute the intercept, assume label is in {-1, 1}\n supportLabels = y[self.alpha > epsilon]\n self.intercept = supportLabels[0] - np.matmul(self.supportVectors[0].T, self.w)\n \n def predict(self, X):\n \"\"\" Predict y value in {-1, 1} \"\"\"\n assert(self.w is not None)\n assert(self.w.shape[0] == X.shape[1])\n return 2 * (np.matmul(X, self.w) > 0) - 1", "_____no_output_____" ] ], [ [ "Reference: \n- https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize", "_____no_output_____" ] ], [ [ "model00 = MaxMarginClassifier()\nmodel00.fit(xTrain0, yTrain0)\nmodel00.w, model00.intercept", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, figsize=(12, 7))\nplotSvm(xTrain0, yTrain0, model00.supportVectors, model00.w, model00.intercept, label='Training', ax=ax)", "Number of support vectors = 3\n" ] ], [ [ "## Maximum margin classifier using Scikit Learn (SVC)\n\nSVC is used in place of LinearSVC as the support vectors are provided. 
These vectors are displayed in the graph here below.\n\nSet a high $C$ parameter to disable soft margin", "_____no_output_____" ] ], [ [ "model01 = svm.SVC(kernel='linear', gamma='auto', C = 1e6)\nmodel01.fit(xTrain0, yTrain0)\nmodel01.coef_[0], model01.intercept_[0]", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, figsize=(11, 7))\nplotSvm(xTrain0, yTrain0, model01.support_vectors_, model01.coef_[0], model01.intercept_[0], \n label='Training', ax=ax)", "Number of support vectors = 3\n" ] ], [ [ "The two implementations of the linear SVM agree on the ceofficients and margin. Good !", "_____no_output_____" ], [ "### Comparison of the maximum margin classifier to the Logistic regression and Linear Discriminant Analysis (LDA)\n\nLogistic regression is based on the linear regression that is the computation of the square error of any point $x$ to the separation plan and a projection on the probability space using the sigmoid in order to compute the binary cross entropy, see ([HTML](ClassificationContinuous2Features.html) / [Jupyter](ClassificationContinuous2Features.ipynb)).\n\nLDA is assuming a Gaussian mixture prior (our case) and performs bayesian inference.", "_____no_output_____" ] ], [ [ "model02 = linear_model.LogisticRegression(solver='lbfgs')\nmodel02.fit(xTrain0, yTrain0)\nmodel02.coef_[0], model02.intercept_[0]", "_____no_output_____" ], [ "model03 = discriminant_analysis.LinearDiscriminantAnalysis(solver='svd')\nmodel03.fit(xTrain0, yTrain0)\nmodel03.coef_[0], model03.intercept_[0]", "_____no_output_____" ] ], [ [ "We observe that the coefficients of the three models are very different in amplitude but globally draw a separator line with slope $-\\frac \\pi4$ in the 2D plan", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, figsize=(11, 7))\nplotSvm(xTrain0, yTrain0, w=model01.coef_[0], intercept=model01.intercept_[0], \n separatorLabel='Max Margin SVM', label='Training', ax=ax)\nxx = np.array([-1., 1.])\nplotLine(ax, xx, w=model02.coef_[0], x0=model02.intercept_[0], label='Logistic', color='g')\nplotLine(ax, xx, w=model03.coef_[0], x0=model03.intercept_[0], label='LDA', color='c')\nax.legend();", "_____no_output_____" ] ], [ [ "# 2. 
Soft Margin Linear SVM for non separable classes\n\nThe example above has little interest as the separation is trivial.\n\nUsing the same SVM implementation on a non separable case would not be possible, the solver would fail.\n\nHere comes the soft margin: some $x_i$ are allowed to lie in between the two margin bars.", "_____no_output_____" ], [ "The __Soft margin linear SVM__ is adding a regularization parameter in maximizing $M$: \n\n$$ \\underset{w, b}{\\max} M ( 1 - \\xi_i) $$\n__Subject to $\\forall i = 1..n$:__\n- $y_i(x_i^T w + b) \\ge M$\n- $\\Vert w \\Vert = 1$\n- $\\xi_i \\ge 0$\n\nEquivalently :\n$$ \\min_{w, b} \\frac 12 \\Vert w \\Vert^2 + C \\sum_{i=1}^n \\xi_i$$\n__Subject to $\\forall i = 1..n$:__\n- $\\xi_i \\ge 0$\n- $y_i(x_i^T w + b) \\ge 1 - \\xi_i$", "_____no_output_____" ], [ "The corresponding Lagrange primal problem is :\n\n$$\\mathcal{L}_p(w, b, \\alpha, \\mu) = \\frac 12 \\Vert w \\Vert^2 - \\sum_{i=0}^n \\alpha_i (y_i(x_i^T w + b) - (1 - \\xi_i) - \\sum_{i=0}^n \\mu_i \\xi_i $$\n\n__Subject to $\\forall i\\in 1..n$:__\n- $\\alpha_i \\ge 0$\n- $\\mu_i \\ge 0$\n- $\\xi_i \\ge 0$\n\nThis shall be minimized on $w$, $b$ and $\\xi_i$, using the corresponding partial derivates equal to 0, we get : \n$$\\begin{align}\n\\sum_{i=0}^n \\alpha_i y_i x_i &= w \\\\\n\\sum_{i=0}^n \\alpha_i y_i &= 0 \\\\\n\\alpha_i &= C - \\mu_i\n\\end{align}$$\n\nFrom $\\mathcal{L}_p$, we get the (Wolfe) dual : \n$$\\begin{align}\n\\mathcal{L}_d (\\alpha)\n&= \\sum_{i=0}^n \\alpha_i - \\frac 12 \\sum_{i=0}^n \\sum_{k=0}^n \\alpha_i \\alpha_k y_i y_k x_i^T x_k \\\\\n&= \\sum_{i=0}^n \\alpha_i - \\frac 12 \\sum_{i=0}^n \\sum_{k=0}^n \\langle \\alpha_i y_i x_i, \\alpha_k y_k x_k \\rangle \\\\\n\\end{align}$$\n\n__Subject to $\\forall i\\in 1..n$:__\n- $0 \\le \\alpha_i \\le C$\n- $\\sum_{i=0}^n \\alpha_i y_i = 0$\n\nThis problem is very similar to the one of the Maximum margin separator, but with one more constraint on $\\alpha$.\n\nIt is a concave problem that is maximized using a solver.\n\nExtra conditions to get strong duality are required (KKT), $\\forall i \\in 1..n$:\n- $\\alpha_i (y_i(x_i^T w + b) - (1 - \\xi_i)) = 0$\n- $\\mu_i \\xi_i = 0$\n- $y_i(x_i^T w + b) - (1 - \\xi_i) \\ge 0$\n\nMore detailed explainations are in [1 chap. 
12.1, 12.2]", "_____no_output_____" ], [ "## Data model\n\nLet's reuse the same model made of two gaussians, but with larger variance in order to mix the positive and negative points", "_____no_output_____" ] ], [ [ "xTrain1, yTrain1 = generateBatchBipolar(N, mu=0.3, sigma=0.3)\nplotSvm(xTrain1, yTrain1, label='Training')", "_____no_output_____" ] ], [ [ "## Custom implementation\n\nChanges to the Maximum margin classifier are identified by \"# <---\"", "_____no_output_____" ] ], [ [ "class LinearSvmClassifier:\n \n def __init__(self, C):\n self.C = C # <---\n self.alpha = None\n self.w = None\n self.supportVectors = None\n \n def fit(self, X, y):\n N = len(y)\n # Gram matrix of (X.y)\n Xy = X * y[:, np.newaxis]\n GramXy = np.matmul(Xy, Xy.T)\n\n # Lagrange dual problem\n def Ld0(G, alpha):\n return alpha.sum() - 0.5 * alpha.dot(alpha.dot(G))\n\n # Partial derivate of Ld on alpha\n def Ld0dAlpha(G, alpha):\n return np.ones_like(alpha) - alpha.dot(G)\n\n # Constraints on alpha of the shape :\n # - d - C*alpha = 0\n # - b - A*alpha >= 0\n A = np.vstack((-np.eye(N), np.eye(N))) # <---\n b = np.hstack((np.zeros(N), self.C * np.ones(N))) # <---\n constraints = ({'type': 'eq', 'fun': lambda a: np.dot(a, y), 'jac': lambda a: y},\n {'type': 'ineq', 'fun': lambda a: b - np.dot(A, a), 'jac': lambda a: -A})\n\n # Maximize by minimizing the opposite\n optRes = optimize.minimize(fun=lambda a: -Ld0(GramXy, a),\n x0=np.ones(N), \n method='SLSQP', \n jac=lambda a: -Ld0dAlpha(GramXy, a), \n constraints=constraints)\n self.alpha = optRes.x\n self.w = np.sum((self.alpha[:, np.newaxis] * Xy), axis=0) \n epsilon = 1e-6\n self.supportVectors = X[self.alpha > epsilon]\n # Support vectors is at a distance <= 1 to the separation plan\n # => use min support vector to compute the intercept, assume label is in {-1, 1}\n signedDist = np.matmul(self.supportVectors, self.w)\n minDistArg = np.argmin(signedDist)\n supportLabels = y[self.alpha > epsilon]\n self.intercept = supportLabels[minDistArg] - signedDist[minDistArg]\n \n def predict(self, X):\n \"\"\" Predict y value in {-1, 1} \"\"\"\n assert(self.w is not None)\n assert(self.w.shape[0] == X.shape[1])\n return 2 * (np.matmul(X, self.w) > 0) - 1", "_____no_output_____" ], [ "model10 = LinearSvmClassifier(C=1)\nmodel10.fit(xTrain1, yTrain1)\nmodel10.w, model10.intercept", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, figsize=(11, 7))\nplotSvm(xTrain1, yTrain1, model10.supportVectors, model10.w, model10.intercept, label='Training', ax=ax)", "Number of support vectors = 38\n" ] ], [ [ "### Linear SVM using Scikit Learn", "_____no_output_____" ] ], [ [ "model11 = svm.SVC(kernel='linear', gamma='auto', C = 1)\nmodel11.fit(xTrain1, yTrain1)\nmodel11.coef_[0], model11.intercept_[0]", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, figsize=(11, 7))\nplotSvm(xTrain1, yTrain1, model11.support_vectors_, model11.coef_[0], model11.intercept_[0], \n label='Training', ax=ax)", "Number of support vectors = 38\n" ] ], [ [ "With the soft margin, the support vectors are all the vectors on the boundary or within the margin slab.\n\nThe custom and SKLearn implementations are matching !", "_____no_output_____" ], [ "### Comparison of the soft margin classifier to the Logistic regression and Linear Discriminant Analysis (LDA)", "_____no_output_____" ] ], [ [ "model12 = linear_model.LogisticRegression(solver='lbfgs')\nmodel12.fit(xTrain1, yTrain1)\nmodel12.coef_[0], model12.intercept_[0]", "_____no_output_____" ], [ "model13 = 
discriminant_analysis.LinearDiscriminantAnalysis(solver='svd')\nmodel13.fit(xTrain1, yTrain1)\nmodel13.coef_[0], model13.intercept_[0]", "_____no_output_____" ] ], [ [ "As shown below, the three models separator hyperplans are very similar, negative slope.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, figsize=(11, 7))\nplotSvm(xTrain1, yTrain1, w=model11.coef_[0], intercept=model11.intercept_[0], label='Training',\n separatorLabel='Soft Margin SVM', ax=ax)\nxx = np.array([-1., 1.])\nplotLine(ax, xx, w=model12.coef_[0], x0=model12.intercept_[0], label='Logistic reg', color='orange')\nplotLine(ax, xx, w=model13.coef_[0], x0=model13.intercept_[0], label='LDA', color='c')\nax.legend();", "_____no_output_____" ] ], [ [ "### Validation with test data", "_____no_output_____" ] ], [ [ "xTest1, yTest1 = generateBatchBipolar(2*N, mu=0.3, sigma=0.3)", "_____no_output_____" ] ], [ [ "#### Helpers for binary classification performance", "_____no_output_____" ] ], [ [ "def plotHeatMap(X, classes, title=None, fmt='.2g', ax=None, xlabel=None, ylabel=None):\n \"\"\" Fix heatmap plot from Seaborn with pyplot 3.1.0, 3.1.1\n https://stackoverflow.com/questions/56942670/matplotlib-seaborn-first-and-last-row-cut-in-half-of-heatmap-plot\n \"\"\"\n ax = sns.heatmap(X, xticklabels=classes, yticklabels=classes, annot=True, \\\n fmt=fmt, cmap=plt.cm.Blues, ax=ax) #notation: \"annot\" not \"annote\"\n bottom, top = ax.get_ylim()\n ax.set_ylim(bottom + 0.5, top - 0.5)\n if title:\n ax.set_title(title)\n if xlabel:\n ax.set_xlabel(xlabel)\n if ylabel:\n ax.set_ylabel(ylabel)\n \ndef plotConfusionMatrix(yTrue, yEst, classes, title=None, fmt='.2g', ax=None):\n plotHeatMap(metrics.confusion_matrix(yTrue, yEst), classes, title, fmt, ax, xlabel='Estimations', \\\n ylabel='True values');", "_____no_output_____" ] ], [ [ "### Confusion matrices", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(1, 3, figsize=(16, 3))\nfor model, ax, title in zip([model10, model12, model13], axes, ['Custom linear SVM', 'Logistic reg', 'LDA']):\n yEst = model.predict(xTest1)\n plotConfusionMatrix(yTest1, yEst, colors, title, ax=ax)", "_____no_output_____" ] ], [ [ "There is no clear winner, all models are performing equally well.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, figsize=(11, 7))\nplotSvm(xTest1, yTest1, w=model10.w, intercept=model10.intercept, separatorLabel='Cust. linear SVM', ax=ax)\nxx = np.array([-1., 1.])\nplotLine(ax, xx, w=model12.coef_[0], x0=model12.intercept_[0], label='Logistic reg', color='orange')\nplotLine(ax, xx, w=model13.coef_[0], x0=model13.intercept_[0], label='LDA', color='c')\nax.legend();", "_____no_output_____" ] ], [ [ "# 3. The \"kernel trick\" for non linearly separable classes\n\nLet's use a very famous dataset showing the main limitation of the Logistic regression and LDA : the XOR.", "_____no_output_____" ] ], [ [ "def generateBatchXor(n, mu=0.5, sigma=0.5):\n \"\"\" Four gaussian clouds in a Xor fashion \"\"\"\n X = np.random.normal(mu, sigma, (n, 2))\n yB0 = np.random.uniform(0, 1, n) > 0.5\n yB1 = np.random.uniform(0, 1, n) > 0.5\n # y is in {-1, 1}\n y0 = 2. * yB0 - 1\n y1 = 2. 
* yB1 - 1\n X[:,0] *= y0\n X[:,1] *= y1\n X -= X.mean(axis=0)\n return X, y0*y1", "_____no_output_____" ], [ "xTrain3, yTrain3 = generateBatchXor(2*N, sigma=0.25)\nplotSvm(xTrain3, yTrain3)\nxTest3, yTest3 = generateBatchXor(2*N, sigma=0.25)", "_____no_output_____" ] ], [ [ "## Logistic regression and LDA on XOR problem", "_____no_output_____" ] ], [ [ "model32 = linear_model.LogisticRegression(solver='lbfgs')\nmodel32.fit(xTrain3, yTrain3)\nmodel32.coef_[0], model32.intercept_[0]", "_____no_output_____" ], [ "model33 = discriminant_analysis.LinearDiscriminantAnalysis(solver='svd')\nmodel33.fit(xTrain3, yTrain3)\nmodel33.coef_[0], model33.intercept_[0]", "_____no_output_____" ] ], [ [ "The linear separators are sometimes mitigating the issue by isolating a single class within a corner. Or they are simply fully failing (separator is of limit).", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, figsize=(11, 7))\nplotSvm(xTrain3, yTrain3, w=model32.coef_[0], intercept=model32.intercept_[0], label='Training',\n separatorLabel='Logistic reg', ax=ax)\nxx = np.array([-1., 1.])\nplotLine(ax, xx, w=model33.coef_[0], x0=model33.intercept_[0], label='LDA', color='c')\nax.legend();", "_____no_output_____" ] ], [ [ "## Introducing the Kernel trick\n\nWhen using linear separators like the regression, the traditional way to deal with non linear functions is to expand the feature space using powers and products of the initial features. This is also necessary in case of multiclass problems as shown in [1 chap. 4.2].\n\nThere are limits to this trick. For example, the XOR problem is not handled proprely.\n\nThe SVM has used a new method known as the \"Kernel trick\".\n\nLet's apply a transformation to $x$ using function $h(x)$. \n\nThe Lagrange (Wolfe) dual problem becomes : \n$$\\begin{align}\n\\mathcal{L}_d (\\alpha)\n&= \\sum_{i=0}^n \\alpha_i - \\frac 12 \\sum_{i=0}^n \\sum_{k=0}^n \\alpha_i \\alpha_k y_i y_k h(x_i)^T h(x_k) \\\\\n&= \\sum_{i=0}^n \\alpha_i - \\frac 12 \\sum_{i=0}^n \\sum_{k=0}^n \\alpha_i \\alpha_k \\langle y_i h(x_i), y_k h(x_k) \\rangle \\\\\n\\end{align}$$\n\n__Subject to $\\forall i\\in 1..n$:__\n- $0 \\le \\alpha_i \\le C$\n- $\\sum_{i=0}^n \\alpha_i y_i = 0$\n\nSince $ w = \\sum_{i=0}^n \\alpha_i y_i h(x_i)$, the prediction function is now : \n$$ f(x) = sign(w^T h(x) + b) = sign \\left(\\sum_{i=0}^n \\alpha_i y_i \\langle h(x_i), h(x) \\rangle \\right) $$\n\nThis prediction needs to be computed for $\\alpha_i > 0$, that are support vectors.\n\nBoth the fit and prediction are based on the inner product $K(x, x') = \\langle h(x), h(x') \\rangle$, also known as the kernel function. 
This function shall be symmetric, semi-definite.\n\nPopular kernel is the Gaussian Radial Basis Function (RBF) : $K(x, x') = exp(- \\gamma \\Vert x - x' \\Vert^2 )$", "_____no_output_____" ], [ "### Custom implementation of the SVM with G-RBF kernel\n\nModifications made on the Linear SVM implementation are enclosed in blocks starting with _\"# --->\"_ and ending with _\"# <---\"_", "_____no_output_____" ] ], [ [ "class KernelSvmClassifier:\n \n def __init__(self, C, kernel):\n self.C = C \n self.kernel = kernel # <---\n self.alpha = None\n self.supportVectors = None\n \n def fit(self, X, y):\n N = len(y)\n # --->\n # Gram matrix of h(x) y\n hXX = np.apply_along_axis(lambda x1 : np.apply_along_axis(lambda x2: self.kernel(x1, x2), 1, X),\n 1, X) \n yp = y.reshape(-1, 1)\n GramHXy = hXX * np.matmul(yp, yp.T) \n # <---\n\n # Lagrange dual problem\n def Ld0(G, alpha):\n return alpha.sum() - 0.5 * alpha.dot(alpha.dot(G))\n\n # Partial derivate of Ld on alpha\n def Ld0dAlpha(G, alpha):\n return np.ones_like(alpha) - alpha.dot(G)\n\n # Constraints on alpha of the shape :\n # - d - C*alpha = 0\n # - b - A*alpha >= 0\n A = np.vstack((-np.eye(N), np.eye(N))) # <---\n b = np.hstack((np.zeros(N), self.C * np.ones(N))) # <---\n constraints = ({'type': 'eq', 'fun': lambda a: np.dot(a, y), 'jac': lambda a: y},\n {'type': 'ineq', 'fun': lambda a: b - np.dot(A, a), 'jac': lambda a: -A})\n\n # Maximize by minimizing the opposite\n optRes = optimize.minimize(fun=lambda a: -Ld0(GramHXy, a),\n x0=np.ones(N), \n method='SLSQP', \n jac=lambda a: -Ld0dAlpha(GramHXy, a), \n constraints=constraints)\n self.alpha = optRes.x\n # --->\n epsilon = 1e-8\n supportIndices = self.alpha > epsilon\n self.supportVectors = X[supportIndices]\n self.supportAlphaY = y[supportIndices] * self.alpha[supportIndices]\n # <---\n \n def predict(self, X):\n \"\"\" Predict y values in {-1, 1} \"\"\"\n # --->\n def predict1(x):\n x1 = np.apply_along_axis(lambda s: self.kernel(s, x), 1, self.supportVectors)\n x2 = x1 * self.supportAlphaY\n return np.sum(x2)\n \n d = np.apply_along_axis(predict1, 1, X)\n return 2 * (d > 0) - 1\n # <---", "_____no_output_____" ], [ "def GRBF(x1, x2):\n diff = x1 - x2\n return np.exp(-np.dot(diff, diff) * len(x1) / 2)\n\nmodel30 = KernelSvmClassifier(C=5, kernel=GRBF)\nmodel30.fit(xTrain3, yTrain3)", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, figsize=(11, 7))\nplotSvm(xTrain3, yTrain3, support=model30.supportVectors, label='Training', ax=ax)\n\n# Estimate and plot decision boundary\nxx = np.linspace(-1, 1, 50)\nX0, X1 = np.meshgrid(xx, xx)\nxy = np.vstack([X0.ravel(), X1.ravel()]).T\nY30 = model30.predict(xy).reshape(X0.shape)\nax.contour(X0, X1, Y30, colors='k', levels=[-1, 0], alpha=0.3, linestyles=['-.', '-']);", "Number of support vectors = 34\n" ] ], [ [ "## Scikit Learn SVM with Radial basis kernel", "_____no_output_____" ] ], [ [ "model31 = svm.SVC(kernel='rbf', C=10, gamma=1/2, shrinking=False)\nmodel31.fit(xTrain3, yTrain3);", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, figsize=(11, 7))\nplotSvm(xTrain3, yTrain3, support=model31.support_vectors_, label='Training', ax=ax)\n\n# Estimate and plot decision boundary\nY31 = model31.predict(xy).reshape(X0.shape)\nax.contour(X0, X1, Y31, colors='k', levels=[-1, 0], alpha=0.3, linestyles=['-.', '-']);", "Number of support vectors = 39\n" ] ], [ [ "### SVM with RBF performance on XOR", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(1, 2, figsize=(11, 3))\nfor model, ax, title in zip([model30, model31], axes, [\"Custom SVM with 
RBF\", \"SKLearn SVM with RBF\"]):\n yEst3 = model.predict(xTest3)\n plotConfusionMatrix(yTest3, yEst3, colors, title, ax=ax)", "_____no_output_____" ] ], [ [ "Both models' predictions are almost matching on the XOR example.", "_____no_output_____" ], [ "## Conclusion\n\nWe have shown the power of SVM classifiers for non linearly separable problems. From the end of the 1990's, SVM was the leading machine learning algorithm family for many problems. This situation has changed a little since 2010 as deep learning has shown better performance for some classes of problems. However, SVM remains stronger in many contexts. For example, the amount of training data for SVM is lower than the one required for deep learning.\n\n### Where to go from here\n\n- Multiclass classifier using Neural Nets in Keras ([HTML](ClassificationMulti2Features-Keras.html) / [Jupyter](ClassificationMulti2Features-Keras.ipynb))\n- Multiclass classifier using Decision Trees ([HTML](ClassificationMulti2Features-Tree.html) / [Jupyter](ClassificationMulti2Features-Tree.ipynb))\n- Bivariate continuous function approximation with Linear Regression ([HTML](ClassificationContinuous2Features.html) / [Jupyter](ClassificationContinuous2Features.ipynb))\n- Bivariate continuous function approximation with k Nearest Neighbors ([HTML](ClassificationContinuous2Features-KNN.html) / [Jupyter](ClassificationContinuous2Features-KNN.ipynb))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0aa95892d50fed477443bee7eaf430704c808ba
612,579
ipynb
Jupyter Notebook
notebooks/caffe/introspection.ipynb
Petr-By/qtpyvis
0b9a151ee6b9a56b486c2bece9c1f03414629efc
[ "MIT" ]
3
2017-10-04T14:51:26.000Z
2017-10-22T09:35:50.000Z
notebooks/caffe/introspection.ipynb
CogSciUOS/DeepLearningToolbox
bf07578b9486d8c48e25df357bc4b9963b513b46
[ "MIT" ]
13
2017-11-26T10:05:00.000Z
2018-03-11T14:08:40.000Z
notebooks/caffe/introspection.ipynb
CogSciUOS/DeepLearningToolbox
bf07578b9486d8c48e25df357bc4b9963b513b46
[ "MIT" ]
2
2017-09-24T21:39:42.000Z
2017-10-04T15:29:54.000Z
330.231267
6,838
0.918556
[ [ [ "import caffe\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom keras.datasets import mnist\nfrom caffe.proto import caffe_pb2\nimport google.protobuf.text_format\nplt.rcParams['image.cmap'] = 'gray'\n%matplotlib inline", "Using TensorFlow backend.\n" ] ], [ [ "Loading the model", "_____no_output_____" ] ], [ [ "model_def = 'example_caffe_mnist_model.prototxt'\nmodel_weights = 'mnist.caffemodel'\nnet = caffe.Net(model_def, model_weights, caffe.TEST)", "_____no_output_____" ] ], [ [ "A Caffe net offers a layer dict that maps layer names to layer objects. These objects do not provide very much information though, but access to their weights and the type of the layer.", "_____no_output_____" ] ], [ [ "net.layer_dict", "_____no_output_____" ], [ "conv_layer = net.layer_dict['conv2d_1']\nconv_layer.type, conv_layer.blobs[0].data.shape", "_____no_output_____" ] ], [ [ "### Getting input and output shape.\nThe net provides a `blobs dict`. These blobs contain `data`, i.e. all the intermediary computation results and `diff`, i.e. the gradients. ", "_____no_output_____" ] ], [ [ "for name, blob in net.blobs.items():\n print('{}: \\t {}'.format(name, blob.data.shape))", "data: \t (64, 1, 28, 28)\nlabel: \t (64,)\nconv2d_1: \t (64, 32, 26, 26)\nmax_pooling2d_1: \t (64, 32, 13, 13)\nconv2d_2: \t (64, 32, 11, 11)\ndropout_1: \t (64, 32, 11, 11)\ndense_1: \t (64, 64)\ndropout_2: \t (64, 64)\ndense_2: \t (64, 10)\nloss: \t ()\n" ] ], [ [ "### Getting the weigths.\nThe net provides access to a `param dict` that contains the weights. The first entry in param corresponds to the weights, the second corresponds to the bias.", "_____no_output_____" ] ], [ [ "net.params", "_____no_output_____" ], [ "for name, param in net.params.items():\n print('{}:\\t {} \\t{}'.format(name, param[0].data.shape, param[1].data.shape))", "conv2d_1:\t (32, 1, 3, 3) \t(32,)\nconv2d_2:\t (32, 32, 3, 3) \t(32,)\ndense_1:\t (64, 3872) \t(64,)\ndense_2:\t (10, 64) \t(10,)\n" ] ], [ [ "The weights are also accessible through the layer blobs.", "_____no_output_____" ] ], [ [ "for layer in net.layers:\n try:\n print (layer.type + '\\t' + str(layer.blobs[0].data.shape), str(layer.blobs[1].data.shape))\n except:\n continue\n ", "Convolution\t(32, 1, 3, 3) (32,)\nConvolution\t(32, 32, 3, 3) (32,)\nInnerProduct\t(64, 3872) (64,)\nInnerProduct\t(10, 64) (10,)\n" ], [ "weights = net.params['conv2d_1'][0].data\nweights.shape", "_____no_output_____" ] ], [ [ "For visualizing the weights the axis still have to be moved around.", "_____no_output_____" ] ], [ [ "for i in range(32):\n plt.imshow(np.moveaxis(weights[i], 0, -1)[..., 0])\n plt.show()", "_____no_output_____" ] ], [ [ "Layers that have no weights simply keep empty lists as their blob vector.", "_____no_output_____" ] ], [ [ "list(net.layer_dict['dropout_1'].blobs)", "_____no_output_____" ] ], [ [ "### Getting the activations and the net input.\nFor getting activations, first data has to be passed through the network. Then the activations can be read out from the blobs. If the activations are defined as in place operations, the net input will not be stored in any blob and can therefore not be recovered. This problem can be circumvented if the network definition is changed so that in place operations are avoided. 
This can also be done programatically as follows.", "_____no_output_____" ] ], [ [ "def remove_inplace(model_def):\n protonet = caffe_pb2.NetParameter()\n with open(model_def, 'r') as fp:\n google.protobuf.text_format.Parse(str(fp.read()), protonet)\n \n replaced_tops = {}\n for layer in protonet.layer:\n # Check whehter bottoms were renamed.\n for i in range(len(layer.bottom)):\n if layer.bottom[i] in replaced_tops.keys():\n layer.bottom[i] = replaced_tops[layer.bottom[i]]\n \n \n if layer.bottom == layer.top:\n for i in range(len(layer.top)):\n # Retain the mapping from the old to the new name.\n new_top = layer.top[i] + '_' + layer.name\n replaced_tops[layer.top[i]] = new_top\n # Redefine layer.top\n layer.top[i] = new_top\n \n return protonet\n\nmodel_def = 'example_caffe_mnist_model_deploy.prototxt'\n\nprotonet_no_inplace = remove_inplace(model_def)\nprotonet_no_inplace", "_____no_output_____" ], [ "model_def = 'example_caffe_network_no_inplace_deploy.prototxt'\nmodel_weights = 'mnist.caffemodel'\nnet_no_inplace = caffe.Net(model_def, model_weights, caffe.TEST)", "_____no_output_____" ], [ "net_no_inplace.layer_dict", "_____no_output_____" ], [ "net_no_inplace.blobs", "_____no_output_____" ], [ "# Loading and preprocessing data.\ndata = mnist.load_data()[1][0]\n# Normalize data.\ndata = data / data.max()\nplt.imshow(data[0, :, :])\nseven = data[0, :, :]\nprint(seven.shape)\nseven = seven[np.newaxis, ...]\nprint(seven.shape)", "(28, 28)\n(1, 28, 28)\n" ] ], [ [ "Feeding the input and forwarding it.", "_____no_output_____" ] ], [ [ "net_no_inplace.blobs['data'].data[...] = seven\noutput = net_no_inplace.forward()\noutput['prob'][0].argmax()", "_____no_output_____" ], [ "activations = net_no_inplace.blobs['relu_1'].data\n\nfor i in range(32):\n plt.imshow(activations[0, i, :, :])\n plt.title('Feature map %d' % i)\n plt.show()", "_____no_output_____" ], [ "net_input = net_no_inplace.blobs['conv2d_1'].data\n\nfor i in range(32):\n plt.imshow(net_input[0, i, :, :])\n plt.title('Feature map %d' % i)\n plt.show()", "_____no_output_____" ] ], [ [ "### Getting layer properties\nFrom the layer object not more then type information is available. There the original .prototxt has to be parsed to access attributes such as kernel size.", "_____no_output_____" ] ], [ [ "model_def = 'example_caffe_mnist_model.prototxt'\nf = open(model_def, 'r')\nprotonet = caffe_pb2.NetParameter()\ngoogle.protobuf.text_format.Parse(str(f.read()), protonet)\nf.close()\nprotonet", "_____no_output_____" ], [ "type(protonet)", "_____no_output_____" ] ], [ [ "Parsed messages for the layer can be found in `message.layer` list.", "_____no_output_____" ] ], [ [ "for i in range(0, len(protonet.layer)):\n if protonet.layer[i].type == 'Convolution':\n print('layer %s has kernel_size %d' \n % (protonet.layer[i].name, \n protonet.layer[i].convolution_param.kernel_size[0]))\n lconv_proto = protonet.layer[i]", "layer conv2d_1 has kernel_size 3\nlayer conv2d_2 has kernel_size 3\n" ], [ "len(protonet.layer), len(net.layers)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0aab32faa1e7fdce07441686faf63099f8d187b
255,700
ipynb
Jupyter Notebook
python/ch10/10.5.3-transpose-conv-upsampling.ipynb
krishnonwork/mathematical-methods-in-deep-learning
12a7e7a9981f8639b4524b7977bd185f82c04e2d
[ "MIT" ]
1
2020-03-20T20:46:58.000Z
2020-03-20T20:46:58.000Z
python/ch10/10.5.3-transpose-conv-upsampling.ipynb
sthagen/mathematical-methods-in-deep-learning-ipython
12a7e7a9981f8639b4524b7977bd185f82c04e2d
[ "MIT" ]
null
null
null
python/ch10/10.5.3-transpose-conv-upsampling.ipynb
sthagen/mathematical-methods-in-deep-learning-ipython
12a7e7a9981f8639b4524b7977bd185f82c04e2d
[ "MIT" ]
null
null
null
809.177215
180,644
0.947524
[ [ [ "# Transpose convolution: Upsampling\n\nIn section 10.5.3, we discussed how transpose convolutions are can be used to upsample a lower resolution input into a higher resolution output. This notebook contains fully functional PyTorch code for the same. ", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport torch\nimport math", "_____no_output_____" ] ], [ [ "First, let's look at how transpose convolution works on a simple input tensor. Then we will look at a real image. For this purpose, we will consider the example described in Figure 10.17. The input is a 2x2 array as follows:\n$$\nx = \\begin{bmatrix}\n5 & 6 \\\\\n7 & 8 \\\\\n\\end{bmatrix}\n$$\nand the transpose convolution kernel is also a 2x2 array as follows\n$$\nw = \\begin{bmatrix}\n1 & 2 \\\\\n3 & 4 \\\\\n\\end{bmatrix}\n$$\n\nTranspose convolution with stride 1 results in a 3x3 output as shown below. ", "_____no_output_____" ], [ "# Transpose conv 2D with stride 1", "_____no_output_____" ] ], [ [ "x = torch.tensor([\n [5., 6.],\n [7., 8.]\n ])\nw = torch.tensor([\n [1., 2.],\n [3., 4.]\n ])\n\nx = x.unsqueeze(0).unsqueeze(0)\nw = w.unsqueeze(0).unsqueeze(0)\n\ntranspose_conv2d = torch.nn.ConvTranspose2d(1, 1, kernel_size=2, stride=1, bias=False)\n# set weights of the TransposeConv2d object\nwith torch.no_grad():\n transpose_conv2d.weight = torch.nn.Parameter(w)\n\nwith torch.no_grad():\n y = transpose_conv2d(x)\n\ny", "_____no_output_____" ] ], [ [ "# Transpose conv 2D with stride 2\n\nIn the above example, we did not get a truly upsampled version of the input because we used a kernel stride of 1. Thei increase in resolution from 2 to 3 comes because of padding. Now, let's see how to truly upsample the image - we will run transpose convolution with stride 2. The step by step demonstration of this is shown in Figure 10.18. As you can see below, we obtained a 4z4 output. This is because we used a kernel a stride 2. Using a larger stride with further increase the output resolution", "_____no_output_____" ] ], [ [ "x = torch.tensor([\n [5., 6.],\n [7., 8.]\n ])\nw = torch.tensor([\n [1., 2.],\n [3., 4.]\n ])\n\nx = x.unsqueeze(0).unsqueeze(0)\nw = w.unsqueeze(0).unsqueeze(0)\n\ntranspose_conv2d = torch.nn.ConvTranspose2d(1, 1, kernel_size=2, stride=2, bias=False)\n# set weights of the TransposeConv2d object\nwith torch.no_grad():\n transpose_conv2d.weight = torch.nn.Parameter(w)\n\nwith torch.no_grad():\n y = transpose_conv2d(x)\n\ny", "_____no_output_____" ] ], [ [ "Now, let's take a sample image and see how the input compares to the output post transpose convolution with stride 2.", "_____no_output_____" ] ], [ [ "import cv2\nx = torch.tensor(cv2.imread(\"./Figures/dog2.jpg\", 0), dtype=torch.float32)\n\nw = torch.tensor([\n [1., 1.],\n [1., 1.]\n ])\n\nx = x.unsqueeze(0).unsqueeze(0)\nw = w.unsqueeze(0).unsqueeze(0)\n\ntranspose_conv2d = torch.nn.ConvTranspose2d(1, 1, kernel_size=2, \n stride=2, bias=False)\n# set weights of the TransposeConv2d object\nwith torch.no_grad():\n transpose_conv2d.weight = torch.nn.Parameter(w)\n\nwith torch.no_grad():\n y = transpose_conv2d(x)\n\ny", "_____no_output_____" ], [ "print(\"Input shape:\", x.shape)\nprint(\"Output shape:\", y.shape)", "Input shape: torch.Size([1, 1, 168, 300])\nOutput shape: torch.Size([1, 1, 336, 600])\n" ] ], [ [ "As expected, the output is twice the size of the input. 
The images below should make this clear", "_____no_output_____" ] ], [ [ "def display_image_in_actual_size(im_data, title):\n\n dpi = 80\n height, width = im_data.shape\n\n # What size does the figure need to be in inches to fit the image?\n figsize = width / float(dpi), height / float(dpi)\n\n # Create a figure of the right size with one axes that takes up the full figure\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes([0, 0, 1, 1])\n\n # Hide spines, ticks, etc.\n ax.axis('off')\n\n # Display the image.\n ax.imshow(im_data, cmap='gray')\n ax.set_title(title)\n\n plt.show()\n\ndisplay_image_in_actual_size(x.squeeze().squeeze(), \"Input image\")\ndisplay_image_in_actual_size(y.squeeze().squeeze(), \"Output image\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d0aac020c853a35cac25a26edce70940ab37e138
641,334
ipynb
Jupyter Notebook
legacy/arkady TF legacy/TF_2020_course4_week1_forcasting.ipynb
21kc-caracol/Acoustic_data_Image_vs_Mean
96801c0dd5c47859086c8b6f145a61333575d9b6
[ "MIT" ]
1
2020-10-23T06:02:41.000Z
2020-10-23T06:02:41.000Z
legacy/arkady TF legacy/TF_2020_course4_week1_forcasting.ipynb
21kc-caracol/Acoustic_data_Image_vs_Mean
96801c0dd5c47859086c8b6f145a61333575d9b6
[ "MIT" ]
null
null
null
legacy/arkady TF legacy/TF_2020_course4_week1_forcasting.ipynb
21kc-caracol/Acoustic_data_Image_vs_Mean
96801c0dd5c47859086c8b6f145a61333575d9b6
[ "MIT" ]
null
null
null
1,201
96,924
0.958808
[ [ [ "import tensorflow as tf\nprint(tf.__version__)\n", "2.0.0\n" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow import keras\n\ndef plot_series(time, series, format=\"-\", start=0, end=None):\n plt.plot(time[start:end], series[start:end], format)\n plt.xlabel(\"Time\")\n plt.ylabel(\"Value\")\n plt.grid(True)\n\ndef trend(time, slope=0):\n return slope * time\n\ndef seasonal_pattern(season_time):\n \"\"\"Just an arbitrary pattern, you can change it if you wish\"\"\"\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))\n\ndef seasonality(time, period, amplitude=1, phase=0):\n \"\"\"Repeats the same pattern at each period\"\"\"\n season_time = ((time + phase) % period) / period\n return amplitude * seasonal_pattern(season_time)\n\ndef noise(time, noise_level=1, seed=None):\n rnd = np.random.RandomState(seed)\n return rnd.randn(len(time)) * noise_level\n\ntime = np.arange(4 * 365 + 1, dtype=\"float32\")\nbaseline = 10\nseries = trend(time, 0.1) \nbaseline = 10\namplitude = 40\nslope = 0.05\nnoise_level = 5\n\n# Create the series\nseries = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)\n# Update with noise\nseries += noise(time, noise_level, seed=42)\n\nplt.figure(figsize=(10, 6))\nplot_series(time, series)\nplt.show()", "_____no_output_____" ] ], [ [ "Now that we have the time series, let's split it so we can start forecasting", "_____no_output_____" ] ], [ [ "split_time = 1000\ntime_train = time[:split_time]\nx_train = series[:split_time]\ntime_valid = time[split_time:]\nx_valid = series[split_time:]\nplt.figure(figsize=(10, 6))\nplot_series(time_train, x_train)\nplt.show()\n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)\nplt.show()", "_____no_output_____" ] ], [ [ "Naive Forecast", "_____no_output_____" ] ], [ [ "naive_forecast = series[split_time - 1:-1]", "_____no_output_____" ], [ "plt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)\nplot_series(time_valid, naive_forecast)", "_____no_output_____" ], [ "x = [1,2, 3, 4, 5, 6, 7, 8, 9, 10]\nprint(x)\nsplit=7\nnaive=x[split - 1:-1]\nprint(naive)\nx_val = x[split:]\nprint(x_val)", "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n[7, 8, 9]\n[8, 9, 10]\n" ] ], [ [ "Let's zoom in on the start of the validation period:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid, start=0, end=150)\nplot_series(time_valid, naive_forecast, start=1, end=151)", "_____no_output_____" ] ], [ [ "You can see that the naive forecast lags 1 step behind the time series.\n\nNow let's compute the mean squared error and the mean absolute error between the forecasts and the predictions in the validation period:\n", "_____no_output_____" ] ], [ [ "print(keras.metrics.mean_squared_error(x_valid, naive_forecast).numpy())\nprint(keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy())", "61.827538\n5.937908\n" ] ], [ [ "That's our baseline, now let's try a moving average:", "_____no_output_____" ] ], [ [ "def moving_average_forecast(series, window_size):\n \"\"\"Forecasts the mean of the last few values.\n If window_size=1, then this is equivalent to naive forecast\"\"\"\n forecast = []\n for time in range(len(series) - window_size):\n forecast.append(series[time:time + window_size].mean())\n return np.array(forecast)", "_____no_output_____" ], [ "moving_avg = moving_average_forecast(series, 30)[split_time - 30:]\n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, 
x_valid)\nplot_series(time_valid, moving_avg)", "_____no_output_____" ], [ "print(keras.metrics.mean_squared_error(x_valid, moving_avg).numpy())\nprint(keras.metrics.mean_absolute_error(x_valid, moving_avg).numpy())", "106.674576\n7.142419\n" ] ], [ [ "That's worse than naive forecast! The moving average does not anticipate trend or seasonality, so let's try to remove them by using differencing. Since the seasonality period is 365 days, we will subtract the value at time t – 365 from the value at time t.\n", "_____no_output_____" ] ], [ [ "diff_series = (series[365:] - series[:-365])\ndiff_time = time[365:]\n\nplt.figure(figsize=(10, 6))\nplot_series(diff_time, diff_series)\nplt.show()", "_____no_output_____" ] ], [ [ "Great, the trend and seasonality seem to be gone, so now we can use the moving average:", "_____no_output_____" ] ], [ [ "diff_moving_avg = moving_average_forecast(diff_series, 50)[split_time - 365 - 50:]\n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, diff_series[split_time - 365:])\nplot_series(time_valid, diff_moving_avg)\nplt.show()", "_____no_output_____" ] ], [ [ "Now let's bring back the trend and seasonality by adding the past values from t – 365:", "_____no_output_____" ] ], [ [ "diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg\n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)\nplot_series(time_valid, diff_moving_avg_plus_past)\nplt.show()", "_____no_output_____" ], [ "print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_past).numpy())\nprint(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_past).numpy())", "52.97366\n5.839311\n" ] ], [ [ "Better than naive forecast, good. However the forecasts look a bit too random, because we're just adding past values, which were noisy. Let's use a moving averaging on past values to remove some of the noise:", "_____no_output_____" ] ], [ [ "diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-360], 10) + diff_moving_avg\n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)\nplot_series(time_valid, diff_moving_avg_plus_smooth_past)\nplt.show()", "_____no_output_____" ], [ "print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())\nprint(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())", "33.45226\n4.569442\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0aac2c7068f1b9e97bb2a545ee49e2c8077b264
87,902
ipynb
Jupyter Notebook
_docs/nbs/T728835-Black-box-Attack-Model-DVC-Training-Pipeline.ipynb
sparsh-ai/recohut
4121f665761ffe38c9b6337eaa9293b26bee2376
[ "Apache-2.0" ]
null
null
null
_docs/nbs/T728835-Black-box-Attack-Model-DVC-Training-Pipeline.ipynb
sparsh-ai/recohut
4121f665761ffe38c9b6337eaa9293b26bee2376
[ "Apache-2.0" ]
1
2022-01-12T05:40:57.000Z
2022-01-12T05:40:57.000Z
_docs/nbs/T728835-Black-box-Attack-Model-DVC-Training-Pipeline.ipynb
RecoHut-Projects/recohut
4121f665761ffe38c9b6337eaa9293b26bee2376
[ "Apache-2.0" ]
null
null
null
87,902
87,902
0.595242
[ [ [ "!pip install -U -q dvc dvc[gdrive]\n# !dvc get https://github.com/sparsh-ai/reco-data ml1m/v0/ratings.dat", "\u001b[K |████████████████████████████████| 667 kB 5.2 MB/s \n\u001b[K |████████████████████████████████| 40 kB 15 kB/s \n\u001b[K |████████████████████████████████| 170 kB 59.5 MB/s \n\u001b[K |████████████████████████████████| 4.6 MB 47.4 MB/s \n\u001b[K |████████████████████████████████| 49 kB 6.1 MB/s \n\u001b[K |████████████████████████████████| 296 kB 69.2 MB/s \n\u001b[K |████████████████████████████████| 530 kB 66.2 MB/s \n\u001b[K |████████████████████████████████| 119 kB 73.8 MB/s \n\u001b[K |████████████████████████████████| 211 kB 64.3 MB/s \n\u001b[K |████████████████████████████████| 41 kB 685 kB/s \n\u001b[K |████████████████████████████████| 109 kB 66.1 MB/s \n\u001b[K |████████████████████████████████| 44 kB 2.5 MB/s \n\u001b[K |████████████████████████████████| 1.3 MB 50.7 MB/s \n\u001b[K |████████████████████████████████| 63 kB 1.7 MB/s \n\u001b[K |████████████████████████████████| 2.6 MB 37.9 MB/s \n\u001b[K |████████████████████████████████| 64 kB 2.4 MB/s \n\u001b[K |████████████████████████████████| 201 kB 54.1 MB/s \n\u001b[K |████████████████████████████████| 51 kB 6.5 MB/s \n\u001b[K |████████████████████████████████| 546 kB 56.7 MB/s \n\u001b[K |████████████████████████████████| 142 kB 58.7 MB/s \n\u001b[K |████████████████████████████████| 294 kB 69.4 MB/s \n\u001b[K |████████████████████████████████| 54 kB 2.4 MB/s \n\u001b[K |████████████████████████████████| 3.0 MB 49.5 MB/s \n\u001b[?25h Building wheel for configobj (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for flufl.lock (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for nanotime (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for pygtrie (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for atpublic (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for ftfy (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for mailchecker (setup.py) ... \u001b[?25l\u001b[?25hdone\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\ntensorflow 2.6.0 requires typing-extensions~=3.7.4, but you have typing-extensions 3.10.0.2 which is incompatible.\u001b[0m\n" ] ], [ [ "## Dataset", "_____no_output_____" ] ], [ [ "RAW_DATASET_ROOT_FOLDER = '/content/data/bronze'\nPREP_DATASET_ROOT_FOLDER = '/content/data/silver'\nFNL_DATASET_ROOT_FOLDER = '/content/data/gold'", "_____no_output_____" ], [ "import pickle\nimport shutil\nimport tempfile\nimport os\nfrom datetime import date\nfrom pathlib import Path\nimport gzip\nfrom abc import *\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\ntqdm.pandas()", "_____no_output_____" ], [ "class AbstractDataset(metaclass=ABCMeta):\n def __init__(self, args):\n self.args = args\n self.min_rating = args.min_rating\n self.min_uc = args.min_uc\n self.min_sc = args.min_sc\n self.split = args.split\n\n assert self.min_uc >= 2, 'Need at least 2 ratings per user for validation and test'\n\n @classmethod\n @abstractmethod\n def code(cls):\n pass\n\n @classmethod\n def raw_code(cls):\n return cls.code()\n\n @classmethod\n def all_raw_file_names(cls):\n return []\n\n @classmethod\n @abstractmethod\n def url(cls):\n pass\n\n @abstractmethod\n def preprocess(self):\n pass\n\n @abstractmethod\n def load_ratings_df(self):\n pass\n\n @abstractmethod\n def maybe_download_raw_dataset(self):\n pass\n\n def load_dataset(self):\n self.preprocess()\n dataset_path = self._get_preprocessed_dataset_path()\n dataset = pickle.load(dataset_path.open('rb'))\n return dataset\n\n def filter_triplets(self, df):\n print('Filtering triplets')\n if self.min_sc > 0:\n item_sizes = df.groupby('sid').size()\n good_items = item_sizes.index[item_sizes >= self.min_sc]\n df = df[df['sid'].isin(good_items)]\n\n if self.min_uc > 0:\n user_sizes = df.groupby('uid').size()\n good_users = user_sizes.index[user_sizes >= self.min_uc]\n df = df[df['uid'].isin(good_users)]\n return df\n\n def densify_index(self, df):\n print('Densifying index')\n umap = {u: i for i, u in enumerate(set(df['uid']), start=1)}\n smap = {s: i for i, s in enumerate(set(df['sid']), start=1)}\n df['uid'] = df['uid'].map(umap)\n df['sid'] = df['sid'].map(smap)\n return df, umap, smap\n\n def split_df(self, df, user_count):\n if self.args.split == 'leave_one_out':\n print('Splitting')\n user_group = df.groupby('uid')\n user2items = user_group.progress_apply(\n lambda d: list(d.sort_values(by=['timestamp', 'sid'])['sid']))\n train, val, test = {}, {}, {}\n for i in range(user_count):\n user = i + 1\n items = user2items[user]\n train[user], val[user], test[user] = items[:-2], items[-2:-1], items[-1:]\n return train, val, test\n else:\n raise NotImplementedError\n\n def _get_rawdata_root_path(self):\n return Path(RAW_DATASET_ROOT_FOLDER)\n\n def _get_rawdata_folder_path(self):\n root = self._get_rawdata_root_path()\n return root.joinpath(self.raw_code())\n\n def _get_preprocessed_root_path(self):\n root = Path(PREP_DATASET_ROOT_FOLDER)\n return root.joinpath(self.raw_code())\n\n def _get_preprocessed_folder_path(self):\n preprocessed_root = self._get_preprocessed_root_path()\n folder_name = '{}_min_rating{}-min_uc{}-min_sc{}-split{}' \\\n .format(self.code(), self.min_rating, self.min_uc, self.min_sc, self.split)\n return preprocessed_root.joinpath(folder_name)\n\n def _get_preprocessed_dataset_path(self):\n folder = self._get_preprocessed_folder_path()\n return folder.joinpath('dataset.pkl')", "_____no_output_____" ], [ "class ML1MDataset(AbstractDataset):\n @classmethod\n def 
code(cls):\n return 'ml-1m'\n\n @classmethod\n def url(cls):\n return {'path':'ml1m/v0',\n 'repo':'https://github.com/sparsh-ai/reco-data'}\n\n @classmethod\n def all_raw_file_names(cls):\n return ['ratings.dat']\n\n def maybe_download_raw_dataset(self):\n folder_path = self._get_rawdata_folder_path()\n if not folder_path.is_dir():\n folder_path.mkdir(parents=True)\n if all(folder_path.joinpath(filename).is_file() for filename in self.all_raw_file_names()):\n print('Raw data already exists. Skip downloading')\n return\n \n print(\"Raw file doesn't exist. Downloading...\")\n for filename in self.all_raw_file_names():\n with open(os.path.join(folder_path,filename), \"wb\") as f:\n with dvc.api.open(\n path=self.url()['path']+'/'+filename,\n repo=self.url()['repo'],\n mode='rb') as scan:\n f.write(scan.read())\n print()\n\n def preprocess(self):\n dataset_path = self._get_preprocessed_dataset_path()\n if dataset_path.is_file():\n print('Already preprocessed. Skip preprocessing')\n return\n if not dataset_path.parent.is_dir():\n dataset_path.parent.mkdir(parents=True)\n self.maybe_download_raw_dataset()\n df = self.load_ratings_df()\n df = self.filter_triplets(df)\n df, umap, smap = self.densify_index(df)\n train, val, test = self.split_df(df, len(umap))\n dataset = {'train': train,\n 'val': val,\n 'test': test,\n 'umap': umap,\n 'smap': smap}\n with dataset_path.open('wb') as f:\n pickle.dump(dataset, f)\n\n def load_ratings_df(self):\n folder_path = self._get_rawdata_folder_path()\n file_path = folder_path.joinpath('ratings.dat')\n df = pd.read_csv(file_path, sep='::', header=None)\n df.columns = ['uid', 'sid', 'rating', 'timestamp']\n return df", "_____no_output_____" ], [ "DATASETS = {\n ML1MDataset.code(): ML1MDataset,\n # ML20MDataset.code(): ML20MDataset,\n # SteamDataset.code(): SteamDataset,\n # GamesDataset.code(): GamesDataset,\n # BeautyDataset.code(): BeautyDataset,\n # BeautyDenseDataset.code(): BeautyDenseDataset,\n # YooChooseDataset.code(): YooChooseDataset\n}\n\n\ndef dataset_factory(args):\n dataset = DATASETS[args.dataset_code]\n return dataset(args)", "_____no_output_____" ] ], [ [ "## Negative Sampling", "_____no_output_____" ] ], [ [ "from abc import *\nfrom pathlib import Path\nimport pickle\n\n\nclass AbstractNegativeSampler(metaclass=ABCMeta):\n def __init__(self, train, val, test, user_count, item_count, sample_size, seed, flag, save_folder):\n self.train = train\n self.val = val\n self.test = test\n self.user_count = user_count\n self.item_count = item_count\n self.sample_size = sample_size\n self.seed = seed\n self.flag = flag\n self.save_folder = save_folder\n\n @classmethod\n @abstractmethod\n def code(cls):\n pass\n\n @abstractmethod\n def generate_negative_samples(self):\n pass\n\n def get_negative_samples(self):\n savefile_path = self._get_save_path()\n if savefile_path.is_file():\n print('Negatives samples exist. Loading.')\n seen_samples, negative_samples = pickle.load(savefile_path.open('rb'))\n return seen_samples, negative_samples\n print(\"Negative samples don't exist. 
Generating.\")\n seen_samples, negative_samples = self.generate_negative_samples()\n with savefile_path.open('wb') as f:\n pickle.dump([seen_samples, negative_samples], f)\n return seen_samples, negative_samples\n\n def _get_save_path(self):\n folder = Path(self.save_folder)\n filename = '{}-sample_size{}-seed{}-{}.pkl'.format(\n self.code(), self.sample_size, self.seed, self.flag)\n return folder.joinpath(filename)", "_____no_output_____" ], [ "from tqdm import trange\nimport numpy as np\n\n\nclass RandomNegativeSampler(AbstractNegativeSampler):\n @classmethod\n def code(cls):\n return 'random'\n\n def generate_negative_samples(self):\n assert self.seed is not None, 'Specify seed for random sampling'\n np.random.seed(self.seed)\n num_samples = 2 * self.user_count * self.sample_size\n all_samples = np.random.choice(self.item_count, num_samples) + 1\n\n seen_samples = {}\n negative_samples = {}\n print('Sampling negative items randomly...')\n j = 0\n for i in trange(self.user_count):\n user = i + 1\n seen = set(self.train[user])\n seen.update(self.val[user])\n seen.update(self.test[user])\n seen_samples[user] = seen\n\n samples = []\n while len(samples) < self.sample_size:\n item = all_samples[j % num_samples]\n j += 1\n if item in seen or item in samples:\n continue\n samples.append(item)\n negative_samples[user] = samples\n\n return seen_samples, negative_samples", "_____no_output_____" ], [ "from tqdm import trange\nfrom collections import Counter\nimport numpy as np\n\n\nclass PopularNegativeSampler(AbstractNegativeSampler):\n @classmethod\n def code(cls):\n return 'popular'\n\n def generate_negative_samples(self):\n assert self.seed is not None, 'Specify seed for random sampling'\n np.random.seed(self.seed)\n popularity = self.items_by_popularity()\n items = list(popularity.keys())\n total = 0\n for i in range(len(items)):\n total += popularity[items[i]]\n for i in range(len(items)):\n popularity[items[i]] /= total\n probs = list(popularity.values())\n num_samples = 2 * self.user_count * self.sample_size\n all_samples = np.random.choice(items, num_samples, p=probs)\n\n seen_samples = {}\n negative_samples = {}\n print('Sampling negative items by popularity...')\n j = 0\n for i in trange(self.user_count):\n user = i + 1\n seen = set(self.train[user])\n seen.update(self.val[user])\n seen.update(self.test[user])\n seen_samples[user] = seen\n\n samples = []\n while len(samples) < self.sample_size:\n item = all_samples[j % num_samples]\n j += 1\n if item in seen or item in samples:\n continue\n samples.append(item)\n negative_samples[user] = samples\n\n return seen_samples, negative_samples\n\n def items_by_popularity(self):\n popularity = Counter()\n self.users = sorted(self.train.keys())\n for user in self.users:\n popularity.update(self.train[user])\n popularity.update(self.val[user])\n popularity.update(self.test[user])\n\n popularity = dict(popularity)\n popularity = {k: v for k, v in sorted(popularity.items(), key=lambda item: item[1], reverse=True)}\n return popularity", "_____no_output_____" ], [ "NEGATIVE_SAMPLERS = {\n PopularNegativeSampler.code(): PopularNegativeSampler,\n RandomNegativeSampler.code(): RandomNegativeSampler,\n}\n\n\ndef negative_sampler_factory(code, train, val, test, user_count, item_count, sample_size, seed, flag, save_folder):\n negative_sampler = NEGATIVE_SAMPLERS[code]\n return negative_sampler(train, val, test, user_count, item_count, sample_size, seed, flag, save_folder)", "_____no_output_____" ] ], [ [ "## Dataloader", "_____no_output_____" ] ], [ [ 
"from abc import *\nimport random\n\n\nclass AbstractDataloader(metaclass=ABCMeta):\n def __init__(self, args, dataset):\n self.args = args\n self.rng = random.Random()\n self.save_folder = dataset._get_preprocessed_folder_path()\n dataset = dataset.load_dataset()\n self.train = dataset['train']\n self.val = dataset['val']\n self.test = dataset['test']\n self.umap = dataset['umap']\n self.smap = dataset['smap']\n self.user_count = len(self.umap)\n self.item_count = len(self.smap)\n\n @classmethod\n @abstractmethod\n def code(cls):\n pass\n\n @abstractmethod\n def get_pytorch_dataloaders(self):\n pass", "_____no_output_____" ], [ "import torch\nimport random\nimport torch.utils.data as data_utils\n\n\nclass RNNDataloader():\n def __init__(self, args, dataset):\n self.args = args\n self.rng = random.Random()\n self.save_folder = dataset._get_preprocessed_folder_path()\n dataset = dataset.load_dataset()\n self.train = dataset['train']\n self.val = dataset['val']\n self.test = dataset['test']\n self.umap = dataset['umap']\n self.smap = dataset['smap']\n self.user_count = len(self.umap)\n self.item_count = len(self.smap)\n\n args.num_items = len(self.smap)\n self.max_len = args.bert_max_len\n\n val_negative_sampler = negative_sampler_factory(args.test_negative_sampler_code,\n self.train, self.val, self.test,\n self.user_count, self.item_count,\n args.test_negative_sample_size,\n args.test_negative_sampling_seed,\n 'val', self.save_folder)\n test_negative_sampler = negative_sampler_factory(args.test_negative_sampler_code,\n self.train, self.val, self.test,\n self.user_count, self.item_count,\n args.test_negative_sample_size,\n args.test_negative_sampling_seed,\n 'test', self.save_folder)\n\n self.seen_samples, self.val_negative_samples = val_negative_sampler.get_negative_samples()\n self.seen_samples, self.test_negative_samples = test_negative_sampler.get_negative_samples()\n\n @classmethod\n def code(cls):\n return 'rnn'\n\n def get_pytorch_dataloaders(self):\n train_loader = self._get_train_loader()\n val_loader = self._get_val_loader()\n test_loader = self._get_test_loader()\n return train_loader, val_loader, test_loader\n\n def _get_train_loader(self):\n dataset = self._get_train_dataset()\n dataloader = data_utils.DataLoader(dataset, batch_size=self.args.train_batch_size,\n shuffle=True, pin_memory=True)\n return dataloader\n\n def _get_train_dataset(self):\n dataset = RNNTrainDataset(\n self.train, self.max_len)\n return dataset\n\n def _get_val_loader(self):\n return self._get_eval_loader(mode='val')\n\n def _get_test_loader(self):\n return self._get_eval_loader(mode='test')\n\n def _get_eval_loader(self, mode):\n batch_size = self.args.val_batch_size if mode == 'val' else self.args.test_batch_size\n dataset = self._get_eval_dataset(mode)\n dataloader = data_utils.DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True)\n return dataloader\n\n def _get_eval_dataset(self, mode):\n if mode == 'val':\n dataset = RNNValidDataset(self.train, self.val, self.max_len, self.val_negative_samples)\n elif mode == 'test':\n dataset = RNNTestDataset(self.train, self.val, self.test, self.max_len, self.test_negative_samples)\n return dataset\n\n\nclass RNNTrainDataset(data_utils.Dataset):\n def __init__(self, u2seq, max_len):\n # self.u2seq = u2seq\n # self.users = sorted(self.u2seq.keys())\n self.max_len = max_len\n self.all_seqs = []\n self.all_labels = []\n for u in sorted(u2seq.keys()):\n seq = u2seq[u]\n for i in range(1, len(seq)):\n self.all_seqs += [seq[:-i]]\n self.all_labels 
+= [seq[-i]]\n\n assert len(self.all_seqs) == len(self.all_labels)\n\n def __len__(self):\n return len(self.all_seqs)\n\n def __getitem__(self, index):\n tokens = self.all_seqs[index][-self.max_len:]\n length = len(tokens)\n tokens = tokens + [0] * (self.max_len - length)\n \n return torch.LongTensor(tokens), torch.LongTensor([length]), torch.LongTensor([self.all_labels[index]])\n\n\nclass RNNValidDataset(data_utils.Dataset):\n def __init__(self, u2seq, u2answer, max_len, negative_samples, valid_users=None):\n self.u2seq = u2seq # train\n if not valid_users:\n self.users = sorted(self.u2seq.keys())\n else:\n self.users = valid_users\n self.users = sorted(self.u2seq.keys())\n self.u2answer = u2answer\n self.max_len = max_len\n self.negative_samples = negative_samples\n \n def __len__(self):\n return len(self.users)\n\n def __getitem__(self, index):\n user = self.users[index]\n tokens = self.u2seq[user][-self.max_len:]\n length = len(tokens)\n tokens = tokens + [0] * (self.max_len - length)\n\n answer = self.u2answer[user]\n negs = self.negative_samples[user]\n candidates = answer + negs\n labels = [1] * len(answer) + [0] * len(negs)\n \n return torch.LongTensor(tokens), torch.LongTensor([length]), torch.LongTensor(candidates), torch.LongTensor(labels)\n\n\nclass RNNTestDataset(data_utils.Dataset):\n def __init__(self, u2seq, u2val, u2answer, max_len, negative_samples, test_users=None):\n self.u2seq = u2seq # train\n self.u2val = u2val # val\n if not test_users:\n self.users = sorted(self.u2seq.keys())\n else:\n self.users = test_users\n self.users = sorted(self.u2seq.keys())\n self.u2answer = u2answer # test\n self.max_len = max_len\n self.negative_samples = negative_samples\n\n def __len__(self):\n return len(self.users)\n\n def __getitem__(self, index):\n user = self.users[index]\n tokens = (self.u2seq[user] + self.u2val[user])[-self.max_len:] # append validation item after train seq\n length = len(tokens)\n tokens = tokens + [0] * (self.max_len - length)\n answer = self.u2answer[user]\n negs = self.negative_samples[user]\n candidates = answer + negs\n labels = [1] * len(answer) + [0] * len(negs)\n\n return torch.LongTensor(tokens), torch.LongTensor([length]), torch.LongTensor(candidates), torch.LongTensor(labels)", "_____no_output_____" ], [ "import torch\nimport random\nimport torch.utils.data as data_utils\n\n\nclass BERTDataloader():\n def __init__(self, args, dataset):\n self.args = args\n self.rng = random.Random()\n self.save_folder = dataset._get_preprocessed_folder_path()\n dataset = dataset.load_dataset()\n self.train = dataset['train']\n self.val = dataset['val']\n self.test = dataset['test']\n self.umap = dataset['umap']\n self.smap = dataset['smap']\n self.user_count = len(self.umap)\n self.item_count = len(self.smap)\n\n args.num_items = self.item_count\n self.max_len = args.bert_max_len\n self.mask_prob = args.bert_mask_prob\n self.max_predictions = args.bert_max_predictions\n self.sliding_size = args.sliding_window_size\n self.CLOZE_MASK_TOKEN = self.item_count + 1\n\n val_negative_sampler = negative_sampler_factory(args.test_negative_sampler_code,\n self.train, self.val, self.test,\n self.user_count, self.item_count,\n args.test_negative_sample_size,\n args.test_negative_sampling_seed,\n 'val', self.save_folder)\n test_negative_sampler = negative_sampler_factory(args.test_negative_sampler_code,\n self.train, self.val, self.test,\n self.user_count, self.item_count,\n args.test_negative_sample_size,\n args.test_negative_sampling_seed,\n 'test', self.save_folder)\n\n 
self.seen_samples, self.val_negative_samples = val_negative_sampler.get_negative_samples()\n self.seen_samples, self.test_negative_samples = test_negative_sampler.get_negative_samples()\n\n @classmethod\n def code(cls):\n return 'bert'\n\n def get_pytorch_dataloaders(self):\n train_loader = self._get_train_loader()\n val_loader = self._get_val_loader()\n test_loader = self._get_test_loader()\n return train_loader, val_loader, test_loader\n\n def _get_train_loader(self):\n dataset = self._get_train_dataset()\n dataloader = data_utils.DataLoader(dataset, batch_size=self.args.train_batch_size,\n shuffle=True, pin_memory=True)\n return dataloader\n\n def _get_train_dataset(self):\n dataset = BERTTrainDataset(\n self.train, self.max_len, self.mask_prob, self.max_predictions, self.sliding_size, self.CLOZE_MASK_TOKEN, self.item_count, self.rng)\n return dataset\n\n def _get_val_loader(self):\n return self._get_eval_loader(mode='val')\n\n def _get_test_loader(self):\n return self._get_eval_loader(mode='test')\n\n def _get_eval_loader(self, mode):\n batch_size = self.args.val_batch_size if mode == 'val' else self.args.test_batch_size\n dataset = self._get_eval_dataset(mode)\n dataloader = data_utils.DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True)\n return dataloader\n\n def _get_eval_dataset(self, mode):\n if mode == 'val':\n dataset = BERTValidDataset(self.train, self.val, self.max_len, self.CLOZE_MASK_TOKEN, self.val_negative_samples)\n elif mode == 'test':\n dataset = BERTTestDataset(self.train, self.val, self.test, self.max_len, self.CLOZE_MASK_TOKEN, self.test_negative_samples)\n return dataset\n\n\nclass BERTTrainDataset(data_utils.Dataset):\n def __init__(self, u2seq, max_len, mask_prob, max_predictions, sliding_size, mask_token, num_items, rng):\n # self.u2seq = u2seq\n # self.users = sorted(self.u2seq.keys())\n self.max_len = max_len\n self.mask_prob = mask_prob\n self.max_predictions = max_predictions\n self.sliding_step = int(sliding_size * max_len)\n self.mask_token = mask_token\n self.num_items = num_items\n self.rng = rng\n \n assert self.sliding_step > 0\n self.all_seqs = []\n for u in sorted(u2seq.keys()):\n seq = u2seq[u]\n if len(seq) < self.max_len + self.sliding_step:\n self.all_seqs.append(seq)\n else:\n start_idx = range(len(seq) - max_len, -1, -self.sliding_step)\n self.all_seqs = self.all_seqs + [seq[i:i + max_len] for i in start_idx]\n\n def __len__(self):\n return len(self.all_seqs)\n # return len(self.users)\n\n def __getitem__(self, index):\n # user = self.users[index]\n # seq = self._getseq(user)\n seq = self.all_seqs[index]\n\n tokens = []\n labels = []\n covered_items = set()\n for i in range(len(seq)):\n s = seq[i]\n if (len(covered_items) >= self.max_predictions) or (s in covered_items):\n tokens.append(s)\n labels.append(0)\n continue\n \n temp_mask_prob = self.mask_prob\n if i == (len(seq) - 1):\n temp_mask_prob += 0.1 * (1 - self.mask_prob)\n\n prob = self.rng.random()\n if prob < temp_mask_prob:\n covered_items.add(s)\n prob /= temp_mask_prob\n if prob < 0.8:\n tokens.append(self.mask_token)\n elif prob < 0.9:\n tokens.append(self.rng.randint(1, self.num_items))\n else:\n tokens.append(s)\n\n labels.append(s)\n else:\n tokens.append(s)\n labels.append(0)\n\n tokens = tokens[-self.max_len:]\n labels = labels[-self.max_len:]\n\n mask_len = self.max_len - len(tokens)\n\n tokens = [0] * mask_len + tokens\n labels = [0] * mask_len + labels\n\n return torch.LongTensor(tokens), torch.LongTensor(labels)\n\n def _getseq(self, user):\n return 
self.u2seq[user]\n\n\nclass BERTValidDataset(data_utils.Dataset):\n def __init__(self, u2seq, u2answer, max_len, mask_token, negative_samples, valid_users=None):\n self.u2seq = u2seq # train\n if not valid_users:\n self.users = sorted(self.u2seq.keys())\n else:\n self.users = valid_users\n self.u2answer = u2answer\n self.max_len = max_len\n self.mask_token = mask_token\n self.negative_samples = negative_samples\n\n def __len__(self):\n return len(self.users)\n\n def __getitem__(self, index):\n user = self.users[index]\n seq = self.u2seq[user]\n answer = self.u2answer[user]\n negs = self.negative_samples[user]\n\n candidates = answer + negs\n labels = [1] * len(answer) + [0] * len(negs)\n\n seq = seq + [self.mask_token]\n seq = seq[-self.max_len:]\n padding_len = self.max_len - len(seq)\n seq = [0] * padding_len + seq\n\n return torch.LongTensor(seq), torch.LongTensor(candidates), torch.LongTensor(labels)\n\n\nclass BERTTestDataset(data_utils.Dataset):\n def __init__(self, u2seq, u2val, u2answer, max_len, mask_token, negative_samples, test_users=None):\n self.u2seq = u2seq # train\n self.u2val = u2val # val\n if not test_users:\n self.users = sorted(self.u2seq.keys())\n else:\n self.users = test_users\n self.users = sorted(self.u2seq.keys())\n self.u2answer = u2answer # test\n self.max_len = max_len\n self.mask_token = mask_token\n self.negative_samples = negative_samples\n\n def __len__(self):\n return len(self.users)\n\n def __getitem__(self, index):\n user = self.users[index]\n seq = self.u2seq[user] + self.u2val[user] # append validation item after train seq\n answer = self.u2answer[user]\n negs = self.negative_samples[user]\n\n candidates = answer + negs\n labels = [1] * len(answer) + [0] * len(negs)\n\n seq = seq + [self.mask_token]\n seq = seq[-self.max_len:]\n padding_len = self.max_len - len(seq)\n seq = [0] * padding_len + seq\n\n return torch.LongTensor(seq), torch.LongTensor(candidates), torch.LongTensor(labels)", "_____no_output_____" ], [ "def dataloader_factory(args):\n dataset = dataset_factory(args)\n if args.model_code == 'bert':\n dataloader = BERTDataloader(args, dataset)\n elif args.model_code == 'sas':\n dataloader = SASDataloader(args, dataset)\n else:\n dataloader = RNNDataloader(args, dataset)\n train, val, test = dataloader.get_pytorch_dataloaders()\n return train, val, test", "_____no_output_____" ] ], [ [ "## Args", "_____no_output_____" ] ], [ [ "import numpy as np\nimport random\nimport torch\nimport argparse", "_____no_output_____" ], [ "def set_template(args):\n args.min_uc = 5\n args.min_sc = 5\n args.split = 'leave_one_out'\n dataset_code = {'1': 'ml-1m', '20': 'ml-20m', 'b': 'beauty', 'bd': 'beauty_dense' , 'g': 'games', 's': 'steam', 'y': 'yoochoose'}\n args.dataset_code = dataset_code[input('Input 1 / 20 for movielens, b for beauty, bd for dense beauty, g for games, s for steam and y for yoochoose: ')]\n if args.dataset_code == 'ml-1m':\n args.sliding_window_size = 0.5\n args.bert_hidden_units = 64\n args.bert_dropout = 0.1\n args.bert_attn_dropout = 0.1\n args.bert_max_len = 200\n args.bert_mask_prob = 0.2\n args.bert_max_predictions = 40\n elif args.dataset_code == 'ml-20m':\n args.sliding_window_size = 0.5\n args.bert_hidden_units = 64\n args.bert_dropout = 0.1\n args.bert_attn_dropout = 0.1\n args.bert_max_len = 200\n args.bert_mask_prob = 0.2\n args.bert_max_predictions = 20\n elif args.dataset_code in ['beauty', 'beauty_dense']:\n args.sliding_window_size = 0.5\n args.bert_hidden_units = 64\n args.bert_dropout = 0.5\n args.bert_attn_dropout = 0.2\n 
args.bert_max_len = 50\n args.bert_mask_prob = 0.6\n args.bert_max_predictions = 30\n elif args.dataset_code == 'games':\n args.sliding_window_size = 0.5\n args.bert_hidden_units = 64\n args.bert_dropout = 0.5\n args.bert_attn_dropout = 0.5\n args.bert_max_len = 50\n args.bert_mask_prob = 0.5\n args.bert_max_predictions = 25\n elif args.dataset_code == 'steam':\n args.sliding_window_size = 0.5\n args.bert_hidden_units = 64\n args.bert_dropout = 0.2\n args.bert_attn_dropout = 0.2\n args.bert_max_len = 50\n args.bert_mask_prob = 0.4\n args.bert_max_predictions = 20\n elif args.dataset_code == 'yoochoose':\n args.sliding_window_size = 0.5\n args.bert_hidden_units = 256\n args.bert_dropout = 0.2\n args.bert_attn_dropout = 0.2\n args.bert_max_len = 50\n args.bert_mask_prob = 0.4\n args.bert_max_predictions = 20\n\n batch = 128\n args.train_batch_size = batch\n args.val_batch_size = batch\n args.test_batch_size = batch\n args.train_negative_sampler_code = 'random'\n args.train_negative_sample_size = 0\n args.train_negative_sampling_seed = 0\n args.test_negative_sampler_code = 'random'\n args.test_negative_sample_size = 100\n args.test_negative_sampling_seed = 98765\n\n model_codes = {'b': 'bert', 's':'sas', 'n':'narm'}\n args.model_code = model_codes[input('Input model code, b for BERT, s for SASRec and n for NARM: ')]\n\n if torch.cuda.is_available():\n args.device = 'cuda:' + input('Input GPU ID: ')\n else:\n args.device = 'cpu'\n args.optimizer = 'AdamW'\n args.lr = 0.001\n args.weight_decay = 0.01\n args.enable_lr_schedule = True\n args.decay_step = 10000\n args.gamma = 1.\n args.enable_lr_warmup = False\n args.warmup_steps = 100\n args.num_epochs = 1000\n\n args.metric_ks = [1, 5, 10]\n args.best_metric = 'NDCG@10'\n args.model_init_seed = 98765\n args.bert_num_blocks = 2\n args.bert_num_heads = 2\n args.bert_head_size = None", "_____no_output_____" ], [ "parser = argparse.ArgumentParser()\n\n################\n# Dataset\n################\nparser.add_argument('--dataset_code', type=str, default='ml-1m', choices=DATASETS.keys())\nparser.add_argument('--min_rating', type=int, default=0)\nparser.add_argument('--min_uc', type=int, default=5)\nparser.add_argument('--min_sc', type=int, default=5)\nparser.add_argument('--split', type=str, default='leave_one_out')\nparser.add_argument('--dataset_split_seed', type=int, default=0)\n\n################\n# Dataloader\n################\nparser.add_argument('--dataloader_random_seed', type=float, default=0)\nparser.add_argument('--train_batch_size', type=int, default=64)\nparser.add_argument('--val_batch_size', type=int, default=64)\nparser.add_argument('--test_batch_size', type=int, default=64)\nparser.add_argument('--sliding_window_size', type=float, default=0.5)\n\n################\n# NegativeSampler\n################\nparser.add_argument('--train_negative_sampler_code', type=str, default='random', choices=['popular', 'random'])\nparser.add_argument('--train_negative_sample_size', type=int, default=0)\nparser.add_argument('--train_negative_sampling_seed', type=int, default=0)\nparser.add_argument('--test_negative_sampler_code', type=str, default='random', choices=['popular', 'random'])\nparser.add_argument('--test_negative_sample_size', type=int, default=100)\nparser.add_argument('--test_negative_sampling_seed', type=int, default=0)\n\n################\n# Trainer\n################\n# device #\nparser.add_argument('--device', type=str, default='cpu', choices=['cpu', 'cuda'])\nparser.add_argument('--num_gpu', type=int, default=1)\n# optimizer & 
lr#\nparser.add_argument('--optimizer', type=str, default='AdamW', choices=['AdamW', 'Adam', 'SGD'])\nparser.add_argument('--weight_decay', type=float, default=0)\nparser.add_argument('--adam_epsilon', type=float, default=1e-9)\nparser.add_argument('--momentum', type=float, default=None)\nparser.add_argument('--lr', type=float, default=0.001)\nparser.add_argument('--enable_lr_schedule', type=bool, default=True)\nparser.add_argument('--decay_step', type=int, default=100)\nparser.add_argument('--gamma', type=float, default=1)\nparser.add_argument('--enable_lr_warmup', type=bool, default=True)\nparser.add_argument('--warmup_steps', type=int, default=100)\n# epochs #\nparser.add_argument('--num_epochs', type=int, default=100)\n# logger #\nparser.add_argument('--log_period_as_iter', type=int, default=12800)\n# evaluation #\nparser.add_argument('--metric_ks', nargs='+', type=int, default=[1, 5, 10, 20])\nparser.add_argument('--best_metric', type=str, default='NDCG@10')\n\n################\n# Model\n################\nparser.add_argument('--model_code', type=str, default='bert', choices=['bert', 'sas', 'narm'])\n# BERT specs, used for SASRec and NARM as well #\nparser.add_argument('--bert_max_len', type=int, default=None)\nparser.add_argument('--bert_hidden_units', type=int, default=64)\nparser.add_argument('--bert_num_blocks', type=int, default=2)\nparser.add_argument('--bert_num_heads', type=int, default=2)\nparser.add_argument('--bert_head_size', type=int, default=32)\nparser.add_argument('--bert_dropout', type=float, default=0.1)\nparser.add_argument('--bert_attn_dropout', type=float, default=0.1)\nparser.add_argument('--bert_mask_prob', type=float, default=0.2)\n\n################\n# Distillation & Retraining\n################\nparser.add_argument('--num_generated_seqs', type=int, default=3000)\nparser.add_argument('--num_original_seqs', type=int, default=0)\nparser.add_argument('--num_poisoned_seqs', type=int, default=100)\nparser.add_argument('--num_alter_items', type=int, default=10)\n\n################\n\nargs = parser.parse_args(args={})\n\nprint('\\n'.join(f'{k}={v}' for k, v in vars(args).items()))", "dataset_code=ml-1m\nmin_rating=0\nmin_uc=5\nmin_sc=5\nsplit=leave_one_out\ndataset_split_seed=0\ndataloader_random_seed=0\ntrain_batch_size=64\nval_batch_size=64\ntest_batch_size=64\nsliding_window_size=0.5\ntrain_negative_sampler_code=random\ntrain_negative_sample_size=0\ntrain_negative_sampling_seed=0\ntest_negative_sampler_code=random\ntest_negative_sample_size=100\ntest_negative_sampling_seed=0\ndevice=cpu\nnum_gpu=1\noptimizer=AdamW\nweight_decay=0\nadam_epsilon=1e-09\nmomentum=None\nlr=0.001\nenable_lr_schedule=True\ndecay_step=100\ngamma=1\nenable_lr_warmup=True\nwarmup_steps=100\nnum_epochs=100\nlog_period_as_iter=12800\nmetric_ks=[1, 5, 10, 20]\nbest_metric=NDCG@10\nmodel_code=bert\nbert_max_len=None\nbert_hidden_units=64\nbert_num_blocks=2\nbert_num_heads=2\nbert_head_size=32\nbert_dropout=0.1\nbert_attn_dropout=0.1\nbert_mask_prob=0.2\nnum_generated_seqs=3000\nnum_original_seqs=0\nnum_poisoned_seqs=100\nnum_alter_items=10\n" ], [ "set_template(args)\nprint('\\n'.join(f'{k}={v}' for k, v in vars(args).items()))", "Input 1 / 20 for movielens, b for beauty, bd for dense beauty, g for games, s for steam and y for yoochoose: 1\nInput model code, b for BERT, s for SASRec and n for NARM: 
b\ndataset_code=ml-1m\nmin_rating=0\nmin_uc=5\nmin_sc=5\nsplit=leave_one_out\ndataset_split_seed=0\ndataloader_random_seed=0\ntrain_batch_size=128\nval_batch_size=128\ntest_batch_size=128\nsliding_window_size=0.5\ntrain_negative_sampler_code=random\ntrain_negative_sample_size=0\ntrain_negative_sampling_seed=0\ntest_negative_sampler_code=random\ntest_negative_sample_size=100\ntest_negative_sampling_seed=98765\ndevice=cpu\nnum_gpu=1\noptimizer=AdamW\nweight_decay=0.01\nadam_epsilon=1e-09\nmomentum=None\nlr=0.001\nenable_lr_schedule=True\ndecay_step=10000\ngamma=1.0\nenable_lr_warmup=False\nwarmup_steps=100\nnum_epochs=1000\nlog_period_as_iter=12800\nmetric_ks=[1, 5, 10]\nbest_metric=NDCG@10\nmodel_code=bert\nbert_max_len=200\nbert_hidden_units=64\nbert_num_blocks=2\nbert_num_heads=2\nbert_head_size=None\nbert_dropout=0.1\nbert_attn_dropout=0.1\nbert_mask_prob=0.2\nnum_generated_seqs=3000\nnum_original_seqs=0\nnum_poisoned_seqs=100\nnum_alter_items=10\nbert_max_predictions=40\nmodel_init_seed=98765\n" ], [ "def fix_random_seed_as(random_seed):\n random.seed(random_seed)\n np.random.seed(random_seed)\n torch.manual_seed(random_seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "_____no_output_____" ], [ "fix_random_seed_as(args.model_init_seed)", "_____no_output_____" ], [ "import dvc.api", "_____no_output_____" ], [ "train_loader, val_loader, test_loader = dataloader_factory(args)", "Raw file doesn't exist. Downloading...\n" ] ], [ [ "## Model", "_____no_output_____" ] ], [ [ "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass TokenEmbedding(nn.Embedding):\n def __init__(self, vocab_size, embed_size=512):\n super().__init__(vocab_size, embed_size, padding_idx=0)\n\n\nclass PositionalEmbedding(nn.Module):\n def __init__(self, max_len, d_model):\n super().__init__()\n self.d_model = d_model\n self.pe = nn.Embedding(max_len+1, d_model)\n\n def forward(self, x):\n pose = (x > 0) * (x > 0).sum(dim=-1).unsqueeze(1).repeat(1, x.size(-1))\n pose += torch.arange(start=-(x.size(1)-1), end=1, step=1, device=x.device)\n pose = pose * (x > 0)\n\n return self.pe(pose)\n\n\nclass GELU(nn.Module):\n def forward(self, x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\n\nclass PositionwiseFeedForward(nn.Module):\n def __init__(self, d_model, d_ff):\n super().__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.activation = GELU()\n\n def forward(self, x):\n return self.w_2(self.activation(self.w_1(x)))\n\n\n# layer norm\nclass LayerNorm(nn.Module):\n def __init__(self, features, eps=1e-6):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(features))\n self.bias = nn.Parameter(torch.zeros(features))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.weight * (x - mean) / (std + self.eps) + self.bias\n\n\n# layer norm and dropout (dropout and then layer norm)\nclass SublayerConnection(nn.Module):\n def __init__(self, size, dropout):\n super().__init__()\n self.layer_norm = LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, sublayer):\n # return x + self.dropout(sublayer(self.norm(x))) # original implementation\n return self.layer_norm(x + self.dropout(sublayer(x))) # BERT4Rec implementation\n\n\nclass Attention(nn.Module):\n def forward(self, query, key, value, mask=None, dropout=None, sas=False):\n scores = torch.matmul(query, 
key.transpose(-2, -1)) \\\n / math.sqrt(query.size(-1))\n\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n\n if sas:\n direction_mask = torch.ones_like(scores)\n direction_mask = torch.tril(direction_mask)\n scores = scores.masked_fill(direction_mask == 0, -1e9)\n\n p_attn = F.softmax(scores, dim=-1)\n\n if dropout is not None:\n p_attn = dropout(p_attn)\n\n return torch.matmul(p_attn, value), p_attn\n\n\nclass MultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, head_size=None, dropout=0.1):\n super().__init__()\n assert d_model % h == 0\n\n self.h = h\n self.d_k = d_model // h\n if head_size is not None:\n self.head_size = head_size\n else:\n self.head_size = d_model // h\n\n self.linear_layers = nn.ModuleList(\n [nn.Linear(d_model, self.h * self.head_size) for _ in range(3)])\n self.attention = Attention()\n self.dropout = nn.Dropout(p=dropout)\n self.output_linear = nn.Linear(self.h * self.head_size, d_model)\n\n def forward(self, query, key, value, mask=None):\n batch_size = query.size(0)\n\n # 1) do all the linear projections in batch from d_model => h x d_k\n query, key, value = [l(x).view(batch_size, -1, self.h, self.head_size).transpose(1, 2)\n for l, x in zip(self.linear_layers, (query, key, value))]\n \n # 2) apply attention on all the projected vectors in batch.\n x, attn = self.attention(\n query, key, value, mask=mask, dropout=self.dropout)\n\n # 3) \"concat\" using a view and apply a final linear.\n x = x.transpose(1, 2).contiguous().view(\n batch_size, -1, self.h * self.head_size)\n return self.output_linear(x)\n\n\nclass TransformerBlock(nn.Module):\n def __init__(self, hidden, attn_heads, head_size, feed_forward_hidden, dropout, attn_dropout=0.1):\n super().__init__()\n self.attention = MultiHeadedAttention(\n h=attn_heads, d_model=hidden, head_size=head_size, dropout=attn_dropout)\n self.feed_forward = PositionwiseFeedForward(\n d_model=hidden, d_ff=feed_forward_hidden)\n self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)\n self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout)\n\n def forward(self, x, mask):\n x = self.input_sublayer(\n x, lambda _x: self.attention.forward(_x, _x, _x, mask=mask))\n x = self.output_sublayer(x, self.feed_forward)\n return x\n\n\nclass SASMultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, head_size=None, dropout=0.1):\n super().__init__()\n assert d_model % h == 0\n\n self.h = h\n self.d_k = d_model // h\n if head_size is not None:\n self.head_size = head_size\n else:\n self.head_size = d_model // h\n\n self.linear_layers = nn.ModuleList(\n [nn.Linear(d_model, self.h * self.head_size) for _ in range(3)])\n self.attention = Attention()\n self.dropout = nn.Dropout(p=dropout)\n self.layer_norm = LayerNorm(d_model)\n\n def forward(self, query, key, value, mask=None):\n batch_size = query.size(0)\n\n # 1) do all the linear projections in batch from d_model => h x d_k\n query_, key_, value_ = [l(x).view(batch_size, -1, self.h, self.head_size).transpose(1, 2)\n for l, x in zip(self.linear_layers, (query, key, value))]\n \n # 2) apply attention on all the projected vectors in batch.\n x, attn = self.attention(\n query_, key_, value_, mask=mask, dropout=self.dropout, sas=True)\n\n # 3) \"concat\" using a view and apply a final linear.\n x = x.transpose(1, 2).contiguous().view(\n batch_size, -1, self.h * self.head_size)\n \n return self.layer_norm(x + query)\n\n\nclass SASPositionwiseFeedForward(nn.Module):\n def __init__(self, d_model, d_ff, dropout=0.1):\n 
super().__init__()\n self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n self.activation = nn.ReLU()\n self.dropout = nn.Dropout(dropout)\n self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n self.layer_norm = LayerNorm(d_model)\n\n def forward(self, x):\n x_ = self.dropout(self.activation(self.conv1(x.permute(0, 2, 1))))\n return self.layer_norm(self.dropout(self.conv2(x_)).permute(0, 2, 1) + x)\n\n\nclass SASTransformerBlock(nn.Module):\n def __init__(self, hidden, attn_heads, head_size, feed_forward_hidden, dropout, attn_dropout=0.1):\n super().__init__()\n self.layer_norm = LayerNorm(hidden)\n self.attention = SASMultiHeadedAttention(\n h=attn_heads, d_model=hidden, head_size=head_size, dropout=attn_dropout)\n self.feed_forward = SASPositionwiseFeedForward(\n d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout)\n\n def forward(self, x, mask):\n x = self.attention(self.layer_norm(x), x, x, mask)\n x = self.feed_forward(x)\n return x", "_____no_output_____" ], [ "from torch import nn as nn\nimport math\n\n\nclass BERT(nn.Module):\n def __init__(self, args):\n super().__init__()\n self.args = args\n self.embedding = BERTEmbedding(self.args)\n self.model = BERTModel(self.args)\n self.truncated_normal_init()\n\n def truncated_normal_init(self, mean=0, std=0.02, lower=-0.04, upper=0.04):\n with torch.no_grad():\n l = (1. + math.erf(((lower - mean) / std) / math.sqrt(2.))) / 2.\n u = (1. + math.erf(((upper - mean) / std) / math.sqrt(2.))) / 2.\n\n for n, p in self.model.named_parameters():\n if not 'layer_norm' in n:\n p.uniform_(2 * l - 1, 2 * u - 1)\n p.erfinv_()\n p.mul_(std * math.sqrt(2.))\n p.add_(mean)\n \n def forward(self, x):\n x, mask = self.embedding(x)\n scores = self.model(x, self.embedding.token.weight, mask)\n return scores\n\n\nclass BERTEmbedding(nn.Module):\n def __init__(self, args):\n super().__init__()\n vocab_size = args.num_items + 2\n hidden = args.bert_hidden_units\n max_len = args.bert_max_len\n dropout = args.bert_dropout\n\n self.token = TokenEmbedding(\n vocab_size=vocab_size, embed_size=hidden)\n self.position = PositionalEmbedding(\n max_len=max_len, d_model=hidden)\n\n self.layer_norm = LayerNorm(features=hidden)\n self.dropout = nn.Dropout(p=dropout)\n\n def get_mask(self, x):\n if len(x.shape) > 2:\n x = torch.ones(x.shape[:2]).to(x.device)\n return (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)\n\n def forward(self, x):\n mask = self.get_mask(x)\n if len(x.shape) > 2:\n pos = self.position(torch.ones(x.shape[:2]).to(x.device))\n x = torch.matmul(x, self.token.weight) + pos\n else:\n x = self.token(x) + self.position(x)\n return self.dropout(self.layer_norm(x)), mask\n\n\nclass BERTModel(nn.Module):\n def __init__(self, args):\n super().__init__()\n hidden = args.bert_hidden_units\n heads = args.bert_num_heads\n head_size = args.bert_head_size\n dropout = args.bert_dropout\n attn_dropout = args.bert_attn_dropout\n layers = args.bert_num_blocks\n\n self.transformer_blocks = nn.ModuleList([TransformerBlock(\n hidden, heads, head_size, hidden * 4, dropout, attn_dropout) for _ in range(layers)])\n self.linear = nn.Linear(hidden, hidden)\n self.bias = torch.nn.Parameter(torch.zeros(args.num_items + 2))\n self.bias.requires_grad = True\n self.activation = GELU()\n\n def forward(self, x, embedding_weight, mask):\n for transformer in self.transformer_blocks:\n x = transformer.forward(x, mask)\n x = self.activation(self.linear(x))\n scores = torch.matmul(x, embedding_weight.permute(1, 0)) + 
self.bias\n return scores", "_____no_output_____" ] ], [ [ "## Run", "_____no_output_____" ] ], [ [ "if args.model_code == 'bert':\n model = BERT(args)\n# elif args.model_code == 'sas':\n# model = SASRec(args)\n# elif args.model_code == 'narm':\n# model = NARM(args)", "_____no_output_____" ], [ "export_root = 'experiments/' + args.model_code + '/' + args.dataset_code", "_____no_output_____" ], [ "resume=False\nif resume:\n try: \n model.load_state_dict(torch.load(os.path.join(export_root, 'models', 'best_acc_model.pth'), map_location='cpu').get(STATE_DICT_KEY))\n except FileNotFoundError:\n print('Failed to load old model, continue training new model...')", "_____no_output_____" ] ], [ [ "## Trainer", "_____no_output_____" ] ], [ [ "STATE_DICT_KEY = 'model_state_dict'\nOPTIMIZER_STATE_DICT_KEY = 'optimizer_state_dict'", "_____no_output_____" ], [ "import os\nimport torch\nfrom abc import ABCMeta, abstractmethod\n\n\ndef save_state_dict(state_dict, path, filename):\n torch.save(state_dict, os.path.join(path, filename))\n\n\nclass LoggerService(object):\n def __init__(self, train_loggers=None, val_loggers=None):\n self.train_loggers = train_loggers if train_loggers else []\n self.val_loggers = val_loggers if val_loggers else []\n\n def complete(self, log_data):\n for logger in self.train_loggers:\n logger.complete(**log_data)\n for logger in self.val_loggers:\n logger.complete(**log_data)\n\n def log_train(self, log_data):\n for logger in self.train_loggers:\n logger.log(**log_data)\n\n def log_val(self, log_data):\n for logger in self.val_loggers:\n logger.log(**log_data)\n\n\nclass AbstractBaseLogger(metaclass=ABCMeta):\n @abstractmethod\n def log(self, *args, **kwargs):\n raise NotImplementedError\n\n def complete(self, *args, **kwargs):\n pass\n\n\nclass RecentModelLogger(AbstractBaseLogger):\n def __init__(self, checkpoint_path, filename='checkpoint-recent.pth'):\n self.checkpoint_path = checkpoint_path\n if not os.path.exists(self.checkpoint_path):\n os.mkdir(self.checkpoint_path)\n self.recent_epoch = None\n self.filename = filename\n\n def log(self, *args, **kwargs):\n epoch = kwargs['epoch']\n\n if self.recent_epoch != epoch:\n self.recent_epoch = epoch\n state_dict = kwargs['state_dict']\n state_dict['epoch'] = kwargs['epoch']\n save_state_dict(state_dict, self.checkpoint_path, self.filename)\n\n def complete(self, *args, **kwargs):\n save_state_dict(kwargs['state_dict'],\n self.checkpoint_path, self.filename + '.final')\n\n\nclass BestModelLogger(AbstractBaseLogger):\n def __init__(self, checkpoint_path, metric_key='mean_iou', filename='best_acc_model.pth'):\n self.checkpoint_path = checkpoint_path\n if not os.path.exists(self.checkpoint_path):\n os.mkdir(self.checkpoint_path)\n\n self.best_metric = 0.\n self.metric_key = metric_key\n self.filename = filename\n\n def log(self, *args, **kwargs):\n current_metric = kwargs[self.metric_key]\n if self.best_metric < current_metric:\n print(\"Update Best {} Model at {}\".format(\n self.metric_key, kwargs['epoch']))\n self.best_metric = current_metric\n save_state_dict(kwargs['state_dict'],\n self.checkpoint_path, self.filename)\n\n\nclass MetricGraphPrinter(AbstractBaseLogger):\n def __init__(self, writer, key='train_loss', graph_name='Train Loss', group_name='metric'):\n self.key = key\n self.graph_label = graph_name\n self.group_name = group_name\n self.writer = writer\n\n def log(self, *args, **kwargs):\n if self.key in kwargs:\n self.writer.add_scalar(\n self.group_name + '/' + self.graph_label, kwargs[self.key], 
kwargs['accum_iter'])\n else:\n self.writer.add_scalar(\n self.group_name + '/' + self.graph_label, 0, kwargs['accum_iter'])\n\n def complete(self, *args, **kwargs):\n self.writer.close()", "_____no_output_____" ], [ "import json\nimport os\nimport pprint as pp\nimport random\nfrom datetime import date\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom torch import optim as optim\n\n\ndef ndcg(scores, labels, k):\n scores = scores.cpu()\n labels = labels.cpu()\n rank = (-scores).argsort(dim=1)\n cut = rank[:, :k]\n hits = labels.gather(1, cut)\n position = torch.arange(2, 2+k)\n weights = 1 / torch.log2(position.float())\n dcg = (hits.float() * weights).sum(1)\n idcg = torch.Tensor([weights[:min(int(n), k)].sum()\n for n in labels.sum(1)])\n ndcg = dcg / idcg\n return ndcg.mean()\n\n\ndef recalls_and_ndcgs_for_ks(scores, labels, ks):\n metrics = {}\n\n scores = scores\n labels = labels\n answer_count = labels.sum(1)\n\n labels_float = labels.float()\n rank = (-scores).argsort(dim=1)\n\n cut = rank\n for k in sorted(ks, reverse=True):\n cut = cut[:, :k]\n hits = labels_float.gather(1, cut)\n metrics['Recall@%d' % k] = \\\n (hits.sum(1) / torch.min(torch.Tensor([k]).to(\n labels.device), labels.sum(1).float())).mean().cpu().item()\n\n position = torch.arange(2, 2+k)\n weights = 1 / torch.log2(position.float())\n dcg = (hits * weights.to(hits.device)).sum(1)\n idcg = torch.Tensor([weights[:min(int(n), k)].sum()\n for n in answer_count]).to(dcg.device)\n ndcg = (dcg / idcg).mean()\n metrics['NDCG@%d' % k] = ndcg.cpu().item()\n return metrics\n\n\ndef em_and_agreement(scores_rank, labels_rank):\n em = (scores_rank == labels_rank).float().mean()\n temp = np.hstack((scores_rank.numpy(), labels_rank.numpy()))\n temp = np.sort(temp, axis=1)\n agreement = np.mean(np.sum(temp[:, 1:] == temp[:, :-1], axis=1))\n return em, agreement\n\n\ndef kl_agreements_and_intersctions_for_ks(scores, soft_labels, ks, k_kl=100):\n metrics = {}\n scores = scores.cpu()\n soft_labels = soft_labels.cpu()\n scores_rank = (-scores).argsort(dim=1)\n labels_rank = (-soft_labels).argsort(dim=1)\n\n top_kl_scores = F.log_softmax(scores.gather(1, labels_rank[:, :k_kl]), dim=-1)\n top_kl_labels = F.softmax(soft_labels.gather(1, labels_rank[:, :k_kl]), dim=-1)\n kl = F.kl_div(top_kl_scores, top_kl_labels, reduction='batchmean')\n metrics['KL-Div'] = kl.item()\n for k in sorted(ks, reverse=True):\n em, agreement = em_and_agreement(scores_rank[:, :k], labels_rank[:, :k])\n metrics['EM@%d' % k] = em.item()\n metrics['Agr@%d' % k] = (agreement / k).item()\n return metrics\n\n\nclass AverageMeterSet(object):\n def __init__(self, meters=None):\n self.meters = meters if meters else {}\n\n def __getitem__(self, key):\n if key not in self.meters:\n meter = AverageMeter()\n meter.update(0)\n return meter\n return self.meters[key]\n\n def update(self, name, value, n=1):\n if name not in self.meters:\n self.meters[name] = AverageMeter()\n self.meters[name].update(value, n)\n\n def reset(self):\n for meter in self.meters.values():\n meter.reset()\n\n def values(self, format_string='{}'):\n return {format_string.format(name): meter.val for name, meter in self.meters.items()}\n\n def averages(self, format_string='{}'):\n return {format_string.format(name): meter.avg for name, meter in self.meters.items()}\n\n def sums(self, format_string='{}'):\n return {format_string.format(name): meter.sum for name, meter in self.meters.items()}\n\n def counts(self, 
format_string='{}'):\n return {format_string.format(name): meter.count for name, meter in self.meters.items()}\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val\n self.count += n\n self.avg = self.sum / self.count\n\n def __format__(self, format):\n return \"{self.val:{format}} ({self.avg:{format}})\".format(self=self, format=format)", "_____no_output_____" ], [ "!pip install faiss-cpu --no-cache\n!apt-get install libomp-dev", "Collecting faiss-cpu\n Downloading faiss_cpu-1.7.1.post2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (8.4 MB)\n |████████████████████████████████| 8.4 MB 2.0 MB/s \n\u001b[?25hInstalling collected packages: faiss-cpu\nSuccessfully installed faiss-cpu-1.7.1.post2\nReading package lists... Done\nBuilding dependency tree \nReading state information... Done\nThe following additional packages will be installed:\n libomp5\nSuggested packages:\n libomp-doc\nThe following NEW packages will be installed:\n libomp-dev libomp5\n0 upgraded, 2 newly installed, 0 to remove and 40 not upgraded.\nNeed to get 239 kB of archives.\nAfter this operation, 804 kB of additional disk space will be used.\nGet:1 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libomp5 amd64 5.0.1-1 [234 kB]\nGet:2 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libomp-dev amd64 5.0.1-1 [5,088 B]\nFetched 239 kB in 1s (361 kB/s)\nSelecting previously unselected package libomp5:amd64.\n(Reading database ... 148492 files and directories currently installed.)\nPreparing to unpack .../libomp5_5.0.1-1_amd64.deb ...\nUnpacking libomp5:amd64 (5.0.1-1) ...\nSelecting previously unselected package libomp-dev.\nPreparing to unpack .../libomp-dev_5.0.1-1_amd64.deb ...\nUnpacking libomp-dev (5.0.1-1) ...\nSetting up libomp5:amd64 (5.0.1-1) ...\nSetting up libomp-dev (5.0.1-1) ...\nProcessing triggers for libc-bin (2.27-3ubuntu1.2) ...\n/sbin/ldconfig.real: /usr/local/lib/python3.7/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n\n" ], [ "# from config import STATE_DICT_KEY, OPTIMIZER_STATE_DICT_KEY\n# from .utils import *\n# from .loggers import *\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nimport json\nimport faiss\nimport numpy as np\nfrom abc import *\nfrom pathlib import Path\n\n\nclass BERTTrainer(metaclass=ABCMeta):\n def __init__(self, args, model, train_loader, val_loader, test_loader, export_root):\n self.args = args\n self.device = args.device\n self.model = model.to(self.device)\n self.is_parallel = args.num_gpu > 1\n if self.is_parallel:\n self.model = nn.DataParallel(self.model)\n\n self.num_epochs = args.num_epochs\n self.metric_ks = args.metric_ks\n self.best_metric = args.best_metric\n self.train_loader = train_loader\n self.val_loader = val_loader\n self.test_loader = test_loader\n self.optimizer = self._create_optimizer()\n if args.enable_lr_schedule:\n if args.enable_lr_warmup:\n self.lr_scheduler = self.get_linear_schedule_with_warmup(\n self.optimizer, args.warmup_steps, len(train_loader) * self.num_epochs)\n else:\n self.lr_scheduler = optim.lr_scheduler.StepLR(\n self.optimizer, 
step_size=args.decay_step, gamma=args.gamma)\n \n self.export_root = export_root\n self.writer, self.train_loggers, self.val_loggers = self._create_loggers()\n self.logger_service = LoggerService(\n self.train_loggers, self.val_loggers)\n self.log_period_as_iter = args.log_period_as_iter\n\n self.ce = nn.CrossEntropyLoss(ignore_index=0)\n\n def train(self):\n accum_iter = 0\n self.validate(0, accum_iter)\n for epoch in range(self.num_epochs):\n accum_iter = self.train_one_epoch(epoch, accum_iter)\n self.validate(epoch, accum_iter)\n self.logger_service.complete({\n 'state_dict': (self._create_state_dict()),\n })\n self.writer.close()\n\n def train_one_epoch(self, epoch, accum_iter):\n self.model.train()\n average_meter_set = AverageMeterSet()\n tqdm_dataloader = tqdm(self.train_loader)\n\n for batch_idx, batch in enumerate(tqdm_dataloader):\n batch_size = batch[0].size(0)\n batch = [x.to(self.device) for x in batch]\n\n self.optimizer.zero_grad()\n loss = self.calculate_loss(batch)\n loss.backward()\n self.clip_gradients(5)\n self.optimizer.step()\n if self.args.enable_lr_schedule:\n self.lr_scheduler.step()\n\n average_meter_set.update('loss', loss.item())\n tqdm_dataloader.set_description(\n 'Epoch {}, loss {:.3f} '.format(epoch+1, average_meter_set['loss'].avg))\n\n accum_iter += batch_size\n\n if self._needs_to_log(accum_iter):\n tqdm_dataloader.set_description('Logging to Tensorboard')\n log_data = {\n 'state_dict': (self._create_state_dict()),\n 'epoch': epoch + 1,\n 'accum_iter': accum_iter,\n }\n log_data.update(average_meter_set.averages())\n self.logger_service.log_train(log_data)\n\n return accum_iter\n\n def validate(self, epoch, accum_iter):\n self.model.eval()\n\n average_meter_set = AverageMeterSet()\n\n with torch.no_grad():\n tqdm_dataloader = tqdm(self.val_loader)\n for batch_idx, batch in enumerate(tqdm_dataloader):\n batch = [x.to(self.device) for x in batch]\n\n metrics = self.calculate_metrics(batch)\n self._update_meter_set(average_meter_set, metrics)\n self._update_dataloader_metrics(\n tqdm_dataloader, average_meter_set)\n\n log_data = {\n 'state_dict': (self._create_state_dict()),\n 'epoch': epoch+1,\n 'accum_iter': accum_iter,\n }\n log_data.update(average_meter_set.averages())\n self.logger_service.log_val(log_data)\n\n def test(self):\n best_model_dict = torch.load(os.path.join(\n self.export_root, 'models', 'best_acc_model.pth')).get(STATE_DICT_KEY)\n self.model.load_state_dict(best_model_dict)\n self.model.eval()\n\n average_meter_set = AverageMeterSet()\n\n all_scores = []\n average_scores = []\n with torch.no_grad():\n tqdm_dataloader = tqdm(self.test_loader)\n for batch_idx, batch in enumerate(tqdm_dataloader):\n batch = [x.to(self.device) for x in batch]\n metrics = self.calculate_metrics(batch)\n \n # seqs, candidates, labels = batch\n # scores = self.model(seqs)\n # scores = scores[:, -1, :]\n # scores_sorted, indices = torch.sort(scores, dim=-1, descending=True)\n # all_scores += scores_sorted[:, :100].cpu().numpy().tolist()\n # average_scores += scores_sorted.cpu().numpy().tolist()\n # scores = scores.gather(1, candidates)\n # metrics = recalls_and_ndcgs_for_ks(scores, labels, self.metric_ks)\n\n self._update_meter_set(average_meter_set, metrics)\n self._update_dataloader_metrics(\n tqdm_dataloader, average_meter_set)\n\n average_metrics = average_meter_set.averages()\n with open(os.path.join(self.export_root, 'logs', 'test_metrics.json'), 'w') as f:\n json.dump(average_metrics, f, indent=4)\n \n return average_metrics\n\n def calculate_loss(self, 
batch):\n seqs, labels = batch\n logits = self.model(seqs)\n\n logits = logits.view(-1, logits.size(-1))\n labels = labels.view(-1)\n loss = self.ce(logits, labels)\n return loss\n\n def calculate_metrics(self, batch):\n seqs, candidates, labels = batch\n\n scores = self.model(seqs)\n scores = scores[:, -1, :]\n scores = scores.gather(1, candidates)\n\n metrics = recalls_and_ndcgs_for_ks(scores, labels, self.metric_ks)\n return metrics\n\n def clip_gradients(self, limit=5):\n for p in self.model.parameters():\n nn.utils.clip_grad_norm_(p, 5)\n\n def _update_meter_set(self, meter_set, metrics):\n for k, v in metrics.items():\n meter_set.update(k, v)\n\n def _update_dataloader_metrics(self, tqdm_dataloader, meter_set):\n description_metrics = ['NDCG@%d' % k for k in self.metric_ks[:3]\n ] + ['Recall@%d' % k for k in self.metric_ks[:3]]\n description = 'Eval: ' + \\\n ', '.join(s + ' {:.3f}' for s in description_metrics)\n description = description.replace('NDCG', 'N').replace('Recall', 'R')\n description = description.format(\n *(meter_set[k].avg for k in description_metrics))\n tqdm_dataloader.set_description(description)\n\n def _create_optimizer(self):\n args = self.args\n param_optimizer = list(self.model.named_parameters())\n no_decay = ['bias', 'layer_norm']\n optimizer_grouped_parameters = [\n {\n 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay,\n },\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},\n ]\n if args.optimizer.lower() == 'adamw':\n return optim.AdamW(optimizer_grouped_parameters, lr=args.lr, eps=args.adam_epsilon)\n elif args.optimizer.lower() == 'adam':\n return optim.Adam(optimizer_grouped_parameters, lr=args.lr, weight_decay=args.weight_decay)\n elif args.optimizer.lower() == 'sgd':\n return optim.SGD(optimizer_grouped_parameters, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)\n else:\n raise ValueError\n\n def get_linear_schedule_with_warmup(self, optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):\n # based on hugging face get_linear_schedule_with_warmup\n def lr_lambda(current_step: int):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n return max(\n 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))\n )\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)\n\n def _create_loggers(self):\n root = Path(self.export_root)\n writer = SummaryWriter(root.joinpath('logs'))\n model_checkpoint = root.joinpath('models')\n\n train_loggers = [\n MetricGraphPrinter(writer, key='epoch',\n graph_name='Epoch', group_name='Train'),\n MetricGraphPrinter(writer, key='loss',\n graph_name='Loss', group_name='Train'),\n ]\n\n val_loggers = []\n for k in self.metric_ks:\n val_loggers.append(\n MetricGraphPrinter(writer, key='NDCG@%d' % k, graph_name='NDCG@%d' % k, group_name='Validation'))\n val_loggers.append(\n MetricGraphPrinter(writer, key='Recall@%d' % k, graph_name='Recall@%d' % k, group_name='Validation'))\n val_loggers.append(RecentModelLogger(model_checkpoint))\n val_loggers.append(BestModelLogger(\n model_checkpoint, metric_key=self.best_metric))\n return writer, train_loggers, val_loggers\n\n def _create_state_dict(self):\n return {\n STATE_DICT_KEY: self.model.module.state_dict() if self.is_parallel else self.model.state_dict(),\n OPTIMIZER_STATE_DICT_KEY: self.optimizer.state_dict(),\n }\n\n def _needs_to_log(self, 
accum_iter):\n        return accum_iter % self.log_period_as_iter < self.args.train_batch_size and accum_iter != 0", "_____no_output_____" ] ], [ [ "## Run", "_____no_output_____" ] ], [ [ "if args.model_code == 'bert':\n    trainer = BERTTrainer(args, model, train_loader, val_loader, test_loader, export_root)\nelif args.model_code == 'sas':\n    trainer = SASTrainer(args, model, train_loader, val_loader, test_loader, export_root)\nelif args.model_code == 'narm':\n    args.num_epochs = 100\n    trainer = RNNTrainer(args, model, train_loader, val_loader, test_loader, export_root)", "_____no_output_____" ], [ "trainer.train()", "Eval: N@1 0.009, N@5 0.025, N@10 0.040, R@1 0.009, R@5 0.043, R@10 0.091: 100%|██████████| 48/48 [00:51<00:00,  1.07s/it]\n" ], [ "trainer.test()", "_____no_output_____" ] ] ]
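The notebook above reports Recall@k and NDCG@k over one held-out item plus 100 sampled negatives per user. The sketch below is a minimal, self-contained illustration of that metric computation, not the notebook's exact code; the toy scores and labels are invented.

```python
import torch

def recall_ndcg_at_k(scores, labels, k):
    # scores: (batch, n_candidates) model scores for [held-out item + sampled negatives]
    # labels: (batch, n_candidates) with 1 marking the held-out item, 0 the negatives
    topk = (-scores).argsort(dim=1)[:, :k]                      # candidate indices ranked best-first
    hits = labels.gather(1, topk).float()                       # 1 where a top-k slot is the true item
    recall = (hits.sum(1) / labels.sum(1).clamp(min=1).float()).mean().item()
    weights = 1.0 / torch.log2(torch.arange(2, k + 2).float())  # DCG discounts 1/log2(rank + 1)
    dcg = (hits * weights).sum(1)
    idcg = torch.tensor([weights[:int(n)].sum().item() for n in labels.sum(1).clamp(max=k)])
    ndcg = (dcg / idcg).mean().item()
    return recall, ndcg

# toy batch: 2 users, 5 candidates each, the held-out item sits in column 0
scores = torch.tensor([[2.0, 1.0, 0.5, 0.1, 0.0],
                       [0.2, 1.5, 0.9, 0.4, 0.3]])
labels = torch.tensor([[1, 0, 0, 0, 0],
                       [1, 0, 0, 0, 0]])
print(recall_ndcg_at_k(scores, labels, k=3))  # first user hits at rank 1, second misses the top-3
```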
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0aad8f24bfdb42e73bc24f501f493cfcf3de1f9
283,556
ipynb
Jupyter Notebook
EDA.ipynb
haataa/Home-Credit-Default-Risk
629e6bbb886e4854d3b96ffee497df6508cba18e
[ "CNRI-Python" ]
null
null
null
EDA.ipynb
haataa/Home-Credit-Default-Risk
629e6bbb886e4854d3b96ffee497df6508cba18e
[ "CNRI-Python" ]
null
null
null
EDA.ipynb
haataa/Home-Credit-Default-Risk
629e6bbb886e4854d3b96ffee497df6508cba18e
[ "CNRI-Python" ]
null
null
null
78.612698
25,772
0.679185
[ [ [ "# Introduction\nrefer to this solution :https://www.kaggle.com/jsaguiar/lightgbm-7th-place-solution\n\nhttps://www.tinymind.cn/articles/3655\n\nhttps://github.com/Featuretools/Automated-Manual-Comparison/tree/master/Loan%20Repayment\n\nhttps://www.kaggle.com/willkoehrsen/start-here-a-gentle-introduction/", "_____no_output_____" ], [ "# Read Data", "_____no_output_____" ] ], [ [ "# import packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport featuretools as ft\nimport lightgbm as lgb\n%matplotlib inline\nimport seaborn as sns\n\nRSEED = 50", "_____no_output_____" ], [ "# read in data and check basic info\ndef read_check_data(file_path):\n data = pd.read_csv(file_path)\n print('Training data shape: ', data.shape)\n print(data.head())\n return data", "_____no_output_____" ], [ "app_train = read_check_data(\"./data/application_train.csv\")", "Training data shape: (307511, 122)\n SK_ID_CURR TARGET NAME_CONTRACT_TYPE CODE_GENDER FLAG_OWN_CAR \\\n0 100002 1 Cash loans M N \n1 100003 0 Cash loans F N \n2 100004 0 Revolving loans M Y \n3 100006 0 Cash loans F N \n4 100007 0 Cash loans M N \n\n FLAG_OWN_REALTY CNT_CHILDREN AMT_INCOME_TOTAL AMT_CREDIT AMT_ANNUITY \\\n0 Y 0 202500.0 406597.5 24700.5 \n1 N 0 270000.0 1293502.5 35698.5 \n2 Y 0 67500.0 135000.0 6750.0 \n3 Y 0 135000.0 312682.5 29686.5 \n4 Y 0 121500.0 513000.0 21865.5 \n\n ... FLAG_DOCUMENT_18 FLAG_DOCUMENT_19 \\\n0 ... 0 0 \n1 ... 0 0 \n2 ... 0 0 \n3 ... 0 0 \n4 ... 0 0 \n\n FLAG_DOCUMENT_20 FLAG_DOCUMENT_21 AMT_REQ_CREDIT_BUREAU_HOUR \\\n0 0 0 0.0 \n1 0 0 0.0 \n2 0 0 0.0 \n3 0 0 NaN \n4 0 0 0.0 \n\n AMT_REQ_CREDIT_BUREAU_DAY AMT_REQ_CREDIT_BUREAU_WEEK \\\n0 0.0 0.0 \n1 0.0 0.0 \n2 0.0 0.0 \n3 NaN NaN \n4 0.0 0.0 \n\n AMT_REQ_CREDIT_BUREAU_MON AMT_REQ_CREDIT_BUREAU_QRT \\\n0 0.0 0.0 \n1 0.0 0.0 \n2 0.0 0.0 \n3 NaN NaN \n4 0.0 0.0 \n\n AMT_REQ_CREDIT_BUREAU_YEAR \n0 1.0 \n1 0.0 \n2 0.0 \n3 NaN \n4 0.0 \n\n[5 rows x 122 columns]\n" ], [ "app_test = read_check_data(\"./data/application_test.csv\")", "Training data shape: (48744, 121)\n SK_ID_CURR NAME_CONTRACT_TYPE CODE_GENDER FLAG_OWN_CAR FLAG_OWN_REALTY \\\n0 100001 Cash loans F N Y \n1 100005 Cash loans M N Y \n2 100013 Cash loans M Y Y \n3 100028 Cash loans F N Y \n4 100038 Cash loans M Y N \n\n CNT_CHILDREN AMT_INCOME_TOTAL AMT_CREDIT AMT_ANNUITY AMT_GOODS_PRICE \\\n0 0 135000.0 568800.0 20560.5 450000.0 \n1 0 99000.0 222768.0 17370.0 180000.0 \n2 0 202500.0 663264.0 69777.0 630000.0 \n3 2 315000.0 1575000.0 49018.5 1575000.0 \n4 1 180000.0 625500.0 32067.0 625500.0 \n\n ... FLAG_DOCUMENT_18 FLAG_DOCUMENT_19 \\\n0 ... 0 0 \n1 ... 0 0 \n2 ... 0 0 \n3 ... 0 0 \n4 ... 
0 0 \n\n FLAG_DOCUMENT_20 FLAG_DOCUMENT_21 AMT_REQ_CREDIT_BUREAU_HOUR \\\n0 0 0 0.0 \n1 0 0 0.0 \n2 0 0 0.0 \n3 0 0 0.0 \n4 0 0 NaN \n\n AMT_REQ_CREDIT_BUREAU_DAY AMT_REQ_CREDIT_BUREAU_WEEK \\\n0 0.0 0.0 \n1 0.0 0.0 \n2 0.0 0.0 \n3 0.0 0.0 \n4 NaN NaN \n\n AMT_REQ_CREDIT_BUREAU_MON AMT_REQ_CREDIT_BUREAU_QRT \\\n0 0.0 0.0 \n1 0.0 0.0 \n2 0.0 1.0 \n3 0.0 0.0 \n4 NaN NaN \n\n AMT_REQ_CREDIT_BUREAU_YEAR \n0 0.0 \n1 3.0 \n2 4.0 \n3 3.0 \n4 NaN \n\n[5 rows x 121 columns]\n" ], [ "app = app_train.append(app_test,sort=False)\napp.tail()", "_____no_output_____" ], [ "POS_CASH_balance = read_check_data(\"./data/POS_CASH_balance.csv\")", "Training data shape: (10001358, 8)\n SK_ID_PREV SK_ID_CURR MONTHS_BALANCE CNT_INSTALMENT \\\n0 1803195 182943 -31 48.0 \n1 1715348 367990 -33 36.0 \n2 1784872 397406 -32 12.0 \n3 1903291 269225 -35 48.0 \n4 2341044 334279 -35 36.0 \n\n CNT_INSTALMENT_FUTURE NAME_CONTRACT_STATUS SK_DPD SK_DPD_DEF \n0 45.0 Active 0 0 \n1 35.0 Active 0 0 \n2 9.0 Active 0 0 \n3 42.0 Active 0 0 \n4 35.0 Active 0 0 \n" ], [ "bureau_balance = read_check_data(\"./data/bureau_balance.csv\")", "Training data shape: (27299925, 3)\n SK_ID_BUREAU MONTHS_BALANCE STATUS\n0 5715448 0 C\n1 5715448 -1 C\n2 5715448 -2 C\n3 5715448 -3 C\n4 5715448 -4 C\n" ], [ "previous_application = read_check_data(\"./data/previous_application.csv\")", "Training data shape: (1670214, 37)\n SK_ID_PREV SK_ID_CURR NAME_CONTRACT_TYPE AMT_ANNUITY AMT_APPLICATION \\\n0 2030495 271877 Consumer loans 1730.430 17145.0 \n1 2802425 108129 Cash loans 25188.615 607500.0 \n2 2523466 122040 Cash loans 15060.735 112500.0 \n3 2819243 176158 Cash loans 47041.335 450000.0 \n4 1784265 202054 Cash loans 31924.395 337500.0 \n\n AMT_CREDIT AMT_DOWN_PAYMENT AMT_GOODS_PRICE WEEKDAY_APPR_PROCESS_START \\\n0 17145.0 0.0 17145.0 SATURDAY \n1 679671.0 NaN 607500.0 THURSDAY \n2 136444.5 NaN 112500.0 TUESDAY \n3 470790.0 NaN 450000.0 MONDAY \n4 404055.0 NaN 337500.0 THURSDAY \n\n HOUR_APPR_PROCESS_START ... NAME_SELLER_INDUSTRY \\\n0 15 ... Connectivity \n1 11 ... XNA \n2 11 ... XNA \n3 7 ... XNA \n4 9 ... XNA \n\n CNT_PAYMENT NAME_YIELD_GROUP PRODUCT_COMBINATION \\\n0 12.0 middle POS mobile with interest \n1 36.0 low_action Cash X-Sell: low \n2 12.0 high Cash X-Sell: high \n3 12.0 middle Cash X-Sell: middle \n4 24.0 high Cash Street: high \n\n DAYS_FIRST_DRAWING DAYS_FIRST_DUE DAYS_LAST_DUE_1ST_VERSION DAYS_LAST_DUE \\\n0 365243.0 -42.0 300.0 -42.0 \n1 365243.0 -134.0 916.0 365243.0 \n2 365243.0 -271.0 59.0 365243.0 \n3 365243.0 -482.0 -152.0 -182.0 \n4 NaN NaN NaN NaN \n\n DAYS_TERMINATION NFLAG_INSURED_ON_APPROVAL \n0 -37.0 0.0 \n1 365243.0 1.0 \n2 365243.0 1.0 \n3 -177.0 1.0 \n4 NaN NaN \n\n[5 rows x 37 columns]\n" ], [ "credit_card_balance = read_check_data(\"./data/previous_application.csv\")", "Training data shape: (1670214, 37)\n SK_ID_PREV SK_ID_CURR NAME_CONTRACT_TYPE AMT_ANNUITY AMT_APPLICATION \\\n0 2030495 271877 Consumer loans 1730.430 17145.0 \n1 2802425 108129 Cash loans 25188.615 607500.0 \n2 2523466 122040 Cash loans 15060.735 112500.0 \n3 2819243 176158 Cash loans 47041.335 450000.0 \n4 1784265 202054 Cash loans 31924.395 337500.0 \n\n AMT_CREDIT AMT_DOWN_PAYMENT AMT_GOODS_PRICE WEEKDAY_APPR_PROCESS_START \\\n0 17145.0 0.0 17145.0 SATURDAY \n1 679671.0 NaN 607500.0 THURSDAY \n2 136444.5 NaN 112500.0 TUESDAY \n3 470790.0 NaN 450000.0 MONDAY \n4 404055.0 NaN 337500.0 THURSDAY \n\n HOUR_APPR_PROCESS_START ... NAME_SELLER_INDUSTRY \\\n0 15 ... Connectivity \n1 11 ... XNA \n2 11 ... XNA \n3 7 ... XNA \n4 9 ... 
XNA \n\n CNT_PAYMENT NAME_YIELD_GROUP PRODUCT_COMBINATION \\\n0 12.0 middle POS mobile with interest \n1 36.0 low_action Cash X-Sell: low \n2 12.0 high Cash X-Sell: high \n3 12.0 middle Cash X-Sell: middle \n4 24.0 high Cash Street: high \n\n DAYS_FIRST_DRAWING DAYS_FIRST_DUE DAYS_LAST_DUE_1ST_VERSION DAYS_LAST_DUE \\\n0 365243.0 -42.0 300.0 -42.0 \n1 365243.0 -134.0 916.0 365243.0 \n2 365243.0 -271.0 59.0 365243.0 \n3 365243.0 -482.0 -152.0 -182.0 \n4 NaN NaN NaN NaN \n\n DAYS_TERMINATION NFLAG_INSURED_ON_APPROVAL \n0 -37.0 0.0 \n1 365243.0 1.0 \n2 365243.0 1.0 \n3 -177.0 1.0 \n4 NaN NaN \n\n[5 rows x 37 columns]\n" ], [ "bureau = read_check_data(\"./data/bureau.csv\")", "Training data shape: (1716428, 17)\n SK_ID_CURR SK_ID_BUREAU CREDIT_ACTIVE CREDIT_CURRENCY DAYS_CREDIT \\\n0 215354 5714462 Closed currency 1 -497 \n1 215354 5714463 Active currency 1 -208 \n2 215354 5714464 Active currency 1 -203 \n3 215354 5714465 Active currency 1 -203 \n4 215354 5714466 Active currency 1 -629 \n\n CREDIT_DAY_OVERDUE DAYS_CREDIT_ENDDATE DAYS_ENDDATE_FACT \\\n0 0 -153.0 -153.0 \n1 0 1075.0 NaN \n2 0 528.0 NaN \n3 0 NaN NaN \n4 0 1197.0 NaN \n\n AMT_CREDIT_MAX_OVERDUE CNT_CREDIT_PROLONG AMT_CREDIT_SUM \\\n0 NaN 0 91323.0 \n1 NaN 0 225000.0 \n2 NaN 0 464323.5 \n3 NaN 0 90000.0 \n4 77674.5 0 2700000.0 \n\n AMT_CREDIT_SUM_DEBT AMT_CREDIT_SUM_LIMIT AMT_CREDIT_SUM_OVERDUE \\\n0 0.0 NaN 0.0 \n1 171342.0 NaN 0.0 \n2 NaN NaN 0.0 \n3 NaN NaN 0.0 \n4 NaN NaN 0.0 \n\n CREDIT_TYPE DAYS_CREDIT_UPDATE AMT_ANNUITY \n0 Consumer credit -131 NaN \n1 Credit card -20 NaN \n2 Consumer credit -16 NaN \n3 Credit card -16 NaN \n4 Consumer credit -21 NaN \n" ] ], [ [ "# Check Missing Values", "_____no_output_____" ] ], [ [ "def check_missing_col(df):\n missing_num = df.isnull().sum().sort_values(ascending = False)\n minssing_percent = (df.isnull().mean()*100).sort_values(ascending = False)\n missing_info = pd.concat([missing_num, minssing_percent], axis=1, keys=['missing_num', 'minssing_percent'])\n print(missing_info.head())\n return missing_info", "_____no_output_____" ], [ "def check_missing_row(df):\n missing_num = df.isnull().sum(axis=1).sort_values(ascending = False)\n minssing_percent = (df.isnull().mean(axis=1)*100).sort_values(ascending = False)\n missing_info = pd.concat([missing_num, minssing_percent], axis=1, keys=['missing_num', 'minssing_percent'])\n print(missing_info.head())\n return missing_info", "_____no_output_____" ], [ "def missing_hist(missing_info):\n plt.hist(missing_info.minssing_percent)\n print(missing_info.describe())", "_____no_output_____" ], [ "# check missing value of app_train\nmiss_app_train = check_missing_col(app_train)\nmissing_hist(miss_app_train)", " missing_num minssing_percent\nCOMMONAREA_MEDI 214865 69.872297\nCOMMONAREA_AVG 214865 69.872297\nCOMMONAREA_MODE 214865 69.872297\nNONLIVINGAPARTMENTS_MODE 213514 69.432963\nNONLIVINGAPARTMENTS_MEDI 213514 69.432963\n missing_num minssing_percent\ncount 122.000000 122.000000\nmean 75020.204918 24.395942\nstd 87476.136655 28.446507\nmin 0.000000 0.000000\n25% 0.000000 0.000000\n50% 1021.000000 0.332021\n75% 156271.000000 50.818020\nmax 214865.000000 69.872297\n" ], [ "missing_hist(check_missing_row(app_train))", " missing_num minssing_percent\n133770 61 50.0\n244833 61 50.0\n150206 61 50.0\n69707 61 50.0\n269786 61 50.0\n missing_num minssing_percent\ncount 307511.000000 307511.000000\nmean 29.763049 24.395942\nstd 20.919345 17.147004\nmin 0.000000 0.000000\n25% 5.000000 4.098361\n50% 36.000000 29.508197\n75% 49.000000 40.163934\nmax 61.000000 
50.000000\n" ], [ "# check missing value of POS_CASH_balance\nmiss_POS_CASH = check_missing_col(POS_CASH_balance)\nmissing_hist(miss_POS_CASH)", " missing_num minssing_percent\nCNT_INSTALMENT_FUTURE 26087 0.260835\nCNT_INSTALMENT 26071 0.260675\nSK_DPD_DEF 0 0.000000\nSK_DPD 0 0.000000\nNAME_CONTRACT_STATUS 0 0.000000\n missing_num minssing_percent\ncount 8.000000 8.000000\nmean 6519.750000 0.065189\nstd 12072.231948 0.120706\nmin 0.000000 0.000000\n25% 0.000000 0.000000\n50% 0.000000 0.000000\n75% 6517.750000 0.065169\nmax 26087.000000 0.260835\n" ], [ "missing_hist(check_missing_row(POS_CASH_balance))", " missing_num minssing_percent\n8095302 2 25.0\n6099819 2 25.0\n7188626 2 25.0\n7188625 2 25.0\n5799073 2 25.0\n missing_num minssing_percent\ncount 1.000136e+07 1.000136e+07\nmean 5.215092e-03 6.518865e-02\nstd 1.018921e-01 1.273651e+00\nmin 0.000000e+00 0.000000e+00\n25% 0.000000e+00 0.000000e+00\n50% 0.000000e+00 0.000000e+00\n75% 0.000000e+00 0.000000e+00\nmax 2.000000e+00 2.500000e+01\n" ], [ "miss_bureau_balance = check_missing_col(bureau_balance)\nmissing_hist(miss_bureau_balance)", " missing_num minssing_percent\nSTATUS 0 0.0\nMONTHS_BALANCE 0 0.0\nSK_ID_BUREAU 0 0.0\n missing_num minssing_percent\ncount 3.0 3.0\nmean 0.0 0.0\nstd 0.0 0.0\nmin 0.0 0.0\n25% 0.0 0.0\n50% 0.0 0.0\n75% 0.0 0.0\nmax 0.0 0.0\n" ], [ "missing_hist(check_missing_row(bureau_balance))", " missing_num minssing_percent\n27299924 0 0.0\n9099834 0 0.0\n9099966 0 0.0\n9099967 0 0.0\n9099968 0 0.0\n missing_num minssing_percent\ncount 27299925.0 27299925.0\nmean 0.0 0.0\nstd 0.0 0.0\nmin 0.0 0.0\n25% 0.0 0.0\n50% 0.0 0.0\n75% 0.0 0.0\nmax 0.0 0.0\n" ], [ "#check_missing(previous_application)\nmissing_hist(check_missing_col(previous_application))", " missing_num minssing_percent\nRATE_INTEREST_PRIVILEGED 1664263 99.643698\nRATE_INTEREST_PRIMARY 1664263 99.643698\nRATE_DOWN_PAYMENT 895844 53.636480\nAMT_DOWN_PAYMENT 895844 53.636480\nNAME_TYPE_SUITE 820405 49.119754\n missing_num minssing_percent\ncount 3.700000e+01 37.000000\nmean 3.002523e+05 17.976877\nstd 4.602509e+05 27.556401\nmin 0.000000e+00 0.000000\n25% 0.000000e+00 0.000000\n50% 0.000000e+00 0.000000\n75% 6.730650e+05 40.298129\nmax 1.664263e+06 99.643698\n" ], [ "missing_hist(check_missing_row(previous_application))", " missing_num minssing_percent\n402355 15 40.540541\n545870 15 40.540541\n24543 15 40.540541\n205845 15 40.540541\n366790 15 40.540541\n missing_num minssing_percent\ncount 1.670214e+06 1.670214e+06\nmean 6.651445e+00 1.797688e+01\nstd 4.648636e+00 1.256388e+01\nmin 0.000000e+00 0.000000e+00\n25% 2.000000e+00 5.405405e+00\n50% 4.000000e+00 1.081081e+01\n75% 1.100000e+01 2.972973e+01\nmax 1.500000e+01 4.054054e+01\n" ], [ "#check_missing(credit_card_balance)\nmissing_hist(check_missing_col(credit_card_balance))", " missing_num minssing_percent\nRATE_INTEREST_PRIVILEGED 1664263 99.643698\nRATE_INTEREST_PRIMARY 1664263 99.643698\nRATE_DOWN_PAYMENT 895844 53.636480\nAMT_DOWN_PAYMENT 895844 53.636480\nNAME_TYPE_SUITE 820405 49.119754\n missing_num minssing_percent\ncount 3.700000e+01 37.000000\nmean 3.002523e+05 17.976877\nstd 4.602509e+05 27.556401\nmin 0.000000e+00 0.000000\n25% 0.000000e+00 0.000000\n50% 0.000000e+00 0.000000\n75% 6.730650e+05 40.298129\nmax 1.664263e+06 99.643698\n" ], [ "missing_hist(check_missing_row(credit_card_balance))", " missing_num minssing_percent\n402355 15 40.540541\n545870 15 40.540541\n24543 15 40.540541\n205845 15 40.540541\n366790 15 40.540541\n missing_num minssing_percent\ncount 1.670214e+06 
1.670214e+06\nmean 6.651445e+00 1.797688e+01\nstd 4.648636e+00 1.256388e+01\nmin 0.000000e+00 0.000000e+00\n25% 2.000000e+00 5.405405e+00\n50% 4.000000e+00 1.081081e+01\n75% 1.100000e+01 2.972973e+01\nmax 1.500000e+01 4.054054e+01\n" ], [ "#check_missing(bureau)\nmissing_hist(check_missing_col(bureau))", " missing_num minssing_percent\nAMT_ANNUITY 1226791 71.473490\nAMT_CREDIT_MAX_OVERDUE 1124488 65.513264\nDAYS_ENDDATE_FACT 633653 36.916958\nAMT_CREDIT_SUM_LIMIT 591780 34.477415\nAMT_CREDIT_SUM_DEBT 257669 15.011932\n missing_num minssing_percent\ncount 1.700000e+01 17.000000\nmean 2.317616e+05 13.502552\nstd 4.096962e+05 23.869116\nmin 0.000000e+00 0.000000\n25% 0.000000e+00 0.000000\n50% 0.000000e+00 0.000000\n75% 2.576690e+05 15.011932\nmax 1.226791e+06 71.473490\n" ], [ "missing_hist(check_missing_row(bureau))", " missing_num minssing_percent\n793046 6 35.294118\n773821 6 35.294118\n871866 6 35.294118\n468959 6 35.294118\n701505 6 35.294118\n missing_num minssing_percent\ncount 1.716428e+06 1.716428e+06\nmean 2.295434e+00 1.350255e+01\nstd 1.126084e+00 6.624026e+00\nmin 0.000000e+00 0.000000e+00\n25% 1.000000e+00 5.882353e+00\n50% 2.000000e+00 1.176471e+01\n75% 3.000000e+00 1.764706e+01\nmax 6.000000e+00 3.529412e+01\n" ] ], [ [ "# Select and Re-Encode Features", "_____no_output_____" ] ], [ [ "def select_feature_type(df,data_type):\n return df.select_dtypes(include=[data_type])", "_____no_output_____" ], [ "def count_feature_type(df):\n return df.dtypes.value_counts()", "_____no_output_____" ], [ "def count_col_unique(df,data_type):\n '''count the total unique value number of each colunm\n data_type could be object,category'''\n return df.select_dtypes(data_type).apply(pd.Series.nunique, axis = 0)", "_____no_output_____" ], [ "def count_col_each_unique(df,col_name):\n '''count total number of observations of each unique value of a colunm'''\n return df.groupby(col_name)[col_name].count()", "_____no_output_____" ], [ "def check_distribution(df,col_name):\n print(df[col_name].describe())\n print('Total missing value number: ',df[col_name].isnull().sum())\n plt.figure(figsize=(12,5))\n sns.distplot(df[col_name].dropna())", "_____no_output_____" ], [ "def label_encoder(df, categorical_columns=None):\n \"\"\"Encode categorical values as integers (0,1,2,3...) with pandas.factorize. \"\"\"\n # if categorical_colunms are not given than treat object as categorical features\n if not categorical_columns:\n categorical_columns = [col for col in df.columns if df[col].dtype == 'object']\n for col in categorical_columns:\n df[col], uniques = pd.factorize(df[col])\n return df, categorical_columns", "_____no_output_____" ], [ "def one_hot_encoder(df, categorical_columns=None, nan_as_category=True):\n \"\"\"Create a new column for each categorical value in categorical columns. 
\"\"\"\n original_columns = list(df.columns)\n if not categorical_columns:\n categorical_columns = [col for col in df.columns if df[col].dtype == 'object']\n df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)\n categorical_columns = [c for c in df.columns if c not in original_columns]\n return df, categorical_columns\n", "_____no_output_____" ] ], [ [ "## check value type and correct wrong values", "_____no_output_____" ] ], [ [ "count_feature_type(app)", "_____no_output_____" ], [ "count_col_unique(app_train,'object')", "_____no_output_____" ], [ "count_col_each_unique(app,'CODE_GENDER')", "_____no_output_____" ], [ "check_distribution(app,'AMT_INCOME_TOTAL')", "count 3.562550e+05\nmean 1.701161e+05\nstd 2.235068e+05\nmin 2.565000e+04\n25% 1.125000e+05\n50% 1.530000e+05\n75% 2.025000e+05\nmax 1.170000e+08\nName: AMT_INCOME_TOTAL, dtype: float64\nTotal missing value number: 0\n" ], [ "# this discussion https://www.kaggle.com/c/home-credit-default-risk/discussion/57247#332033\n# says 365243 means NA\ncheck_distribution(app,'DAYS_EMPLOYED')", "count 356255.000000\nmean 64317.231413\nstd 141705.532576\nmin -17912.000000\n25% -2781.000000\n50% -1224.000000\n75% -290.000000\nmax 365243.000000\nName: DAYS_EMPLOYED, dtype: float64\nTotal missing value number: 0\n" ], [ "# total number of 1 missing value and add spike on value 0\n# 0 could mean nan value\ncheck_distribution(app,'DAYS_LAST_PHONE_CHANGE')", "count 356254.000000\nmean -978.580852\nstd 835.063902\nmin -4361.000000\n25% -1592.000000\n50% -771.000000\n75% -286.000000\nmax 0.000000\nName: DAYS_LAST_PHONE_CHANGE, dtype: float64\nTotal missing value number: 1\n" ], [ "# remove 4 people code_gender value 'XNA'\napp = app[app['CODE_GENDER'] != 'XNA'] # 4 people with XNA code gender\napp.tail()", "_____no_output_____" ], [ "app['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)\napp.tail()", "_____no_output_____" ], [ "app['DAYS_LAST_PHONE_CHANGE'].replace(0, np.nan, inplace=True)\napp.tail()", "_____no_output_____" ], [ "# change all categorical feature to numerical \napp_clean, categorical_columns = label_encoder(app, categorical_columns=None)", "_____no_output_____" ], [ "app_clean.tail()", "_____no_output_____" ], [ "# check bureau data\ncount_feature_type(bureau)", "_____no_output_____" ], [ "count_col_unique(bureau,'object')", "_____no_output_____" ], [ "check_distribution(bureau,'DAYS_CREDIT')", "count 1.716428e+06\nmean -1.142108e+03\nstd 7.951649e+02\nmin -2.922000e+03\n25% -1.666000e+03\n50% -9.870000e+02\n75% -4.740000e+02\nmax 0.000000e+00\nName: DAYS_CREDIT, dtype: float64\nTotal missing value number: 0\n" ], [ "check_distribution(bureau,'AMT_CREDIT_SUM')", "count 1.716415e+06\nmean 3.549946e+05\nstd 1.149811e+06\nmin 0.000000e+00\n25% 5.130000e+04\n50% 1.255185e+05\n75% 3.150000e+05\nmax 5.850000e+08\nName: AMT_CREDIT_SUM, dtype: float64\nTotal missing value number: 13\n" ] ], [ [ "# Baseline Model", "_____no_output_____" ] ], [ [ "app_base_train = app_clean[app_clean['TARGET'].notnull()]\napp_base_test = app_clean[app_clean['TARGET'].isnull()]\napp_base_test.head()", "_____no_output_____" ], [ "def cross_validate(train):\n \"\"\"Compute cross validation ROC AUC of a gradient boosting model for a given training dataset\"\"\"\n \n # Extract the labels\n train_labels = np.array(train['TARGET'].astype(np.int32)).reshape((-1, ))\n train = train.drop(columns = ['TARGET', 'SK_ID_CURR'])\n\n # Create a lgb training set\n train_set = lgb.Dataset(train, label = train_labels)\n\n # Find default 
hyperparameters\n model = lgb.LGBMClassifier()\n params = model.get_params()\n\n # Number of estimators will be selected through early stopping\n del params['n_estimators'], params['silent']\n\n # Early stoppping with 5 fold cross validation\n cv_results = lgb.cv(params, train_set, num_boost_round = 10000, metrics = 'auc', \n early_stopping_rounds = 100, seed = RSEED, nfold = 5)\n\n print('Cross Validation ROC AUC: {:.5f} with std: {:.5f}.'.format(cv_results['auc-mean'][-1],\n cv_results['auc-stdv'][-1]))\n\n print('Number of estimators trained: {}'.format(len(cv_results['auc-mean'])))\n \n return cv_results", "_____no_output_____" ], [ "cv_results_baseline = cross_validate(app_base_train)", "Cross Validation ROC AUC: 0.75575 with std: 0.00582.\nNumber of estimators trained: 172\n" ], [ "def make_submission(cv_results, train, test):\n \"\"\"Make a submission dataframe for the Kaggle competition for a given dataset.\"\"\"\n \n # Extract the labels\n train_labels = np.array(train['TARGET'].astype(np.int32)).reshape((-1, ))\n train = train.drop(columns = ['TARGET', 'SK_ID_CURR'])\n test_ids = list(test['SK_ID_CURR'])\n test = test.drop(columns = ['TARGET','SK_ID_CURR'])\n \n # Make model with optimal number of estimators and train on training data\n model = lgb.LGBMClassifier(n_estimators = len(cv_results['auc-mean']), random_state=RSEED)\n model.fit(train, train_labels)\n \n # Make predictions on the testing data\n preds = model.predict_proba(test)[:, 1]\n submission = pd.DataFrame({'SK_ID_CURR': test_ids, \n 'TARGET': preds})\n \n return submission", "_____no_output_____" ], [ "submission_baseline = make_submission(cv_results_baseline, app_base_train, app_base_test)\nsubmission_baseline.to_csv('./data/submission_baseline.csv', index = False)", "_____no_output_____" ] ], [ [ "# Semi-Auto Feature", "_____no_output_____" ] ], [ [ "def agg_numeric(df_child, parent_var, df_col_name):\n \"\"\"\n Groups and aggregates the numeric values in a child dataframe\n by the parent variable.\n \n Parameters\n --------\n df_child (dataframe): \n the child dataframe to calculate the statistics on\n parent_var (string): \n the parent variable used for grouping and aggregating\n df_col_name (string): \n the variable used to rename the columns\n \n Return\n --------\n agg (dataframe): \n a dataframe with the statistics aggregated by the `parent_var` for \n all numeric columns. The aggregate function are 'count', 'mean', 'max', 'min', 'sum'\n Each observation of the parent variable will have \n one row in the dataframe with the parent variable as the index. \n The columns are also renamed using the `df_col_name`. Columns with all duplicate\n values are removed. \n \n \"\"\"\n \n # Remove id variables other than grouping variable\n # e.g. 
SK_ID_BUREAU\n for col in df_child:\n if col != parent_var and 'SK_ID' in col:\n df_child = df_child.drop(columns = col)\n \n # Only want the numeric variables\n parent_ids = df_child[parent_var].copy()\n numeric_df = df_child.select_dtypes('number').copy()\n numeric_df[parent_var] = parent_ids\n\n # Group by the specified variable and calculate the statistics\n agg = numeric_df.groupby(parent_var).agg(['count', 'mean', 'max', 'min', 'sum'])\n\n # Need to create new column names\n columns = []\n\n # Iterate through the variables names\n for var in agg.columns.levels[0]:\n if var != parent_var:\n # Iterate through the stat names\n for stat in agg.columns.levels[1]:\n # Make a new column name for the variable and stat\n columns.append('%s_%s_%s' % (df_col_name, var, stat))\n \n agg.columns = columns\n \n # Remove the columns with all redundant values\n _, idx = np.unique(agg, axis = 1, return_index=True)\n agg = agg.iloc[:, idx]\n \n return agg", "_____no_output_____" ], [ "bureau_agg = agg_numeric(bureau, 'SK_ID_CURR', 'BUREAU')\nbureau_agg.head()", "_____no_output_____" ], [ "app_clean_second = pd.merge(app_clean,bureau_agg,on='SK_ID_CURR',how='left')", "_____no_output_____" ], [ "app_base_train_second = app_clean_second[app_clean_second['TARGET'].notnull()]\napp_base_test_second = app_clean_second[app_clean_second['TARGET'].isnull()]\n", "_____no_output_____" ], [ "app_base_train_second.head()\napp_base_test_second.head()", "_____no_output_____" ], [ "cv_results_second = cross_validate(app_base_train_second)", "Cross Validation ROC AUC: 0.76083 with std: 0.00462.\nNumber of estimators trained: 199\n" ], [ "submission_second = make_submission(cv_results_second, app_base_train_second, app_base_test_second)\nsubmission_second.to_csv('./data/submission_second.csv', index = False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0aad9c5b98b4de037aaeb46958fc72d46ddbb96
183,291
ipynb
Jupyter Notebook
cervical-cancer-screening/data/cc_feature_selection.ipynb
Ravirajadrangi/projects6
bc7dc98a01dabe4b64d7de7738521746b09426cd
[ "BSD-2-Clause" ]
24
2015-12-16T14:44:05.000Z
2021-04-03T12:11:47.000Z
cervical-cancer-screening/data/cc_feature_selection.ipynb
Ravirajadrangi/projects6
bc7dc98a01dabe4b64d7de7738521746b09426cd
[ "BSD-2-Clause" ]
null
null
null
cervical-cancer-screening/data/cc_feature_selection.ipynb
Ravirajadrangi/projects6
bc7dc98a01dabe4b64d7de7738521746b09426cd
[ "BSD-2-Clause" ]
8
2017-03-31T17:36:03.000Z
2020-01-11T15:59:13.000Z
30.04278
227
0.332455
[ [ [ "# Genentech Cervical Cancer - Feature Selection\n\nhttps://www.kaggle.com/c/cervical-cancer-screening/", "_____no_output_____" ] ], [ [ "# imports\nimport sys # for stderr\nimport numpy as np\nimport pandas as pd\nimport sklearn as skl\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "# settings \n%logstop\n%logstart -o 'cc_feature_selection.log' rotate\nplt.style.use('ggplot')\n# constants\n# plt.rcParams['figure.figsize'] = (10.0, 10.0)\n# pd.set_option('display.max_rows', 50)\n# pd.set_option('display.max_columns', 50)", "Logging hadn't been started.\nActivating auto-logging. Current session state plus future input saved.\nFilename : cc_feature_selection.log\nMode : rotate\nOutput logging : True\nRaw input log : False\nTimestamping : False\nState : active\n" ], [ "# versions \nimport sys\nprint(pd.datetime.now())\nprint('Python: '+sys.version)\nprint('numpy: '+np.__version__)\nprint('pandas: '+pd.__version__)\nprint('sklearn: '+skl.__version__)", "2016-01-27 08:09:59.769290\nPython: 2.7.11 |Anaconda 2.4.0 (x86_64)| (default, Dec 6 2015, 18:57:58) \n[GCC 4.2.1 (Apple Inc. build 5577)]\nnumpy: 1.10.2\npandas: 0.17.1\nsklearn: 0.17\n" ], [ "from sqlalchemy import create_engine\nengine = create_engine('postgresql://paulperry:@localhost:5432/ccancer')", "_____no_output_____" ], [ "from pyace import ace", "_____no_output_____" ] ], [ [ "## Load ", "_____no_output_____" ] ], [ [ "fdir = './features/'", "_____no_output_____" ], [ "train_file = './input/patients_train.csv.gz'\ntrain = pd.read_csv(train_file)\ntrain.drop('patient_gender', axis=1, inplace=True)\ntrain.set_index('patient_id', inplace=True)\ntrain[:3]", "_____no_output_____" ], [ "files =[\n'diag_code_0_1000.csv.gz',\n'diag_code_1000_2000.csv.gz',\n'diag_code_2000_3000.csv.gz',\n'diag_code_3000_4000.csv.gz',\n'diag_code_4000_5000.csv.gz',\n'diag_code_5000_6000.csv.gz',\n'diag_code_6000_7000.csv.gz',\n'diag_code_7000_8000.csv.gz',\n'diag_code_8000_9000.csv.gz',\n'diag_code_9000_10000.csv.gz',\n'diag_code_10000_11000.csv.gz',\n'diag_code_11000_12000.csv.gz',\n'diag_code_12000_13000.csv.gz',\n'diag_code_13000_14000.csv.gz',\n'diag_code_14000_15000.csv.gz',\n'diag_code_15000_16000.csv.gz',\n]", "_____no_output_____" ], [ "len(files)", "_____no_output_____" ], [ "# # \n# tab = pd.read_csv(fdir+files[0])\n# tab.set_index('patient_id', inplace=True)\n\n# for f in files[1:]:\n# tab2 = pd.read_csv(fdir+f)\n# tab2.set_index('patient_id', inplace=True)\n# tab = tab.merge(tab2, left_index=True, right_index=True, how='left')\n# gc.collect()\n# print(f)", "_____no_output_____" ] ], [ [ "## Run", "_____no_output_____" ] ], [ [ "nnn = 4", "_____no_output_____" ], [ "import datetime\nstart = datetime.datetime.now()\nprint(start)", "2016-01-25 13:26:48.285460\n" ], [ "tab = pd.read_csv(fdir+files[nnn])\ntab.set_index('patient_id', inplace=True)", "_____no_output_____" ], [ "tab.shape", "_____no_output_____" ], [ "dfall = pd.merge(train, tab, left_index=True, right_index=True, how='left')", "_____no_output_____" ], [ "cat_cols = ['patient_age_group','patient_state','ethinicity','household_income','education_level']", "_____no_output_____" ], [ "ranks = ace(dfall, 'is_screener', cat_cols=[])\ndf_ranks = pd.DataFrame(ranks, index=dfall.columns, columns=['ace','mean'])\ndf_ranks = df_ranks.sort_values(by='ace', ascending=False)\ntop_ranks = df_ranks[df_ranks.ace > 0]\ntop_ranks[:20]", "_____no_output_____" ], [ "top_ranks.to_csv('diagnosis_ranks_'+str(nnn)+'.csv')", 
"_____no_output_____" ], [ "import gc\ngc.collect()", "_____no_output_____" ], [ "end = datetime.datetime.now()\nprint('run time: '+str(end-start)+' at: '+str(end))", "run time: 0:02:18.890138 at: 2016-01-25 13:29:07.175598\n" ], [ "break", "_____no_output_____" ], [ "top_ranks = pd.read_csv('diagnosis_ranks_15.csv')\ntop_ranks.set_index('Unnamed: 0', inplace=True)\ntop_ranks[:10]", "_____no_output_____" ], [ "qlist = list(top_ranks[top_ranks.ace > 0.003891].index)\nfor c in cat_cols: \n qlist.remove(c)\nqlist_str = \"('\"+qlist[0]+\"'\"\nfor c in qlist[1:]: \n qlist_str=qlist_str+\",'\"+c+\"'\"\nqlist_str=qlist_str+')'\nqlist_str", "_____no_output_____" ], [ "q = 'select * from diagnosis_code where diagnosis_code in '+qlist_str\n\ndiag_codes = pd.read_sql_query(q, engine)\ndiag_codes", "_____no_output_____" ], [ "diag_codes.to_csv('diagnosis_top_codes.csv', mode='a', header=False)", "_____no_output_____" ], [ "qlist", "_____no_output_____" ], [ "tab[qlist].to_csv('diagnosis_top_'+str(nnn)+'.csv')", "_____no_output_____" ] ], [ [ "## Merge feature values", "_____no_output_____" ] ], [ [ "diag_top_features = [\n 'diagnosis_top_0.csv',\n 'diagnosis_top_3.csv',\n 'diagnosis_top_4.csv',\n 'diagnosis_top_6.csv',\n 'diagnosis_top_10.csv',\n 'diagnosis_top_12.csv',\n 'diagnosis_top_15.csv'\n ]", "_____no_output_____" ], [ "dff = pd.read_csv(diag_top_features[0])\ndff.set_index('patient_id', inplace=True)\nprint(dff.shape)\n\nfor f in diag_top_features[1:]:\n df2 = pd.read_csv(f)\n df2.set_index('patient_id', inplace=True)\n dff = dff.merge(df2, left_index=True, right_index=True, how='outer')\n gc.collect()\n print(f)", "(785605, 3)\ndiagnosis_top_10.csv\ndiagnosis_top_12.csv\ndiagnosis_top_15.csv\ndiagnosis_top_3.csv\ndiagnosis_top_4.csv\ndiagnosis_top_6.csv\n" ], [ "gc.collect()", "_____no_output_____" ], [ "dff.shape", "_____no_output_____" ], [ "dff[:5]", "_____no_output_____" ], [ "dff.columns", "_____no_output_____" ], [ "big_table = pd.read_csv(fdir+'train_big_table.csv.gz')\nbig_table.set_index('patient_id', inplace=True)\nbig_table.shape", "_____no_output_____" ], [ "big_table[:2]", "_____no_output_____" ], [ "dff[:2]", "_____no_output_____" ], [ "dff.columns", "_____no_output_____" ], [ "big_table.columns", "_____no_output_____" ], [ "bad_cols = ['CLINIC', 'INPATIENT', 'OTHER', 'OUTPATIENT', 'UNKNOWN', \n '0001', '0002', '0003', '0004', '0005', '0006', \n 'HX01', 'HX02', 'HX03', 'HX04', 'HX05', 'HXPR',\n 'pract_screen_pct', 'cbsa_pct', 'age_pct', 'state_pct',\n '632','650', u'57452', u'57454', u'57455', u'57456',\n u'81252', u'90696', u'G0143', u'S4020', u'S4023']", "_____no_output_____" ], [ "# take only a subset of the features, the rest I think is junk\n\ncols = list(big_table.columns)\ncols = [x for x in cols if x not in bad_cols]\n\ntest_cols = list(cols)\ntest_cols.remove('is_screener')", "_____no_output_____" ], [ "bigt = big_table[cols].merge(dff, left_index=True, right_index=True, how='left')", "_____no_output_____" ], [ "bigt.columns", "_____no_output_____" ], [ "bigt.shape", "_____no_output_____" ], [ "bigt.to_csv(fdir+'train_big_table.csv')", "_____no_output_____" ], [ "big_table_encoded = pd.read_csv(fdir+'train_big_table_encoded.csv.gz')\nbig_table_encoded.set_index('patient_id', inplace=True)\nbig_table_encoded.shape", "_____no_output_____" ], [ "big_table_encoded[:2]", "_____no_output_____" ], [ "dffd = dff.copy()", "_____no_output_____" ], [ "dffd[dffd > 0] = 1", "_____no_output_____" ], [ "dffd[:10]", "_____no_output_____" ], [ "bigte = 
big_table_encoded[cols].merge(dffd,left_index=True, right_index=True, how='left')\nbigte.shape", "_____no_output_____" ], [ "bigte[:10]", "_____no_output_____" ], [ "bigte.to_csv(fdir+'train_big_table_encoded.csv')", "_____no_output_____" ] ], [ [ "## Procedures", "_____no_output_____" ] ], [ [ "procs = pd.read_csv(fdir+'procedure/procedure_counts_selected.csv.gz')\nprocs.shape", "_____no_output_____" ], [ "procs.set_index('patient_id', inplace=True)\nprocs[:2]", "_____no_output_____" ], [ "print(bigt.shape)\nbigtp = bigt.merge(procs, left_index=True, right_index=True, how='left')\nbigtp.shape", "(1157817, 79)\n" ], [ "bigtp.to_csv(fdir+'train_big_table.csv')", "_____no_output_____" ], [ "print(bigte.shape)\nbigtep = bigte.merge(procs, left_index=True, right_index=True, how='left')\nbigtep.shape", "(1157817, 79)\n" ], [ "bigtep.to_csv(fdir+'train_big_table_encoded.csv')", "_____no_output_____" ] ], [ [ "## test_top", "_____no_output_____" ] ], [ [ "test_diagnosis_top = pd.read_csv('test_diagnosis_top.csv')\ntest_diagnosis_top.shape", "_____no_output_____" ], [ "#test_diagnosis_top.set_index('patient_id', inplace=True)\ntest_diagnosis_top[:5]", "_____no_output_____" ], [ "test_pivot = test_diagnosis_top.pivot(index='patient_id', columns='diagnosis_code', values='diagnosis_code_count')", "_____no_output_____" ], [ "test_pivot[:5]", "_____no_output_____" ], [ "test_pivot.shape", "_____no_output_____" ], [ "test_big_table = pd.read_csv(fdir+'test_big_table.csv.gz')\ntest_big_table.set_index('patient_id', inplace=True)\ntest_big_table = test_big_table[test_cols]\ntest_big_table.shape", "_____no_output_____" ], [ "test_big_table[:4]", "_____no_output_____" ], [ "test_big_table.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1701813 entries, 148341312 to 204245024\nData columns (total 18 columns):\npatient_age_group object\npatient_state object\nethinicity object\nhousehold_income object\neducation_level object\nvisits int64\ncbsa float64\nnum_visits float64\nnum_procedures int64\nnum_diagnosis float64\nnum_rx float64\nRX_ASSISTANCE float64\nRX_CASH float64\nRX_COMMERCIAL float64\nRX_MANAGED_MEDICAID float64\nRX_MEDICAID float64\nRX_MEDICARE float64\n632 float64\ndtypes: float64(11), int64(2), object(5)\nmemory usage: 246.7+ MB\n" ], [ "test_bigt = test_big_table.merge(test_pivot, left_index=True, right_index=True, how='left')", "_____no_output_____" ], [ "test_bigt.shape", "_____no_output_____" ], [ "test_bigt[:5]", "_____no_output_____" ], [ "test_bigt.to_csv(fdir+'test_big_table.csv')", "_____no_output_____" ], [ "test_big_table_encoded = pd.read_csv(fdir+'test_big_table_encoded.csv.gz')\ntest_big_table_encoded.set_index('patient_id', inplace=True)\ntest_big_table_encoded = test_big_table_encoded[test_cols]\ntest_big_table_encoded.shape", "_____no_output_____" ], [ "test_big_table_encoded.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1701813 entries, 148341312 to 204245024\nData columns (total 18 columns):\npatient_age_group int64\npatient_state int64\nethinicity int64\nhousehold_income int64\neducation_level int64\nvisits int64\ncbsa float64\nnum_visits float64\nnum_procedures int64\nnum_diagnosis float64\nnum_rx float64\nRX_ASSISTANCE float64\nRX_CASH float64\nRX_COMMERCIAL float64\nRX_MANAGED_MEDICAID float64\nRX_MEDICAID float64\nRX_MEDICARE float64\n632 float64\ndtypes: float64(11), int64(7)\nmemory usage: 246.7 MB\n" ], [ "test_big_table_encoded[:2]", "_____no_output_____" ], [ "test_dffd = test_pivot.copy()", "_____no_output_____" ], [ "test_dffd[test_dffd > 
0] = 1", "_____no_output_____" ], [ "test_dffd[:10]", "_____no_output_____" ], [ "test_bigte = test_big_table_encoded.merge(test_dffd,left_index=True, right_index=True, how='left')\ntest_bigte.shape", "_____no_output_____" ], [ "test_bigte[:10]", "_____no_output_____" ], [ "test_bigte.to_csv(fdir+'test_big_table_encoded.csv')", "_____no_output_____" ], [ "dff.shape", "_____no_output_____" ], [ "test_pivot.shape", "_____no_output_____" ], [ "sorted_cols = [u'401.9', u'462', u'496', u'585.3', u'616.0', u'616.10', u'620.2',\n u'622.10', u'622.11', u'623.5', u'625.3', u'625.9', u'626.0', u'626.2',\n u'626.4', u'626.8', u'646.83', u'648.93', u'650', u'795.00',\n u'V22.0', u'V22.1', u'V22.2', u'V24.2', u'V25.2', u'V27.0', u'V28.3',\n u'V70.0', u'V74.5']", "_____no_output_____" ], [ "dff[sorted_cols].to_csv('train_diagnosis_top.csv')", "_____no_output_____" ], [ "test_pivot.to_csv('test_diagnosis_top.csv')", "_____no_output_____" ], [ "test_big_table.shape, big_table.shape", "_____no_output_____" ], [ "test_bigt.columns", "_____no_output_____" ], [ "len(test_bigt.columns)", "_____no_output_____" ], [ "len(big_table.columns)", "_____no_output_____" ], [ "len(bigte.columns)", "_____no_output_____" ], [ "test_bigt.drop('632_y', axis=1, inplace=True)\ntest_bigt.rename(columns={'632_x':'632'}, inplace=True)\ntest_bigte.drop('632_y', axis=1, inplace=True)\ntest_bigte.rename(columns={'632_x':'632'}, inplace=True)", "_____no_output_____" ] ], [ [ "## Check results", "_____no_output_____" ] ], [ [ "train_diagnosis_top = fdir+'train_diagnosis_top.csv.gz'\ntrain_diagnosis_top = pd.read_csv(train_diagnosis_top)\ntrain_diagnosis_top.set_index('patient_id', inplace=True)\ntrain_diagnosis_top[:3]", "_____no_output_____" ], [ "train_diagnosis_top.shape", "_____no_output_____" ], [ "test_diagnosis_top = fdir+'test_diagnosis_top.csv.gz'\ntest_diagnosis_top = pd.read_csv(test_diagnosis_top)\ntest_diagnosis_top.set_index('patient_id', inplace=True)\ntest_diagnosis_top[:3]", "_____no_output_____" ], [ "test_diagnosis_top.shape", "_____no_output_____" ], [ "set(test_diagnosis_top.columns) - set(train_diagnosis_top.columns)", "_____no_output_____" ], [ "pd.read_csv(fdir+'diagnosis_top_codes.csv')", "_____no_output_____" ], [ "test_diagnosis_top.columns", "_____no_output_____" ], [ "train_632 = pd.read_csv(fdir+'train_big_table.csv.gz')", "_____no_output_____" ], [ "train_632.set_index('patient_id', inplace=True)\ntrain_632['632'][:5]", "_____no_output_____" ], [ "train_diagnosis_top['632'] = train_632['632']", "_____no_output_____" ], [ "train_diagnosis_top.sort_index(axis=1, inplace=True)", "_____no_output_____" ], [ "train_diagnosis_top.columns", "_____no_output_____" ], [ "train_diagnosis_top.to_csv(fdir+'train_diagnosis_top.csv')", "_____no_output_____" ], [ "train_632.columns", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]